From 8c9b3326297dfbcba55913182f865e13734d4b41 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Wed, 2 Oct 2024 22:39:45 +0700 Subject: [PATCH 001/140] fix(ci): Bring back hack for contracts build till full migration to foundry (#3000) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Creation of empty dirs, to support building from both new (foundry-built) and old contracts ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- .github/workflows/build-core-template.yml | 4 ++++ docker/Makefile | 4 +++- docker/external-node/Dockerfile | 2 ++ docker/server-v2/Dockerfile | 2 ++ 4 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index 9d00f98b181..deaf087cd3e 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -71,11 +71,15 @@ jobs: if [ $(jq length <<<"$tags") -eq 0 ]; then echo "No tag found on all pages." echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + # TODO Remove it when we migrate to foundry inside contracts repository + mkdir -p contracts/l1-contracts/artifacts/ exit 0 fi filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") if [[ ! -z "$filtered_tag" ]]; then echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + # TODO Remove it when we migrate to foundry inside contracts repository + mkdir -p contracts/l1-contracts/out break fi ((page++)) diff --git a/docker/Makefile b/docker/Makefile index 72189902aa1..444a94ce221 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -61,12 +61,14 @@ check-contracts: fi # Build and download needed contracts +# TODO Remove mkdir once we use foundry inside contracts repo prepare-contracts: check-tools check-contracts @cd ../ && \ export ZKSYNC_HOME=$$(pwd) && \ export PATH=$$PATH:$${ZKSYNC_HOME}/bin && \ zkt || true && \ - zk_supervisor contracts + zk_supervisor contracts && \ + mkdir -p contracts/l1-contracts/artifacts # Download setup-key prepare-keys: diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index aa1089ae7b3..1012eecfc16 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -29,6 +29,8 @@ COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /c COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ +# TODO Remove once we use foundry inside contracts repo +COPY contracts/l1-contracts/artifacts/ /contracts/l1-contracts/artifacts/ COPY etc/tokens/ /etc/tokens/ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index 3e8b4f16bca..13a39133327 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -37,6 +37,8 @@ COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /c COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk COPY contracts/l1-contracts/out/ 
/contracts/l1-contracts/out/ COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ +# TODO Remove once we use foundry inside contracts repo +COPY contracts/l1-contracts/artifacts/ /contracts/l1-contracts/artifacts/ COPY etc/tokens/ /etc/tokens/ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ From 8d24eb5f813efefd756906e41d8bbaaa3c92eb8f Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Wed, 2 Oct 2024 21:07:14 +0400 Subject: [PATCH 002/140] chore(main): release core 24.28.0 (#2968) :robot: I have created a release *beep* *boop* --- ## [24.28.0](https://github.com/matter-labs/zksync-era/compare/core-v24.27.0...core-v24.28.0) (2024-10-02) ### Features * **da-clients:** add secrets ([#2954](https://github.com/matter-labs/zksync-era/issues/2954)) ([f4631e4](https://github.com/matter-labs/zksync-era/commit/f4631e4466de620cc1401b326d864cdb8b48a05d)) * **eth-sender:** add a cap to time_in_mempool ([#2978](https://github.com/matter-labs/zksync-era/issues/2978)) ([650d42f](https://github.com/matter-labs/zksync-era/commit/650d42fea6124d80b60a8270a303d72ad6ac741e)) * **eth-watch:** redesign to support multiple chains ([#2867](https://github.com/matter-labs/zksync-era/issues/2867)) ([aa72d84](https://github.com/matter-labs/zksync-era/commit/aa72d849c24a664acd083eba73795ddc5d31d55f)) * Expose http debug page ([#2952](https://github.com/matter-labs/zksync-era/issues/2952)) ([e0b6488](https://github.com/matter-labs/zksync-era/commit/e0b64888aae7324aec2d40fa0cd51ea7e1450cd9)) * **zk_toolbox:** add fees integration test to toolbox ([#2898](https://github.com/matter-labs/zksync-era/issues/2898)) ([e7ead76](https://github.com/matter-labs/zksync-era/commit/e7ead760ce0417dd36af3839ac557f7e9ab238a4)) * **zk_toolbox:** Add SQL format for zk supervisor ([#2950](https://github.com/matter-labs/zksync-era/issues/2950)) ([540e5d7](https://github.com/matter-labs/zksync-era/commit/540e5d7554f54e80d52f1bfae37e03ca8f787baf)) ### Bug Fixes * **api:** Fix batch fee input for `debug` namespace ([#2948](https://github.com/matter-labs/zksync-era/issues/2948)) ([79b6fcf](https://github.com/matter-labs/zksync-era/commit/79b6fcf8b5d10a0ccdceb846370dd6870b6a32b5)) * chainstack block limit exceeded ([#2974](https://github.com/matter-labs/zksync-era/issues/2974)) ([4ffbf42](https://github.com/matter-labs/zksync-era/commit/4ffbf426de166c11aaf5d7b5ed7d199644fba229)) * **eth-watch:** add missing check that from_block is not larger than finalized_block ([#2969](https://github.com/matter-labs/zksync-era/issues/2969)) ([3f406c7](https://github.com/matter-labs/zksync-era/commit/3f406c7d0c0e76d798c2d838abde57ca692822c0)) * ignore unknown fields in rpc json response ([#2962](https://github.com/matter-labs/zksync-era/issues/2962)) ([692ea73](https://github.com/matter-labs/zksync-era/commit/692ea73f75a5fb9db2b4ac33ad24d20568638742)) ### Performance Improvements * **api:** More efficient gas estimation ([#2937](https://github.com/matter-labs/zksync-era/issues/2937)) ([3b69e37](https://github.com/matter-labs/zksync-era/commit/3b69e37e470dab859a55787f6cc971e7083de2fd)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
--------- Co-authored-by: zksync-era-bot --- .github/release-please/manifest.json | 2 +- Cargo.lock | 2 +- core/CHANGELOG.md | 25 +++++++++++++++++++++++++ core/bin/external_node/Cargo.toml | 2 +- 4 files changed, 28 insertions(+), 3 deletions(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 44e10fb13fd..e0e8fbeecf7 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.27.0", + "core": "24.28.0", "prover": "16.5.0", "zk_toolbox": "0.1.2" } diff --git a/Cargo.lock b/Cargo.lock index 127921ba3e9..0873faae904 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10178,7 +10178,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.27.0" +version = "24.28.0" dependencies = [ "anyhow", "assert_matches", diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 6cf2ff4419a..b2f27a6630c 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,30 @@ # Changelog +## [24.28.0](https://github.com/matter-labs/zksync-era/compare/core-v24.27.0...core-v24.28.0) (2024-10-02) + + +### Features + +* **da-clients:** add secrets ([#2954](https://github.com/matter-labs/zksync-era/issues/2954)) ([f4631e4](https://github.com/matter-labs/zksync-era/commit/f4631e4466de620cc1401b326d864cdb8b48a05d)) +* **eth-sender:** add a cap to time_in_mempool ([#2978](https://github.com/matter-labs/zksync-era/issues/2978)) ([650d42f](https://github.com/matter-labs/zksync-era/commit/650d42fea6124d80b60a8270a303d72ad6ac741e)) +* **eth-watch:** redesign to support multiple chains ([#2867](https://github.com/matter-labs/zksync-era/issues/2867)) ([aa72d84](https://github.com/matter-labs/zksync-era/commit/aa72d849c24a664acd083eba73795ddc5d31d55f)) +* Expose http debug page ([#2952](https://github.com/matter-labs/zksync-era/issues/2952)) ([e0b6488](https://github.com/matter-labs/zksync-era/commit/e0b64888aae7324aec2d40fa0cd51ea7e1450cd9)) +* **zk_toolbox:** add fees integration test to toolbox ([#2898](https://github.com/matter-labs/zksync-era/issues/2898)) ([e7ead76](https://github.com/matter-labs/zksync-era/commit/e7ead760ce0417dd36af3839ac557f7e9ab238a4)) +* **zk_toolbox:** Add SQL format for zk supervisor ([#2950](https://github.com/matter-labs/zksync-era/issues/2950)) ([540e5d7](https://github.com/matter-labs/zksync-era/commit/540e5d7554f54e80d52f1bfae37e03ca8f787baf)) + + +### Bug Fixes + +* **api:** Fix batch fee input for `debug` namespace ([#2948](https://github.com/matter-labs/zksync-era/issues/2948)) ([79b6fcf](https://github.com/matter-labs/zksync-era/commit/79b6fcf8b5d10a0ccdceb846370dd6870b6a32b5)) +* chainstack block limit exceeded ([#2974](https://github.com/matter-labs/zksync-era/issues/2974)) ([4ffbf42](https://github.com/matter-labs/zksync-era/commit/4ffbf426de166c11aaf5d7b5ed7d199644fba229)) +* **eth-watch:** add missing check that from_block is not larger than finalized_block ([#2969](https://github.com/matter-labs/zksync-era/issues/2969)) ([3f406c7](https://github.com/matter-labs/zksync-era/commit/3f406c7d0c0e76d798c2d838abde57ca692822c0)) +* ignore unknown fields in rpc json response ([#2962](https://github.com/matter-labs/zksync-era/issues/2962)) ([692ea73](https://github.com/matter-labs/zksync-era/commit/692ea73f75a5fb9db2b4ac33ad24d20568638742)) + + +### Performance Improvements + +* **api:** More efficient gas estimation ([#2937](https://github.com/matter-labs/zksync-era/issues/2937)) 
([3b69e37](https://github.com/matter-labs/zksync-era/commit/3b69e37e470dab859a55787f6cc971e7083de2fd)) + ## [24.27.0](https://github.com/matter-labs/zksync-era/compare/core-v24.26.0...core-v24.27.0) (2024-09-25) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index d841ee5b42e..086d381ecc3 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.27.0" # x-release-please-version +version = "24.28.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true From 057705e55c3816d89478994d6f6c63b08a19156b Mon Sep 17 00:00:00 2001 From: Alexander Melnikov Date: Wed, 2 Oct 2024 12:38:54 -0600 Subject: [PATCH 003/140] feat(zk_toolbox): Add subcommands and flags for chain registration (#2946) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Added subcommands: `zki chain init configs` - just creates configs with an intention to run chain initialization manually via subcommands `zki chain register-chain` - runs steps from `RegisterHyperchain.s.sol` `zki chain accept-chain-ownership` - accepts ownership for `DiamondProxy` `zki chain genesis database` - initializes database only, performs migration (uses values from args or `secrets.yaml`) `zki chain genesis server` - runs server --genesis Added flags: `zki ecosystem init --ecosystem-only` - runs `init` only for ecosystem (skips `init` for chain) Other changes: * Fixed issue with `--wallet_path` value ignored * Nullify database names if `zki ecosystem init` is used for multiple chains * Zeroify some addresses in `contracts.yaml` when copying from ecosystem during init ## Why ❔ These changes allow us to run the chain registration process for externally hosted chains. Not ideal yet, but the process goes like this: L1 side: * Init ecosystem: `zki ecosystem create && zki ecosystem init --ecosystem-only && zki chain init configs` * Fill in wallets * Register chain: `zki chain register-chain` * Deploy L2 contracts: `zki chain deploy-l2-contracts` * Share `contracts.yaml` L2 side: * Init ecosystem: `zki ecosystem create && zki ecosystem init --ecosystem-only && zki chain init configs` * Fill in wallets * Copy `contracts.yaml` * Accept ownership: `zki chain accept-chain-ownership` * Initialize databases: `zki chain genesis database` * Run server genesis: `zki chain genesis server` ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- yarn.lock | 31 +---- .../commands/chain/accept_chain_ownership.rs | 42 ++++++ .../src/commands/chain/args/create.rs | 2 +- .../src/commands/chain/args/genesis.rs | 41 ++++++ .../src/commands/chain/args/init/configs.rs | 70 ++++++++++ .../chain/args/{init.rs => init/mod.rs} | 5 +- .../src/commands/chain/build_transactions.rs | 5 +- .../zk_inception/src/commands/chain/common.rs | 58 +------- .../chain/{genesis.rs => genesis/database.rs} | 129 +++++++----------- .../src/commands/chain/genesis/mod.rs | 94 +++++++++++++ .../src/commands/chain/genesis/server.rs | 46 +++++++ .../src/commands/chain/init/configs.rs | 107 +++++++++++++++ .../commands/chain/{init.rs => init/mod.rs} | 127 ++++++++--------- .../zk_inception/src/commands/chain/mod.rs | 42 ++++-- .../src/commands/chain/register_chain.rs | 96 +++++++++++++ .../src/commands/ecosystem/args/init.rs | 35 +++-- .../src/commands/ecosystem/init.rs | 102 ++++++++------ zk_toolbox/crates/zk_inception/src/main.rs | 8 +- .../crates/zk_inception/src/messages.rs | 11 +- 19 files changed, 732 insertions(+), 319 deletions(-) create mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs create mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs rename zk_toolbox/crates/zk_inception/src/commands/chain/args/{init.rs => init/mod.rs} (96%) rename zk_toolbox/crates/zk_inception/src/commands/chain/{genesis.rs => genesis/database.rs} (63%) create mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs create mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs create mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs rename zk_toolbox/crates/zk_inception/src/commands/chain/{init.rs => init/mod.rs} (55%) create mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs diff --git a/yarn.lock b/yarn.lock index 3c764c7c7b7..531f49abc00 100644 --- a/yarn.lock +++ b/yarn.lock @@ -9816,7 +9816,7 @@ string-length@^4.0.1: char-regex "^1.0.2" strip-ansi "^6.0.0" -"string-width-cjs@npm:string-width@^4.2.0": +"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -9833,15 +9833,6 @@ string-width@^2.1.0, string-width@^2.1.1: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" -string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: - version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - string-width@^5.0.1, string-width@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" @@ -9908,7 +9899,7 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1": +"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" resolved 
"https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -9929,13 +9920,6 @@ strip-ansi@^5.1.0: dependencies: ansi-regex "^4.1.0" -strip-ansi@^6.0.0, strip-ansi@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - strip-ansi@^7.0.1: version "7.1.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" @@ -10781,16 +10765,7 @@ workerpool@6.2.1: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" - integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== - dependencies: - ansi-styles "^4.0.0" - string-width "^4.1.0" - strip-ansi "^6.0.0" - -wrap-ansi@^7.0.0: +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs new file mode 100644 index 00000000000..37d69fcf5bc --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs @@ -0,0 +1,42 @@ +use anyhow::Context; +use common::{forge::ForgeScriptArgs, logger, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::Shell; + +use crate::{ + accept_ownership::accept_admin, + messages::{ + MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_NOT_INITIALIZED, MSG_CHAIN_OWNERSHIP_TRANSFERRED, + MSG_L1_SECRETS_MUST_BE_PRESENTED, + }, +}; + +pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let contracts = chain_config.get_contracts_config()?; + let secrets = chain_config.get_secrets_config()?; + let l1_rpc_url = secrets + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)? 
+ .l1_rpc_url + .expose_str() + .to_string(); + + let spinner = Spinner::new(MSG_ACCEPTING_ADMIN_SPINNER); + accept_admin( + shell, + &ecosystem_config, + contracts.l1.chain_admin_addr, + chain_config.get_wallets_config()?.governor_private_key(), + contracts.l1.diamond_proxy_addr, + &args, + l1_rpc_url.clone(), + ) + .await?; + spinner.finish(); + logger::success(MSG_CHAIN_OWNERSHIP_TRANSFERRED); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs index 3ea15d10f8b..5fc46c1b227 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs @@ -127,7 +127,7 @@ impl ChainCreateArgs { .ask() }); - let wallet_path: Option = if self.wallet_creation == Some(WalletCreation::InFile) { + let wallet_path: Option = if wallet_creation == WalletCreation::InFile { Some(self.wallet_path.unwrap_or_else(|| { Prompt::new(MSG_WALLET_PATH_PROMPT) .validate_with(|val: &String| { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs index 483b78e9b26..21796b3179d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs @@ -1,3 +1,4 @@ +use anyhow::Context; use clap::Parser; use common::{db::DatabaseConfig, Prompt}; use config::ChainConfig; @@ -77,6 +78,46 @@ impl GenesisArgs { } } } + + pub fn fill_values_with_secrets( + mut self, + chain_config: &ChainConfig, + ) -> anyhow::Result { + let secrets = chain_config.get_secrets_config()?; + let database = secrets + .database + .context("Database secrets must be present")?; + + let (server_db_url, server_db_name) = if let Some(db_full_url) = database.server_url { + let db_config = DatabaseConfig::from_url(db_full_url.expose_url()) + .context("Invalid server database URL")?; + (Some(db_config.url), Some(db_config.name)) + } else { + (None, None) + }; + + let (prover_db_url, prover_db_name) = if let Some(db_full_url) = database.prover_url { + let db_config = DatabaseConfig::from_url(db_full_url.expose_url()) + .context("Invalid prover database URL")?; + (Some(db_config.url), Some(db_config.name)) + } else { + (None, None) + }; + + self.server_db_url = self.server_db_url.or(server_db_url); + self.server_db_name = self.server_db_name.or(server_db_name); + self.prover_db_url = self.prover_db_url.or(prover_db_url); + self.prover_db_name = self.prover_db_name.or(prover_db_name); + + Ok(self.fill_values_with_prompt(chain_config)) + } + + pub fn reset_db_names(&mut self) { + self.prover_db_name = None; + self.prover_db_url = None; + self.server_db_name = None; + self.server_db_url = None; + } } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs new file mode 100644 index 00000000000..b4a49f29d21 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs @@ -0,0 +1,70 @@ +use clap::Parser; +use common::Prompt; +use config::ChainConfig; +use serde::{Deserialize, Serialize}; +use types::L1Network; +use url::Url; + +use crate::{ + commands::chain::args::{ + genesis::{GenesisArgs, GenesisArgsFinal}, + init::InitArgsFinal, + }, + defaults::LOCAL_RPC_URL, + messages::{ + MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, 
MSG_L1_RPC_URL_INVALID_ERR, + MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP + }, +}; + +#[derive(Debug, Clone, Serialize, Deserialize, Parser)] +pub struct InitConfigsArgs { + #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)] + #[serde(flatten)] + pub genesis_args: GenesisArgs, + #[clap(long, help = MSG_L1_RPC_URL_HELP)] + pub l1_rpc_url: Option, + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)] + pub no_port_reallocation: bool, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct InitConfigsArgsFinal { + pub genesis_args: GenesisArgsFinal, + pub l1_rpc_url: String, + pub no_port_reallocation: bool, +} + +impl InitConfigsArgs { + pub fn fill_values_with_prompt(self, config: &ChainConfig) -> InitConfigsArgsFinal { + let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| { + let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT); + if config.l1_network == L1Network::Localhost { + prompt = prompt.default(LOCAL_RPC_URL); + } + prompt + .validate_with(|val: &String| -> Result<(), String> { + Url::parse(val) + .map(|_| ()) + .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string()) + }) + .ask() + }); + + InitConfigsArgsFinal { + genesis_args: self.genesis_args.fill_values_with_prompt(config), + l1_rpc_url, + no_port_reallocation: self.no_port_reallocation, + } + } +} + +impl InitConfigsArgsFinal { + pub fn from_chain_init_args(init_args: &InitArgsFinal) -> InitConfigsArgsFinal { + InitConfigsArgsFinal { + genesis_args: init_args.genesis_args.clone(), + l1_rpc_url: init_args.l1_rpc_url.clone(), + no_port_reallocation: init_args.no_port_reallocation, + } + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/mod.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs rename to zk_toolbox/crates/zk_inception/src/commands/chain/args/init/mod.rs index 24a0539f27d..be4d28202b8 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/mod.rs @@ -5,9 +5,8 @@ use serde::{Deserialize, Serialize}; use types::L1Network; use url::Url; -use super::genesis::GenesisArgsFinal; use crate::{ - commands::chain::args::genesis::GenesisArgs, + commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal}, defaults::LOCAL_RPC_URL, messages::{ MSG_DEPLOY_PAYMASTER_PROMPT, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, @@ -15,6 +14,8 @@ use crate::{ }, }; +pub mod configs; + #[derive(Debug, Clone, Serialize, Deserialize, Parser)] pub struct InitArgs { /// All ethereum environment related arguments diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs index 98b2e226cc1..5f1be15231b 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs @@ -6,9 +6,10 @@ use config::{ use ethers::utils::hex::ToHex; use xshell::Shell; -use super::common::register_chain; use crate::{ - commands::chain::args::build_transactions::BuildTransactionsArgs, + commands::chain::{ + args::build_transactions::BuildTransactionsArgs, register_chain::register_chain, + }, messages::{ MSG_BUILDING_CHAIN_REGISTRATION_TXNS_SPINNER, MSG_CHAIN_NOT_FOUND_ERR, MSG_CHAIN_TRANSACTIONS_BUILT, MSG_CHAIN_TXN_MISSING_CONTRACT_CONFIG, diff --git 
a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs index 0aabc16154e..e0aa0b4e047 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs @@ -1,66 +1,12 @@ -use common::{ - forge::{Forge, ForgeScriptArgs}, - spinner::Spinner, -}; -use config::{ - forge_interface::{ - register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, - script_params::REGISTER_CHAIN_SCRIPT_PARAMS, - }, - traits::{ReadConfig, SaveConfig}, - ChainConfig, ContractsConfig, EcosystemConfig, -}; +use common::spinner::Spinner; +use config::{ChainConfig, EcosystemConfig}; use types::{BaseToken, L1Network, WalletCreation}; -use xshell::Shell; use crate::{ consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, messages::{MSG_DISTRIBUTING_ETH_SPINNER, MSG_MINT_BASE_TOKEN_SPINNER}, - utils::forge::{check_the_balance, fill_forge_private_key}, }; -#[allow(clippy::too_many_arguments)] -pub async fn register_chain( - shell: &Shell, - forge_args: ForgeScriptArgs, - config: &EcosystemConfig, - chain_config: &ChainConfig, - contracts: &mut ContractsConfig, - l1_rpc_url: String, - sender: Option, - broadcast: bool, -) -> anyhow::Result<()> { - let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code); - - let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?; - deploy_config.save(shell, deploy_config_path)?; - - let mut forge = Forge::new(&config.path_to_foundry()) - .script(®ISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone()) - .with_ffi() - .with_rpc_url(l1_rpc_url); - - if broadcast { - forge = forge.with_broadcast(); - } - - if let Some(address) = sender { - forge = forge.with_sender(address); - } else { - forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?; - check_the_balance(&forge).await?; - } - - forge.run(shell)?; - - let register_chain_output = RegisterChainOutput::read( - shell, - REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - contracts.set_chain_contracts(®ister_chain_output); - Ok(()) -} - // Distribute eth to the chain wallets for localhost environment pub async fn distribute_eth( ecosystem_config: &EcosystemConfig, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs similarity index 63% rename from zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs rename to zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs index c72183e98b7..bb78979ec38 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs @@ -5,33 +5,26 @@ use common::{ config::global_config, db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, logger, - server::{Server, ServerMode}, - spinner::Spinner, }; use config::{ override_config, set_databases, set_file_artifacts, set_rocks_db_config, - traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, ContractsConfig, EcosystemConfig, FileArtifacts, GeneralConfig, GenesisConfig, - SecretsConfig, WalletsConfig, + traits::SaveConfigWithBasePath, ChainConfig, EcosystemConfig, FileArtifacts, }; use types::ProverMode; use xshell::Shell; use zksync_basic_types::commitment::L1BatchCommitmentMode; -use super::args::genesis::GenesisArgsFinal; use crate::{ - commands::chain::args::genesis::GenesisArgs, + 
commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal}, consts::{ PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG, PROVER_MIGRATIONS, SERVER_MIGRATIONS, }, messages::{ MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, - MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_FAILED_TO_RUN_SERVER_ERR, - MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER, + MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_GENESIS_DATABASES_INITIALIZED, MSG_INITIALIZING_PROVER_DATABASE, MSG_INITIALIZING_SERVER_DATABASE, - MSG_RECREATE_ROCKS_DB_ERRROR, MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS, - MSG_STARTING_GENESIS_SPINNER, + MSG_RECREATE_ROCKS_DB_ERRROR, }, utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }; @@ -41,79 +34,26 @@ pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { let chain_config = ecosystem_config .load_current_chain() .context(MSG_CHAIN_NOT_INITIALIZED)?; - let args = args.fill_values_with_prompt(&chain_config); - genesis(args, shell, &chain_config).await?; - logger::outro(MSG_GENESIS_COMPLETED); - - Ok(()) -} - -pub async fn genesis( - args: GenesisArgsFinal, - shell: &Shell, - config: &ChainConfig, -) -> anyhow::Result<()> { - shell.create_dir(&config.rocks_db_path)?; - - let link_to_code = config.link_to_code.clone(); - let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main) - .context(MSG_RECREATE_ROCKS_DB_ERRROR)?; - let mut general = config.get_general_config()?; - let file_artifacts = FileArtifacts::new(config.artifacts.clone()); - set_rocks_db_config(&mut general, rocks_db)?; - set_file_artifacts(&mut general, file_artifacts); - general.save_with_base_path(shell, &config.configs)?; - - if config.prover_version != ProverMode::NoProofs { - override_config( - shell, - link_to_code.join(PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG), - config, - )?; - } - - if config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium { - override_config( - shell, - link_to_code.join(PATH_TO_VALIDIUM_OVERRIDE_CONFIG), - config, - )?; - } - - let mut secrets = config.get_secrets_config()?; + let mut secrets = chain_config.get_secrets_config()?; + let args = args.fill_values_with_secrets(&chain_config)?; set_databases(&mut secrets, &args.server_db, &args.prover_db)?; - secrets.save_with_base_path(shell, &config.configs)?; - - logger::note( - MSG_SELECTED_CONFIG, - logger::object_to_string(serde_json::json!({ - "chain_config": config, - "server_db_config": args.server_db, - "prover_db_config": args.prover_db, - })), - ); - logger::info(MSG_STARTING_GENESIS); + secrets.save_with_base_path(shell, &chain_config.configs)?; - let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); initialize_databases( shell, &args.server_db, &args.prover_db, - config.link_to_code.clone(), + chain_config.link_to_code.clone(), args.dont_drop, ) .await?; - spinner.finish(); - - let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER); - run_server_genesis(config, shell)?; - spinner.finish(); + logger::outro(MSG_GENESIS_DATABASES_INITIALIZED); Ok(()) } -async fn initialize_databases( +pub async fn initialize_databases( shell: &Shell, server_db_config: &DatabaseConfig, prover_db_config: &DatabaseConfig, @@ -158,18 +98,41 @@ async fn initialize_databases( Ok(()) } -fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { - let server = Server::new(None, chain_config.link_to_code.clone(), false); - server - .run( +pub fn update_configs( + args: GenesisArgsFinal, + 
shell: &Shell, + config: &ChainConfig, +) -> anyhow::Result<()> { + shell.create_dir(&config.rocks_db_path)?; + + // Update secrets configs + let mut secrets = config.get_secrets_config()?; + set_databases(&mut secrets, &args.server_db, &args.prover_db)?; + secrets.save_with_base_path(shell, &config.configs)?; + + // Update general config + let mut general = config.get_general_config()?; + let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main) + .context(MSG_RECREATE_ROCKS_DB_ERRROR)?; + let file_artifacts = FileArtifacts::new(config.artifacts.clone()); + set_rocks_db_config(&mut general, rocks_db)?; + set_file_artifacts(&mut general, file_artifacts); + general.save_with_base_path(shell, &config.configs)?; + + let link_to_code = config.link_to_code.clone(); + if config.prover_version != ProverMode::NoProofs { + override_config( + shell, + link_to_code.join(PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG), + config, + )?; + } + if config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium { + override_config( shell, - ServerMode::Genesis, - GenesisConfig::get_path_with_base_path(&chain_config.configs), - WalletsConfig::get_path_with_base_path(&chain_config.configs), - GeneralConfig::get_path_with_base_path(&chain_config.configs), - SecretsConfig::get_path_with_base_path(&chain_config.configs), - ContractsConfig::get_path_with_base_path(&chain_config.configs), - vec![], - ) - .context(MSG_FAILED_TO_RUN_SERVER_ERR) + link_to_code.join(PATH_TO_VALIDIUM_OVERRIDE_CONFIG), + config, + )?; + } + Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs new file mode 100644 index 00000000000..01842c2916a --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs @@ -0,0 +1,94 @@ +use anyhow::Context; +use clap::{command, Parser, Subcommand}; +use common::{logger, spinner::Spinner}; +use config::{ChainConfig, EcosystemConfig}; +use xshell::Shell; + +use crate::{ + commands::chain::{ + args::genesis::{GenesisArgs, GenesisArgsFinal}, + genesis::{self, database::initialize_databases, server::run_server_genesis}, + }, + messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER, + MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS, MSG_STARTING_GENESIS_SPINNER, + }, +}; + +// Genesis subcommands +pub mod database; +pub mod server; + +#[derive(Subcommand, Debug, Clone)] +pub enum GenesisSubcommands { + /// Initialize databases + #[command(alias = "database")] + InitDatabase(Box), + /// Runs server genesis + Server, +} + +#[derive(Parser, Debug)] +#[command()] +pub struct GenesisCommand { + #[command(subcommand)] + command: Option, + #[clap(flatten)] + args: GenesisArgs, +} + +pub(crate) async fn run(args: GenesisCommand, shell: &Shell) -> anyhow::Result<()> { + match args.command { + Some(GenesisSubcommands::InitDatabase(args)) => database::run(*args, shell).await, + Some(GenesisSubcommands::Server) => server::run(shell).await, + None => run_genesis(args.args, shell).await, + } +} + +pub async fn run_genesis(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let args = args.fill_values_with_prompt(&chain_config); + + genesis(args, shell, &chain_config).await?; + logger::outro(MSG_GENESIS_COMPLETED); + + Ok(()) +} + +pub async fn genesis( 
+ args: GenesisArgsFinal, + shell: &Shell, + config: &ChainConfig, +) -> anyhow::Result<()> { + genesis::database::update_configs(args.clone(), shell, config)?; + + logger::note( + MSG_SELECTED_CONFIG, + logger::object_to_string(serde_json::json!({ + "chain_config": config, + "server_db_config": args.server_db, + "prover_db_config": args.prover_db, + })), + ); + logger::info(MSG_STARTING_GENESIS); + + let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); + initialize_databases( + shell, + &args.server_db, + &args.prover_db, + config.link_to_code.clone(), + args.dont_drop, + ) + .await?; + spinner.finish(); + + let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER); + run_server_genesis(config, shell)?; + spinner.finish(); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs new file mode 100644 index 00000000000..50a74b7ea9e --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs @@ -0,0 +1,46 @@ +use anyhow::Context; +use common::{ + logger, + server::{Server, ServerMode}, + spinner::Spinner, +}; +use config::{ + traits::FileConfigWithDefaultName, ChainConfig, ContractsConfig, EcosystemConfig, + GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, +}; +use xshell::Shell; + +use crate::messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_RUN_SERVER_ERR, MSG_GENESIS_COMPLETED, + MSG_STARTING_GENESIS_SPINNER, +}; + +pub async fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + + let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER); + run_server_genesis(&chain_config, shell)?; + spinner.finish(); + logger::outro(MSG_GENESIS_COMPLETED); + + Ok(()) +} + +pub fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { + let server = Server::new(None, chain_config.link_to_code.clone(), false); + server + .run( + shell, + ServerMode::Genesis, + GenesisConfig::get_path_with_base_path(&chain_config.configs), + WalletsConfig::get_path_with_base_path(&chain_config.configs), + GeneralConfig::get_path_with_base_path(&chain_config.configs), + SecretsConfig::get_path_with_base_path(&chain_config.configs), + ContractsConfig::get_path_with_base_path(&chain_config.configs), + vec![], + ) + .context(MSG_FAILED_TO_RUN_SERVER_ERR) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs new file mode 100644 index 00000000000..e6b9fa7117d --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs @@ -0,0 +1,107 @@ +use anyhow::Context; +use common::logger; +use config::{ + copy_configs, set_l1_rpc_url, update_from_chain_config, + ChainConfig, ContractsConfig, EcosystemConfig, + traits::SaveConfigWithBasePath, + DEFAULT_CONSENSUS_PORT, +}; +use ethers::types::Address; +use xshell::Shell; + +use crate::{ + commands::{ + chain::{ + args::init::configs::{InitConfigsArgs, InitConfigsArgsFinal}, + genesis, + }, + portal::update_portal_config, + }, + defaults::PORT_RANGE_END, + messages::{ + MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, + MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, + }, + utils::{ + consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, + ports::EcosystemPortsScanner, + }, +}; + +pub async fn 
run(args: InitConfigsArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let args = args.fill_values_with_prompt(&chain_config); + + init_configs(&args, shell, &ecosystem_config, &chain_config).await?; + logger::outro(MSG_CHAIN_CONFIGS_INITIALIZED); + + Ok(()) +} + +pub async fn init_configs( + init_args: &InitConfigsArgsFinal, + shell: &Shell, + ecosystem_config: &EcosystemConfig, + chain_config: &ChainConfig, +) -> anyhow::Result { + // Port scanner should run before copying configs to avoid marking initial ports as assigned + let mut ecosystem_ports = EcosystemPortsScanner::scan(shell)?; + copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; + + if !init_args.no_port_reallocation { + ecosystem_ports.allocate_ports_in_yaml( + shell, + &chain_config.path_to_general_config(), + chain_config.id, + )?; + } + + // Initialize general config + let mut general_config = chain_config.get_general_config()?; + + // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` + let offset = ((chain_config.id - 1) * 100) as u16; + let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; + let consensus_port = + ecosystem_ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + + let consensus_keys = generate_consensus_keys(); + let consensus_config = get_consensus_config( + chain_config, + consensus_port, + Some(consensus_keys.clone()), + None, + )?; + general_config.consensus_config = Some(consensus_config); + general_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize genesis config + let mut genesis_config = chain_config.get_genesis_config()?; + update_from_chain_config(&mut genesis_config, chain_config); + genesis_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize contracts config + let mut contracts_config = ecosystem_config.get_contracts_config()?; + contracts_config.l1.diamond_proxy_addr = Address::zero(); + contracts_config.l1.governance_addr = Address::zero(); + contracts_config.l1.chain_admin_addr = Address::zero(); + contracts_config.l1.base_token_addr = chain_config.base_token.address; + contracts_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize secrets config + let mut secrets = chain_config.get_secrets_config()?; + set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; + secrets.consensus = Some(get_consensus_secrets(&consensus_keys)); + secrets.save_with_base_path(shell, &chain_config.configs)?; + + genesis::database::update_configs(init_args.genesis_args.clone(), shell, chain_config)?; + + update_portal_config(shell, chain_config) + .await + .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?; + + Ok(contracts_config) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs similarity index 55% rename from zk_toolbox/crates/zk_inception/src/commands/chain/init.rs rename to zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs index 4d7096723da..8a36f4e32b2 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs @@ -1,106 +1,91 @@ use anyhow::Context; +use clap::{command, Parser, Subcommand}; use common::{git, logger, spinner::Spinner}; -use config::{ - copy_configs, 
set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, - ChainConfig, EcosystemConfig, DEFAULT_CONSENSUS_PORT, -}; +use config::{traits::SaveConfigWithBasePath, ChainConfig, EcosystemConfig}; use types::BaseToken; use xshell::Shell; -use super::common::{distribute_eth, mint_base_token, register_chain}; use crate::{ accept_ownership::accept_admin, - commands::{ - chain::{ - args::init::{InitArgs, InitArgsFinal}, - deploy_l2_contracts, deploy_paymaster, - genesis::genesis, - set_token_multiplier_setter::set_token_multiplier_setter, - setup_legacy_bridge::setup_legacy_bridge, + commands::chain::{ + args::init::{ + configs::{InitConfigsArgs, InitConfigsArgsFinal}, + InitArgs, InitArgsFinal, }, - portal::update_portal_config, + common::{distribute_eth, mint_base_token}, + deploy_l2_contracts, deploy_paymaster, + genesis::genesis, + init::configs::init_configs, + register_chain::register_chain, + set_token_multiplier_setter::set_token_multiplier_setter, + setup_legacy_bridge::setup_legacy_bridge, }, - defaults::PORT_RANGE_END, messages::{ msg_initializing_chain, MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_DEPLOYING_PAYMASTER, MSG_GENESIS_DATABASE_ERR, - MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, + MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND, }, - utils::{ - consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, - ports::EcosystemPortsScanner, - }, }; -pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { +// Init subcommands +pub mod configs; + +#[derive(Subcommand, Debug, Clone)] +pub enum ChainInitSubcommands { + /// Initialize chain configs + Configs(InitConfigsArgs), +} + +#[derive(Parser, Debug)] +#[command()] +pub struct ChainInitCommand { + #[command(subcommand)] + command: Option, + #[clap(flatten)] + args: InitArgs, +} + +pub(crate) async fn run(args: ChainInitCommand, shell: &Shell) -> anyhow::Result<()> { + match args.command { + Some(ChainInitSubcommands::Configs(args)) => configs::run(args, shell).await, + None => run_init(args.args, shell).await, + } +} + +async fn run_init(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { let config = EcosystemConfig::from_file(shell)?; let chain_config = config .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; - let mut args = args.fill_values_with_prompt(&chain_config); + let args = args.fill_values_with_prompt(&chain_config); logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); logger::info(msg_initializing_chain("")); git::submodule_update(shell, config.link_to_code.clone())?; - init(&mut args, shell, &config, &chain_config).await?; + init(&args, shell, &config, &chain_config).await?; logger::success(MSG_CHAIN_INITIALIZED); Ok(()) } pub async fn init( - init_args: &mut InitArgsFinal, + init_args: &InitArgsFinal, shell: &Shell, ecosystem_config: &EcosystemConfig, chain_config: &ChainConfig, ) -> anyhow::Result<()> { - let mut ecosystem_ports = EcosystemPortsScanner::scan(shell)?; - copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; - - if !init_args.no_port_reallocation { - ecosystem_ports.allocate_ports_in_yaml( - shell, - &chain_config.path_to_general_config(), - chain_config.id, - )?; - } - let mut general_config = chain_config.get_general_config()?; - - // TODO: This is a temporary solution. 
We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - let offset = ((chain_config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = - ecosystem_ports.allocate_port(consensus_port_range, "Consensus".to_string())?; - - let consensus_keys = generate_consensus_keys(); - let consensus_config = get_consensus_config( - chain_config, - consensus_port, - Some(consensus_keys.clone()), - None, - )?; - general_config.consensus_config = Some(consensus_config); - general_config.save_with_base_path(shell, &chain_config.configs)?; - - let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, chain_config); - genesis_config.save_with_base_path(shell, &chain_config.configs)?; - - // Copy ecosystem contracts - let mut contracts_config = ecosystem_config.get_contracts_config()?; - contracts_config.l1.base_token_addr = chain_config.base_token.address; - contracts_config.save_with_base_path(shell, &chain_config.configs)?; - + // Initialize configs + let init_configs_args = InitConfigsArgsFinal::from_chain_init_args(init_args); + let mut contracts_config = + init_configs(&init_configs_args, shell, ecosystem_config, chain_config).await?; + + // Fund some wallet addresses with ETH or base token (only for Localhost) distribute_eth(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; mint_base_token(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; - let mut secrets = chain_config.get_secrets_config()?; - set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; - secrets.consensus = Some(get_consensus_secrets(&consensus_keys)); - secrets.save_with_base_path(shell, &chain_config.configs)?; - + // Register chain on BridgeHub (run by L1 Governor) let spinner = Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); register_chain( shell, @@ -115,6 +100,8 @@ pub async fn init( .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; spinner.finish(); + + // Accept ownership for DiamondProxy (run by L2 Governor) let spinner = Spinner::new(MSG_ACCEPTING_ADMIN_SPINNER); accept_admin( shell, @@ -128,6 +115,7 @@ pub async fn init( .await?; spinner.finish(); + // Set token multiplier setter address (run by L2 Governor) if chain_config.base_token != BaseToken::eth() { let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); set_token_multiplier_setter( @@ -148,6 +136,7 @@ pub async fn init( spinner.finish(); } + // Deploy L2 contracts: L2SharedBridge, L2DefaultUpgrader, ... 
(run by L1 Governor) deploy_l2_contracts::deploy_l2_contracts( shell, chain_config, @@ -158,6 +147,7 @@ pub async fn init( .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; + // Setup legacy bridge - shouldn't be used for new chains (run by L1 Governor) if let Some(true) = chain_config.legacy_bridge { setup_legacy_bridge( shell, @@ -169,6 +159,7 @@ pub async fn init( .await?; } + // Deploy Paymaster contract (run by L2 Governor) if init_args.deploy_paymaster { let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); deploy_paymaster::deploy_paymaster( @@ -187,10 +178,6 @@ pub async fn init( genesis(init_args.genesis_args.clone(), shell, chain_config) .await .context(MSG_GENESIS_DATABASE_ERR)?; - - update_portal_config(shell, chain_config) - .await - .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?; - + Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs index 4ddc4bf5856..378309a07cb 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs @@ -1,15 +1,16 @@ use ::common::forge::ForgeScriptArgs; use args::build_transactions::BuildTransactionsArgs; pub(crate) use args::create::ChainCreateArgsFinal; -use clap::Subcommand; +use clap::{command, Subcommand}; pub(crate) use create::create_chain_inner; use xshell::Shell; use crate::commands::chain::{ - args::{create::ChainCreateArgs, genesis::GenesisArgs, init::InitArgs}, - deploy_l2_contracts::Deploy2ContractsOption, + args::create::ChainCreateArgs, deploy_l2_contracts::Deploy2ContractsOption, + genesis::GenesisCommand, init::ChainInitCommand, }; +mod accept_chain_ownership; pub(crate) mod args; mod build_transactions; mod common; @@ -17,7 +18,8 @@ mod create; pub mod deploy_l2_contracts; pub mod deploy_paymaster; pub mod genesis; -pub(crate) mod init; +pub mod init; +pub mod register_chain; mod set_token_multiplier_setter; mod setup_legacy_bridge; @@ -28,20 +30,32 @@ pub enum ChainCommands { /// Create unsigned transactions for chain deployment BuildTransactions(BuildTransactionsArgs), /// Initialize chain, deploying necessary contracts and performing on-chain operations - Init(InitArgs), + Init(Box), /// Run server genesis - Genesis(GenesisArgs), - /// Initialize bridges on l2 - #[command(alias = "bridge")] - InitializeBridges(ForgeScriptArgs), - /// Deploy all l2 contracts + Genesis(GenesisCommand), + /// Register a new chain on L1 (executed by L1 governor). + /// This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, + /// registers chain with BridgeHub and sets pending admin for DiamondProxy. + /// Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership` + #[command(alias = "register")] + RegisterChain(ForgeScriptArgs), + /// Deploy all L2 contracts (executed by L1 governor). #[command(alias = "l2")] DeployL2Contracts(ForgeScriptArgs), + /// Accept ownership of L2 chain (executed by L2 governor). + /// This command should be run after `register-chain` to accept ownership of newly created + /// DiamondProxy contract. 
+ #[command(alias = "accept-ownership")] + AcceptChainOwnership(ForgeScriptArgs), + /// Initialize bridges on L2 + #[command(alias = "bridge")] + InitializeBridges(ForgeScriptArgs), /// Deploy L2 consensus registry #[command(alias = "consensus")] DeployConsensusRegistry(ForgeScriptArgs), /// Deploy Default Upgrader - Upgrader(ForgeScriptArgs), + #[command(alias = "upgrader")] + DeployUpgrader(ForgeScriptArgs), /// Deploy paymaster smart contract #[command(alias = "paymaster")] DeployPaymaster(ForgeScriptArgs), @@ -52,16 +66,18 @@ pub enum ChainCommands { pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<()> { match args { ChainCommands::Create(args) => create::run(args, shell), - ChainCommands::Init(args) => init::run(args, shell).await, + ChainCommands::Init(args) => init::run(*args, shell).await, ChainCommands::BuildTransactions(args) => build_transactions::run(args, shell).await, ChainCommands::Genesis(args) => genesis::run(args, shell).await, + ChainCommands::RegisterChain(args) => register_chain::run(args, shell).await, ChainCommands::DeployL2Contracts(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::All).await } + ChainCommands::AcceptChainOwnership(args) => accept_chain_ownership::run(args, shell).await, ChainCommands::DeployConsensusRegistry(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::ConsensusRegistry).await } - ChainCommands::Upgrader(args) => { + ChainCommands::DeployUpgrader(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await } ChainCommands::InitializeBridges(args) => { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs new file mode 100644 index 00000000000..9f2ff41f897 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs @@ -0,0 +1,96 @@ +use anyhow::Context; +use common::{ + forge::{Forge, ForgeScriptArgs}, + logger, + spinner::Spinner, +}; +use config::{ + forge_interface::{ + register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, + script_params::REGISTER_CHAIN_SCRIPT_PARAMS, + }, + traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, + ChainConfig, ContractsConfig, EcosystemConfig, +}; +use xshell::Shell; + +use crate::{ + messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_CHAIN_REGISTERED, MSG_L1_SECRETS_MUST_BE_PRESENTED, + MSG_REGISTERING_CHAIN_SPINNER, + }, + utils::forge::{check_the_balance, fill_forge_private_key}, +}; + +pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let mut contracts = chain_config.get_contracts_config()?; + let secrets = chain_config.get_secrets_config()?; + let l1_rpc_url = secrets + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)? 
+ .l1_rpc_url + .expose_str() + .to_string(); + let spinner = Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); + register_chain( + shell, + args, + &ecosystem_config, + &chain_config, + &mut contracts, + l1_rpc_url, + None, + true, + ) + .await?; + contracts.save_with_base_path(shell, chain_config.configs)?; + spinner.finish(); + logger::success(MSG_CHAIN_REGISTERED); + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +pub async fn register_chain( + shell: &Shell, + forge_args: ForgeScriptArgs, + config: &EcosystemConfig, + chain_config: &ChainConfig, + contracts: &mut ContractsConfig, + l1_rpc_url: String, + sender: Option, + broadcast: bool, +) -> anyhow::Result<()> { + let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code); + + let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?; + deploy_config.save(shell, deploy_config_path)?; + + let mut forge = Forge::new(&config.path_to_foundry()) + .script(®ISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url(l1_rpc_url); + + if broadcast { + forge = forge.with_broadcast(); + } + + if let Some(address) = sender { + forge = forge.with_sender(address); + } else { + forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?; + check_the_balance(&forge).await?; + } + + forge.run(shell)?; + + let register_chain_output = RegisterChainOutput::read( + shell, + REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code), + )?; + contracts.set_chain_contracts(®ister_chain_output); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs index 7898f8d254a..6d6ed2f3fd9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs @@ -10,10 +10,10 @@ use crate::{ commands::chain::args::genesis::GenesisArgs, defaults::LOCAL_RPC_URL, messages::{ - MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEPLOY_PAYMASTER_PROMPT, - MSG_DEV_ARG_HELP, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, - MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP, - MSG_OBSERVABILITY_PROMPT, + MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEV_ARG_HELP, + MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, + MSG_L1_RPC_URL_PROMPT, MSG_OBSERVABILITY_HELP, MSG_OBSERVABILITY_PROMPT, + MSG_NO_PORT_REALLOCATION_HELP, }, }; @@ -74,9 +74,6 @@ pub struct EcosystemArgsFinal { #[derive(Debug, Clone, Serialize, Deserialize, Parser)] pub struct EcosystemInitArgs { - /// Deploy Paymaster contract - #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub deploy_paymaster: Option, /// Deploy ERC20 contracts #[clap(long, default_missing_value = "true", num_args = 0..=1)] pub deploy_erc20: Option, @@ -86,9 +83,15 @@ pub struct EcosystemInitArgs { #[clap(flatten)] #[serde(flatten)] pub forge_args: ForgeScriptArgs, + /// Deploy Paymaster contract + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub deploy_paymaster: Option, #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)] #[serde(flatten)] pub genesis_args: GenesisArgs, + /// Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` subcommand) + #[clap(long, default_value_t = false)] + pub ecosystem_only: bool, #[clap(long, help = MSG_DEV_ARG_HELP)] pub dev: bool, #[clap(long, 
short = 'o', help = MSG_OBSERVABILITY_HELP, default_missing_value = "true", num_args = 0..=1)] @@ -99,20 +102,14 @@ pub struct EcosystemInitArgs { impl EcosystemInitArgs { pub fn fill_values_with_prompt(self, l1_network: L1Network) -> EcosystemInitArgsFinal { - let (deploy_paymaster, deploy_erc20) = if self.dev { - (true, true) + let deploy_erc20 = if self.dev { + true } else { - let deploy_paymaster = self.deploy_paymaster.unwrap_or_else(|| { - PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT) - .default(true) - .ask() - }); - let deploy_erc20 = self.deploy_erc20.unwrap_or_else(|| { + self.deploy_erc20.unwrap_or_else(|| { PromptConfirm::new(MSG_DEPLOY_ERC20_PROMPT) .default(true) .ask() - }); - (deploy_paymaster, deploy_erc20) + }) }; let ecosystem = self.ecosystem.fill_values_with_prompt(l1_network, self.dev); let observability = if self.dev { @@ -126,12 +123,12 @@ impl EcosystemInitArgs { }; EcosystemInitArgsFinal { - deploy_paymaster, deploy_erc20, ecosystem, forge_args: self.forge_args.clone(), dev: self.dev, observability, + ecosystem_only: self.ecosystem_only, no_port_reallocation: self.no_port_reallocation, } } @@ -139,11 +136,11 @@ impl EcosystemInitArgs { #[derive(Debug, Serialize, Deserialize)] pub struct EcosystemInitArgsFinal { - pub deploy_paymaster: bool, pub deploy_erc20: bool, pub ecosystem: EcosystemArgsFinal, pub forge_args: ForgeScriptArgs, pub dev: bool, pub observability: bool, + pub ecosystem_only: bool, pub no_port_reallocation: bool, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 80efc48f732..67ae3162842 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -37,9 +37,8 @@ use crate::{ }, }, messages::{ - msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, - msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, - MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, + msg_chain_load_err, msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, + msg_initializing_chain, MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, MSG_DEPLOYING_ERC20_SPINNER, MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT, MSG_INITIALIZING_ECOSYSTEM, MSG_INTALLING_DEPS_SPINNER, @@ -57,11 +56,9 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { Err(_) => create_initial_deployments_config(shell, &ecosystem_config.config)?, }; - let mut genesis_args = args.genesis_args.clone(); - if args.dev { - genesis_args.use_default = true; - } - let mut final_ecosystem_args = args.fill_values_with_prompt(ecosystem_config.l1_network); + let mut final_ecosystem_args = args + .clone() + .fill_values_with_prompt(ecosystem_config.l1_network); logger::info(MSG_INITIALIZING_ECOSYSTEM); @@ -69,7 +66,7 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { setup_observability::run(shell)?; } - let contracts_config = init( + let contracts_config = init_ecosystem( &mut final_ecosystem_args, shell, &ecosystem_config, @@ -94,42 +91,17 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { .await?; } - // If the name of chain passed then we deploy exactly this chain otherwise deploy all chains - let list_of_chains = if let Some(name) = global_config().chain_name.clone() { - vec![name] - } else { - ecosystem_config.list_of_chains() - }; - - for chain_name in 
&list_of_chains { - logger::info(msg_initializing_chain(chain_name)); - let chain_config = ecosystem_config - .load_chain(Some(chain_name.clone())) - .context(MSG_CHAIN_NOT_INITIALIZED)?; - - let mut chain_init_args = chain::args::init::InitArgsFinal { - forge_args: final_ecosystem_args.forge_args.clone(), - genesis_args: genesis_args.clone().fill_values_with_prompt(&chain_config), - deploy_paymaster: final_ecosystem_args.deploy_paymaster, - l1_rpc_url: final_ecosystem_args.ecosystem.l1_rpc_url.clone(), - no_port_reallocation: final_ecosystem_args.no_port_reallocation, - }; - - chain::init::init( - &mut chain_init_args, - shell, - &ecosystem_config, - &chain_config, - ) - .await?; + // Initialize chain(s) + let mut chains: Vec = vec![]; + if !final_ecosystem_args.ecosystem_only { + chains = init_chains(&args, &final_ecosystem_args, shell, &ecosystem_config).await?; } - - logger::outro(msg_ecosystem_initialized(&list_of_chains.join(","))); + logger::outro(msg_ecosystem_initialized(&chains.join(","))); Ok(()) } -async fn init( +async fn init_ecosystem( init_args: &mut EcosystemInitArgsFinal, shell: &Shell, ecosystem_config: &EcosystemConfig, @@ -358,3 +330,53 @@ async fn deploy_ecosystem_inner( Ok(contracts_config) } + +async fn init_chains( + init_args: &EcosystemInitArgs, + final_init_args: &EcosystemInitArgsFinal, + shell: &Shell, + ecosystem_config: &EcosystemConfig, +) -> anyhow::Result> { + // If the name of chain passed then we deploy exactly this chain otherwise deploy all chains + let list_of_chains = if let Some(name) = global_config().chain_name.clone() { + vec![name] + } else { + ecosystem_config.list_of_chains() + }; + // Set default values for dev mode + let mut deploy_paymaster = init_args.deploy_paymaster; + let mut genesis_args = init_args.genesis_args.clone(); + if final_init_args.dev { + deploy_paymaster = Some(true); + genesis_args.use_default = true; + } + // Can't initialize multiple chains with the same DB + if list_of_chains.len() > 1 { + genesis_args.reset_db_names(); + } + // Initialize chains + for chain_name in &list_of_chains { + logger::info(msg_initializing_chain(chain_name)); + let chain_config = ecosystem_config + .load_chain(Some(chain_name.clone())) + .context(msg_chain_load_err(chain_name))?; + + let chain_init_args = chain::args::init::InitArgs { + forge_args: final_init_args.forge_args.clone(), + genesis_args: genesis_args.clone(), + deploy_paymaster, + l1_rpc_url: Some(final_init_args.ecosystem.l1_rpc_url.clone()), + no_port_reallocation: final_init_args.no_port_reallocation + }; + let final_chain_init_args = chain_init_args.fill_values_with_prompt(&chain_config); + + chain::init::init( + &final_chain_init_args, + shell, + ecosystem_config, + &chain_config, + ) + .await?; + } + Ok(list_of_chains) +} diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs index 0af9922d0c4..a305ca053b7 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zk_toolbox/crates/zk_inception/src/main.rs @@ -42,10 +42,10 @@ struct Inception { pub enum InceptionSubcommands { /// Ecosystem related commands #[command(subcommand, alias = "e")] - Ecosystem(EcosystemCommands), + Ecosystem(Box), /// Chain related commands #[command(subcommand, alias = "c")] - Chain(ChainCommands), + Chain(Box), /// Prover related commands #[command(subcommand, alias = "p")] Prover(ProverCommands), @@ -121,8 +121,8 @@ async fn main() -> anyhow::Result<()> { async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Result<()> { 
 match inception_args.command {
-        InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, args).await?,
-        InceptionSubcommands::Chain(args) => commands::chain::run(shell, args).await?,
+        InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, *args).await?,
+        InceptionSubcommands::Chain(args) => commands::chain::run(shell, *args).await?,
         InceptionSubcommands::Prover(args) => commands::prover::run(shell, args).await?,
         InceptionSubcommands::Server(args) => commands::server::run(shell, args)?,
         InceptionSubcommands::Containers(args) => commands::containers::run(shell, args)?,
diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs
index 621441ae8d4..ebdcf7378a4 100644
--- a/zk_toolbox/crates/zk_inception/src/messages.rs
+++ b/zk_toolbox/crates/zk_inception/src/messages.rs
@@ -72,6 +72,10 @@ pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found";
 pub(super) const MSG_INITIALIZING_ECOSYSTEM: &str = "Initializing ecosystem";
 pub(super) const MSG_DEPLOYING_ERC20: &str = "Deploying ERC20 contracts";
 pub(super) const MSG_CHAIN_INITIALIZED: &str = "Chain initialized successfully";
+pub(super) const MSG_CHAIN_CONFIGS_INITIALIZED: &str = "Chain configs were initialized";
+pub(super) const MSG_CHAIN_OWNERSHIP_TRANSFERRED: &str =
+    "Chain ownership was transferred successfully";
+pub(super) const MSG_CHAIN_REGISTERED: &str = "Chain registration was successful";
 pub(super) const MSG_DISTRIBUTING_ETH_SPINNER: &str = "Distributing eth...";
 pub(super) const MSG_MINT_BASE_TOKEN_SPINNER: &str =
     "Minting base token to the governance addresses...";
@@ -100,7 +104,11 @@ pub(super) fn msg_initializing_chain(chain_name: &str) -> String {
 }
 
 pub(super) fn msg_ecosystem_initialized(chains: &str) -> String {
-    format!("Ecosystem initialized successfully with chains {chains}")
+    if chains.is_empty() {
+        "Ecosystem initialized successfully. You can initialize chain with `chain init`".to_string()
+    } else {
+        format!("Ecosystem initialized successfully with chains {chains}")
+    }
 }
 
 /// Ecosystem default related messages
@@ -187,6 +195,7 @@ pub(super) const MSG_INITIALIZING_SERVER_DATABASE: &str = "Initializing server d
 pub(super) const MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR: &str = "Failed to drop server database";
 pub(super) const MSG_INITIALIZING_PROVER_DATABASE: &str = "Initializing prover database";
 pub(super) const MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR: &str = "Failed to drop prover database";
+pub(super) const MSG_GENESIS_DATABASES_INITIALIZED: &str = "Databases initialized successfully";
 
 /// Chain update related messages
 pub(super) const MSG_WALLETS_CONFIG_MUST_BE_PRESENT: &str = "Wallets configuration must be present";

From b7ba4286ce38a2e833087c0b91c584de0f956fa7 Mon Sep 17 00:00:00 2001
From: Danil
Date: Wed, 2 Oct 2024 21:05:10 +0200
Subject: [PATCH 004/140] fix(zk_toolbox): Correct secrets (#3004)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

## Why ❔

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`.
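In short, the `update` command now loads the chain's secrets through the toolbox's own config helper instead of deserializing the full `Secrets` struct from `zksync_config`. A minimal sketch of the corrected flow, using the names from the diff below (`chain` is the already loaded `ChainConfig`; error handling elided):

```rust
// Sketch only: read secrets via the chain config helper rather than
// Secrets::read_with_base_path, then reuse the server DB URL as before.
let secrets = chain.get_secrets_config()?;
if let Some(db) = secrets.database {
    if let Some(url) = db.server_url {
        // ...the existing handling of the server database URL stays unchanged
    }
}
```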
Signed-off-by: Danil --- zk_toolbox/crates/zk_inception/src/commands/update.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/zk_toolbox/crates/zk_inception/src/commands/update.rs b/zk_toolbox/crates/zk_inception/src/commands/update.rs index 5cb7208ffd0..534d490e6ca 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/update.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/update.rs @@ -8,11 +8,10 @@ use common::{ yaml::{merge_yaml, ConfigDiff}, }; use config::{ - traits::ReadConfigWithBasePath, ChainConfig, EcosystemConfig, CONTRACTS_FILE, EN_CONFIG_FILE, - ERA_OBSERBAVILITY_DIR, GENERAL_FILE, GENESIS_FILE, SECRETS_FILE, + ChainConfig, EcosystemConfig, CONTRACTS_FILE, EN_CONFIG_FILE, ERA_OBSERBAVILITY_DIR, + GENERAL_FILE, GENESIS_FILE, SECRETS_FILE, }; use xshell::Shell; -use zksync_config::configs::Secrets; use super::args::UpdateArgs; use crate::{ @@ -183,7 +182,7 @@ async fn update_chain( )?; } - let secrets = Secrets::read_with_base_path(shell, secrets)?; + let secrets = chain.get_secrets_config()?; if let Some(db) = secrets.database { if let Some(url) = db.server_url { From a5ac9c164bc0d16a879bef1cab1ccdaeac205bdc Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Thu, 3 Oct 2024 17:22:37 +0700 Subject: [PATCH 005/140] feat(ci): Add external-node build to Makefile (#3002) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add external-node build to Makefile ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- docker/Makefile | 8 ++++++-- docs/guides/build-docker.md | 2 ++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/docker/Makefile b/docker/Makefile index 444a94ce221..c469587c8ff 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -56,7 +56,7 @@ check-tools: check-nodejs check-yarn check-rust check-sqlx-cli check-docker chec # Check that contracts are checkout properly check-contracts: @if [ ! -d ../contracts/l1-contracts/lib/forge-std/foundry.toml ] || [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \ - echo "l1-contracts git submodule is missing. Please re-download repo with `git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git`"; \ + echo "l1-contracts git submodule is missing. 
Please re-download repo with 'git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git'"; \ exit 1; \ fi @@ -93,9 +93,12 @@ build-witness-generator: check-tools prepare-keys $(DOCKER_BUILD_CMD) --file witness-generator/Dockerfile --load \ --tag witness-generator:$(PROTOCOL_VERSION) $(CONTEXT) +build-external-node: check-tools prepare-contracts + $(DOCKER_BUILD_CMD) --file external-node/Dockerfile --load \ + --tag external-node:$(PROTOCOL_VERSION) $(CONTEXT) # Build all containers -build-all: build-contract-verifier build-server-v2 build-witness-generator build-circuit-prover-gpu cleanup +build-all: build-contract-verifier build-server-v2 build-witness-generator build-circuit-prover-gpu build-external-node cleanup # Clean generated images clean-all: @@ -104,3 +107,4 @@ clean-all: docker rmi server-v2:$(PROTOCOL_VERSION) >/dev/null 2>&1 docker rmi prover:$(PROTOCOL_VERSION) >/dev/null 2>&1 docker rmi witness-generator:$(PROTOCOL_VERSION) >/dev/null 2>&1 + docker rmi external-node:$(PROTOCOL_VERSION) >/dev/null 2>&1 diff --git a/docs/guides/build-docker.md b/docs/guides/build-docker.md index a9e8f5d3e76..5dd9cff022b 100644 --- a/docs/guides/build-docker.md +++ b/docs/guides/build-docker.md @@ -25,6 +25,7 @@ contract-verifier:2.0 server-v2:2.0 prover:2.0 witness-generator:2.0 +external-node:2.0 ``` Alternatively, you may build only needed components - available targets are @@ -34,6 +35,7 @@ make -C ./docker build-contract-verifier make -C ./docker build-server-v2 make -C ./docker build-circuit-prover-gpu make -C ./docker build-witness-generator +make -C ./docker build-external-node ``` ## Building updated images From 092eed98751f81a347854a993fdb6913fd7fab2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Fri, 4 Oct 2024 03:17:35 -0300 Subject: [PATCH 006/140] fix(zk_toolbox): Remove prover db from server init (#3009) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Remove prover db from server init ## Why ❔ prover db is not needed in server init since prover db is initialized with prover init --- .github/workflows/ci-core-reusable.yml | 8 ---- zk_toolbox/crates/config/src/secrets.rs | 8 ++-- .../src/commands/chain/args/genesis.rs | 44 ++----------------- .../src/commands/chain/genesis/database.rs | 36 ++++----------- .../src/commands/chain/genesis/mod.rs | 6 +-- 5 files changed, 17 insertions(+), 85 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 9dbd4202afd..d03e44f8bca 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -216,8 +216,6 @@ jobs: --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_era \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_era \ --ignore-prerequisites --verbose \ --observability=false @@ -246,8 +244,6 @@ jobs: --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_validium \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_validium \ --chain validium - name: Create and initialize chain with Custom Token @@ -269,8 +265,6 @@ jobs: --l1-rpc-url=http://localhost:8545 \ 
--server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_custom_token \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_custom_token \ --chain custom_token - name: Create and register chain with transactions signed "offline" @@ -327,8 +321,6 @@ jobs: --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_consensus \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_consensus \ --chain consensus - name: Build test dependencies diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zk_toolbox/crates/config/src/secrets.rs index f0a39148b03..02ace5da88e 100644 --- a/zk_toolbox/crates/config/src/secrets.rs +++ b/zk_toolbox/crates/config/src/secrets.rs @@ -12,17 +12,15 @@ use crate::{ traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig}, }; -pub fn set_databases( +pub fn set_server_database( secrets: &mut SecretsConfig, server_db_config: &DatabaseConfig, - prover_db_config: &DatabaseConfig, ) -> anyhow::Result<()> { let database = secrets .database .as_mut() - .context("Databases must be presented")?; + .context("Server database must be presented")?; database.server_url = Some(SensitiveUrl::from(server_db_config.full_url())); - database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); Ok(()) } @@ -33,7 +31,7 @@ pub fn set_prover_database( let database = secrets .database .as_mut() - .context("Databases must be presented")?; + .context("Prover database must be presented")?; database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs index 21796b3179d..aaf995985a3 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs @@ -7,11 +7,10 @@ use slugify_rs::slugify; use url::Url; use crate::{ - defaults::{generate_db_names, DBNames, DATABASE_PROVER_URL, DATABASE_SERVER_URL}, + defaults::{generate_db_names, DBNames, DATABASE_SERVER_URL}, messages::{ - msg_prover_db_name_prompt, msg_prover_db_url_prompt, msg_server_db_name_prompt, - msg_server_db_url_prompt, MSG_PROVER_DB_NAME_HELP, MSG_PROVER_DB_URL_HELP, - MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP, + msg_server_db_name_prompt, msg_server_db_url_prompt, MSG_SERVER_DB_NAME_HELP, + MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP, }, }; @@ -21,10 +20,6 @@ pub struct GenesisArgs { pub server_db_url: Option, #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] pub server_db_name: Option, - #[clap(long, help = MSG_PROVER_DB_URL_HELP)] - pub prover_db_url: Option, - #[clap(long, help = MSG_PROVER_DB_NAME_HELP)] - pub prover_db_name: Option, #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)] pub use_default: bool, #[clap(long, short, action)] @@ -33,15 +28,11 @@ pub struct GenesisArgs { impl GenesisArgs { pub fn fill_values_with_prompt(self, config: &ChainConfig) -> GenesisArgsFinal { - let DBNames { - server_name, - prover_name, - } = generate_db_names(config); + let DBNames { server_name, .. 
} = generate_db_names(config); let chain_name = config.name.clone(); if self.use_default { GenesisArgsFinal { server_db: DatabaseConfig::new(DATABASE_SERVER_URL.clone(), server_name), - prover_db: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), dont_drop: self.dont_drop, } } else { @@ -58,22 +49,8 @@ impl GenesisArgs { }), separator = "_" ); - let prover_db_url = self.prover_db_url.unwrap_or_else(|| { - Prompt::new(&msg_prover_db_url_prompt(&chain_name)) - .default(DATABASE_PROVER_URL.as_str()) - .ask() - }); - let prover_db_name = slugify!( - &self.prover_db_name.unwrap_or_else(|| { - Prompt::new(&msg_prover_db_name_prompt(&chain_name)) - .default(&prover_name) - .ask() - }), - separator = "_" - ); GenesisArgsFinal { server_db: DatabaseConfig::new(server_db_url, server_db_name), - prover_db: DatabaseConfig::new(prover_db_url, prover_db_name), dont_drop: self.dont_drop, } } @@ -96,25 +73,13 @@ impl GenesisArgs { (None, None) }; - let (prover_db_url, prover_db_name) = if let Some(db_full_url) = database.prover_url { - let db_config = DatabaseConfig::from_url(db_full_url.expose_url()) - .context("Invalid prover database URL")?; - (Some(db_config.url), Some(db_config.name)) - } else { - (None, None) - }; - self.server_db_url = self.server_db_url.or(server_db_url); self.server_db_name = self.server_db_name.or(server_db_name); - self.prover_db_url = self.prover_db_url.or(prover_db_url); - self.prover_db_name = self.prover_db_name.or(prover_db_name); Ok(self.fill_values_with_prompt(chain_config)) } pub fn reset_db_names(&mut self) { - self.prover_db_name = None; - self.prover_db_url = None; self.server_db_name = None; self.server_db_url = None; } @@ -123,6 +88,5 @@ impl GenesisArgs { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GenesisArgsFinal { pub server_db: DatabaseConfig, - pub prover_db: DatabaseConfig, pub dont_drop: bool, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs index bb78979ec38..edf480946be 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs @@ -7,7 +7,7 @@ use common::{ logger, }; use config::{ - override_config, set_databases, set_file_artifacts, set_rocks_db_config, + override_config, set_file_artifacts, set_rocks_db_config, set_server_database, traits::SaveConfigWithBasePath, ChainConfig, EcosystemConfig, FileArtifacts, }; use types::ProverMode; @@ -18,12 +18,11 @@ use crate::{ commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal}, consts::{ PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG, - PROVER_MIGRATIONS, SERVER_MIGRATIONS, + SERVER_MIGRATIONS, }, messages::{ - MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, - MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_GENESIS_DATABASES_INITIALIZED, - MSG_INITIALIZING_PROVER_DATABASE, MSG_INITIALIZING_SERVER_DATABASE, + MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, + MSG_GENESIS_DATABASES_INITIALIZED, MSG_INITIALIZING_SERVER_DATABASE, MSG_RECREATE_ROCKS_DB_ERRROR, }, utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, @@ -37,13 +36,12 @@ pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { let mut secrets = chain_config.get_secrets_config()?; let args = args.fill_values_with_secrets(&chain_config)?; - set_databases(&mut secrets, &args.server_db, &args.prover_db)?; + set_server_database(&mut 
secrets, &args.server_db)?; secrets.save_with_base_path(shell, &chain_config.configs)?; - initialize_databases( + initialize_server_database( shell, &args.server_db, - &args.prover_db, chain_config.link_to_code.clone(), args.dont_drop, ) @@ -53,10 +51,9 @@ pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { Ok(()) } -pub async fn initialize_databases( +pub async fn initialize_server_database( shell: &Shell, server_db_config: &DatabaseConfig, - prover_db_config: &DatabaseConfig, link_to_code: PathBuf, dont_drop: bool, ) -> anyhow::Result<()> { @@ -78,23 +75,6 @@ pub async fn initialize_databases( ) .await?; - if global_config().verbose { - logger::debug(MSG_INITIALIZING_PROVER_DATABASE) - } - if !dont_drop { - drop_db_if_exists(prover_db_config) - .await - .context(MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR)?; - init_db(prover_db_config).await?; - } - let path_to_prover_migration = link_to_code.join(PROVER_MIGRATIONS); - migrate_db( - shell, - path_to_prover_migration, - &prover_db_config.full_url(), - ) - .await?; - Ok(()) } @@ -107,7 +87,7 @@ pub fn update_configs( // Update secrets configs let mut secrets = config.get_secrets_config()?; - set_databases(&mut secrets, &args.server_db, &args.prover_db)?; + set_server_database(&mut secrets, &args.server_db)?; secrets.save_with_base_path(shell, &config.configs)?; // Update general config diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs index 01842c2916a..c1cc03174ae 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs @@ -7,7 +7,7 @@ use xshell::Shell; use crate::{ commands::chain::{ args::genesis::{GenesisArgs, GenesisArgsFinal}, - genesis::{self, database::initialize_databases, server::run_server_genesis}, + genesis::{self, database::initialize_server_database, server::run_server_genesis}, }, messages::{ MSG_CHAIN_NOT_INITIALIZED, MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER, @@ -70,16 +70,14 @@ pub async fn genesis( logger::object_to_string(serde_json::json!({ "chain_config": config, "server_db_config": args.server_db, - "prover_db_config": args.prover_db, })), ); logger::info(MSG_STARTING_GENESIS); let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); - initialize_databases( + initialize_server_database( shell, &args.server_db, - &args.prover_db, config.link_to_code.clone(), args.dont_drop, ) From 2a7e72b08d214472b1b97dcabd1e675ebe722c90 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Fri, 4 Oct 2024 16:53:32 +0700 Subject: [PATCH 007/140] fix(ci): Build zk-env with CUDA (#3013) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Ability to build zk-env with cuda not only from push to main branch ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
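With the new `build_cuda` input, the CUDA zk-environment images can also be rebuilt on demand instead of only on pushes to `main`. An illustrative invocation, assuming an authenticated GitHub CLI:

```bash
# Manually trigger the publish workflow and force the CUDA image build
gh workflow run zk-environment-publish.yml -f build_cuda=true
```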
--- .github/workflows/zk-environment-publish.yml | 22 +++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index 5a08dff178c..73303d15cb3 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -4,6 +4,12 @@ on: # Workflow dispatch, to allow building and pushing new environments. # It will NOT mark them as latest. workflow_dispatch: + inputs: + build_cuda: + description: "Build CUDA images or not" + type: boolean + required: false + default: false push: branches: @@ -202,25 +208,25 @@ jobs: echo "should_run=$changed_files_output" >> "$GITHUB_OUTPUT" - name: Checkout code - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: submodules: "recursive" - name: Log in to US GAR - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - name: Log in to Docker Hub - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GitHub Container Registry - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io @@ -228,19 +234,19 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 - name: Build and optionally push - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 with: file: docker/zk-environment/20.04_amd64_cuda_${{ matrix.cuda_version 
}}.Dockerfile - push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + push: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/main' ) || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-${{ matrix.cuda_version }}:latest matterlabs/zk-environment:cuda-${{ matrix.cuda_version }}-latest From e984bfb8a243bc746549ab9347dc0a367fe02790 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Mon, 7 Oct 2024 10:02:09 +0300 Subject: [PATCH 008/140] feat(en): periodically fetch bridge addresses (#2949) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Periodically fetch bridge addresses ## Why ❔ Addresses will be changed during gateway upgrade ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .gitignore | 1 + core/bin/external_node/src/config/mod.rs | 8 ++ core/bin/external_node/src/node_builder.rs | 4 + core/lib/config/src/configs/en_config.rs | 3 +- core/lib/config/src/testonly.rs | 1 + core/lib/protobuf_config/src/en.rs | 11 ++- .../protobuf_config/src/proto/config/en.proto | 1 + .../web3/backend_jsonrpsee/namespaces/zks.rs | 2 +- core/node/api_server/src/web3/mod.rs | 80 +++++++++---------- .../api_server/src/web3/namespaces/zks.rs | 4 +- core/node/api_server/src/web3/state.rs | 72 +++++++---------- core/node/api_server/src/web3/testonly.rs | 4 + .../web3_api/server/bridge_addresses.rs | 48 +++++++++++ .../web3_api/{server.rs => server/mod.rs} | 71 ++++++++++++---- .../layers/web3_api/server/sealed_l2_block.rs | 50 ++++++++++++ .../commands/external_node/prepare_configs.rs | 1 + 16 files changed, 254 insertions(+), 107 deletions(-) create mode 100644 core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs rename core/node/node_framework/src/implementations/layers/web3_api/{server.rs => server/mod.rs} (81%) create mode 100644 core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs diff --git a/.gitignore b/.gitignore index c3de7a2df84..bbd13e2319a 100644 --- a/.gitignore +++ b/.gitignore @@ -114,6 +114,7 @@ prover/data/keys/setup_* # Zk Toolbox chains/era/configs/* +chains/gateway/* configs/* era-observability/ core/tests/ts-integration/deployments-zk diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 9b1677c47c4..56ee3edfd25 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -445,6 +445,8 @@ pub(crate) struct OptionalENConfig { /// Gateway RPC URL, needed for operating during migration. #[allow(dead_code)] pub gateway_url: Option, + /// Interval for bridge addresses refreshing in seconds. 
+ bridge_addresses_refresh_interval_sec: Option, } impl OptionalENConfig { @@ -675,6 +677,7 @@ impl OptionalENConfig { api_namespaces, contracts_diamond_proxy_addr: None, gateway_url: enconfig.gateway_url.clone(), + bridge_addresses_refresh_interval_sec: enconfig.bridge_addresses_refresh_interval_sec, }) } @@ -901,6 +904,11 @@ impl OptionalENConfig { Duration::from_secs(self.pruning_data_retention_sec) } + pub fn bridge_addresses_refresh_interval(&self) -> Option { + self.bridge_addresses_refresh_interval_sec + .map(|n| Duration::from_secs(n.get())) + } + #[cfg(test)] fn mock() -> Self { // Set all values to their defaults diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index d0055896d42..14e09b9c2a7 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -430,6 +430,10 @@ impl ExternalNodeBuilder { response_body_size_limit: Some(self.config.optional.max_response_body_size()), with_extended_tracing: self.config.optional.extended_rpc_tracing, pruning_info_refresh_interval: Some(pruning_info_refresh_interval), + bridge_addresses_refresh_interval: self + .config + .optional + .bridge_addresses_refresh_interval(), polling_interval: Some(self.config.optional.polling_interval()), websocket_requests_per_minute_limit: None, // To be set by WS server layer method if required. replication_lag_limit: None, // TODO: Support replication lag limit diff --git a/core/lib/config/src/configs/en_config.rs b/core/lib/config/src/configs/en_config.rs index 7f130e3539a..4cab47b0779 100644 --- a/core/lib/config/src/configs/en_config.rs +++ b/core/lib/config/src/configs/en_config.rs @@ -1,4 +1,4 @@ -use std::num::NonZeroUsize; +use std::num::{NonZeroU64, NonZeroUsize}; use serde::Deserialize; use zksync_basic_types::{ @@ -19,4 +19,5 @@ pub struct ENConfig { pub main_node_rate_limit_rps: Option, pub gateway_url: Option, + pub bridge_addresses_refresh_interval_sec: Option, } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 6fbbad9d8ff..86d9545b0fb 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -933,6 +933,7 @@ impl Distribution for EncodeDist { main_node_rate_limit_rps: self.sample_opt(|| rng.gen()), gateway_url: self .sample_opt(|| format!("localhost:{}", rng.gen::()).parse().unwrap()), + bridge_addresses_refresh_interval_sec: self.sample_opt(|| rng.gen()), } } } diff --git a/core/lib/protobuf_config/src/en.rs b/core/lib/protobuf_config/src/en.rs index 9c07d1d3929..9d1a3931060 100644 --- a/core/lib/protobuf_config/src/en.rs +++ b/core/lib/protobuf_config/src/en.rs @@ -1,4 +1,7 @@ -use std::{num::NonZeroUsize, str::FromStr}; +use std::{ + num::{NonZeroU64, NonZeroUsize}, + str::FromStr, +}; use anyhow::Context; use zksync_basic_types::{url::SensitiveUrl, L1ChainId, L2ChainId}; @@ -36,6 +39,9 @@ impl ProtoRepr for proto::ExternalNode { .as_ref() .map(|a| a.parse().context("gateway_url")) .transpose()?, + bridge_addresses_refresh_interval_sec: self + .bridge_addresses_refresh_interval_sec + .and_then(NonZeroU64::new), }) } @@ -55,6 +61,9 @@ impl ProtoRepr for proto::ExternalNode { .gateway_url .as_ref() .map(|a| a.expose_str().to_string()), + bridge_addresses_refresh_interval_sec: this + .bridge_addresses_refresh_interval_sec + .map(|a| a.get()), } } } diff --git a/core/lib/protobuf_config/src/proto/config/en.proto b/core/lib/protobuf_config/src/proto/config/en.proto index d8a13d31d4b..69412704ea0 100644 --- 
a/core/lib/protobuf_config/src/proto/config/en.proto +++ b/core/lib/protobuf_config/src/proto/config/en.proto @@ -10,4 +10,5 @@ message ExternalNode { optional uint64 main_node_rate_limit_rps = 6; // optional optional config.genesis.L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 7; // optional, default to rollup optional string gateway_url = 8; // optional + optional uint64 bridge_addresses_refresh_interval_sec = 9; // optional } diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index f83eb37ad96..31c8f15bb1e 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -55,7 +55,7 @@ impl ZksNamespaceServer for ZksNamespace { } async fn get_bridge_contracts(&self) -> RpcResult { - Ok(self.get_bridge_contracts_impl()) + Ok(self.get_bridge_contracts_impl().await) } async fn l1_chain_id(&self) -> RpcResult { diff --git a/core/node/api_server/src/web3/mod.rs b/core/node/api_server/src/web3/mod.rs index bad1b493a5f..620e9185078 100644 --- a/core/node/api_server/src/web3/mod.rs +++ b/core/node/api_server/src/web3/mod.rs @@ -47,6 +47,7 @@ use self::{ use crate::{ execution_sandbox::{BlockStartInfo, VmConcurrencyBarrier}, tx_sender::TxSender, + web3::state::BridgeAddressesHandle, }; pub mod backend_jsonrpsee; @@ -143,7 +144,6 @@ struct OptionalApiParams { #[derive(Debug)] pub struct ApiServer { pool: ConnectionPool, - updaters_pool: ConnectionPool, health_updater: Arc, config: InternalApiConfig, transport: ApiTransport, @@ -153,18 +153,21 @@ pub struct ApiServer { namespaces: Vec, method_tracer: Arc, optional: OptionalApiParams, + bridge_addresses_handle: BridgeAddressesHandle, + sealed_l2_block_handle: SealedL2BlockNumber, } #[derive(Debug)] pub struct ApiBuilder { pool: ConnectionPool, - updaters_pool: ConnectionPool, config: InternalApiConfig, polling_interval: Duration, pruning_info_refresh_interval: Duration, // Mandatory params that must be set using builder methods. transport: Option, tx_sender: Option, + bridge_addresses_handle: Option, + sealed_l2_block_handle: Option, // Optional params that may or may not be set using builder methods. We treat `namespaces` // specially because we want to output a warning if they are not set. namespaces: Option>, @@ -178,13 +181,14 @@ impl ApiBuilder { pub fn jsonrpsee_backend(config: InternalApiConfig, pool: ConnectionPool) -> Self { Self { - updaters_pool: pool.clone(), pool, config, polling_interval: Self::DEFAULT_POLLING_INTERVAL, pruning_info_refresh_interval: Self::DEFAULT_PRUNING_INFO_REFRESH_INTERVAL, transport: None, tx_sender: None, + bridge_addresses_handle: None, + sealed_l2_block_handle: None, namespaces: None, method_tracer: Arc::new(MethodTracer::default()), optional: OptionalApiParams::default(), @@ -201,15 +205,6 @@ impl ApiBuilder { self } - /// Configures a dedicated DB pool to be used for updating different information, - /// such as last mined block number or account nonces. This pool is used to execute - /// in a background task. If not called, the main pool will be used. If the API server is under high load, - /// it may make sense to supply a single-connection pool to reduce pool contention with the API methods. 
- pub fn with_updaters_pool(mut self, pool: ConnectionPool) -> Self { - self.updaters_pool = pool; - self - } - pub fn with_tx_sender(mut self, tx_sender: TxSender) -> Self { self.tx_sender = Some(tx_sender); self @@ -285,6 +280,22 @@ impl ApiBuilder { self } + pub fn with_sealed_l2_block_handle( + mut self, + sealed_l2_block_handle: SealedL2BlockNumber, + ) -> Self { + self.sealed_l2_block_handle = Some(sealed_l2_block_handle); + self + } + + pub fn with_bridge_addresses_handle( + mut self, + bridge_addresses_handle: BridgeAddressesHandle, + ) -> Self { + self.bridge_addresses_handle = Some(bridge_addresses_handle); + self + } + // Intended for tests only. #[doc(hidden)] fn with_pub_sub_events(mut self, sender: mpsc::UnboundedSender) -> Self { @@ -312,7 +323,6 @@ impl ApiBuilder { Ok(ApiServer { pool: self.pool, health_updater: Arc::new(health_updater), - updaters_pool: self.updaters_pool, config: self.config, transport, tx_sender: self.tx_sender.context("Transaction sender not set")?, @@ -326,6 +336,12 @@ impl ApiBuilder { }), method_tracer: self.method_tracer, optional: self.optional, + sealed_l2_block_handle: self + .sealed_l2_block_handle + .context("Sealed l2 block handle not set")?, + bridge_addresses_handle: self + .bridge_addresses_handle + .context("Bridge addresses handle not set")?, }) } } @@ -335,11 +351,8 @@ impl ApiServer { self.health_updater.subscribe() } - async fn build_rpc_state( - self, - last_sealed_l2_block: SealedL2BlockNumber, - ) -> anyhow::Result { - let mut storage = self.updaters_pool.connection_tagged("api").await?; + async fn build_rpc_state(self) -> anyhow::Result { + let mut storage = self.pool.connection_tagged("api").await?; let start_info = BlockStartInfo::new(&mut storage, self.pruning_info_refresh_interval).await?; drop(storage); @@ -363,7 +376,8 @@ impl ApiServer { api_config: self.config, start_info, mempool_cache: self.optional.mempool_cache, - last_sealed_l2_block, + last_sealed_l2_block: self.sealed_l2_block_handle, + bridge_addresses_handle: self.bridge_addresses_handle, tree_api: self.optional.tree_api, }) } @@ -371,11 +385,10 @@ impl ApiServer { async fn build_rpc_module( self, pub_sub: Option, - last_sealed_l2_block: SealedL2BlockNumber, ) -> anyhow::Result> { let namespaces = self.namespaces.clone(); let zksync_network_id = self.config.l2_chain_id; - let rpc_state = self.build_rpc_state(last_sealed_l2_block).await?; + let rpc_state = self.build_rpc_state().await?; // Collect all the methods into a single RPC module. let mut rpc = RpcModule::new(()); @@ -473,21 +486,9 @@ impl ApiServer { self, stop_receiver: watch::Receiver, ) -> anyhow::Result { - // Chosen to be significantly smaller than the interval between L2 blocks, but larger than - // the latency of getting the latest sealed L2 block number from Postgres. If the API server - // processes enough requests, information about the latest sealed L2 block will be updated - // by reporting block difference metrics, so the actual update lag would be much smaller than this value. 
- const SEALED_L2_BLOCK_UPDATE_INTERVAL: Duration = Duration::from_millis(25); - let transport = self.transport; + let mut tasks = vec![]; - let (last_sealed_l2_block, sealed_l2_block_update_task) = SealedL2BlockNumber::new( - self.updaters_pool.clone(), - SEALED_L2_BLOCK_UPDATE_INTERVAL, - stop_receiver.clone(), - ); - - let mut tasks = vec![tokio::spawn(sealed_l2_block_update_task)]; let pub_sub = if matches!(transport, ApiTransport::WebSocket(_)) && self.namespaces.contains(&Namespace::Pubsub) { @@ -510,12 +511,8 @@ impl ApiServer { // framework it'll no longer be needed. let health_check = self.health_updater.subscribe(); let (local_addr_sender, local_addr) = oneshot::channel(); - let server_task = tokio::spawn(self.run_jsonrpsee_server( - stop_receiver, - pub_sub, - last_sealed_l2_block, - local_addr_sender, - )); + let server_task = + tokio::spawn(self.run_jsonrpsee_server(stop_receiver, pub_sub, local_addr_sender)); tasks.push(server_task); Ok(ApiServerHandles { @@ -584,7 +581,6 @@ impl ApiServer { self, mut stop_receiver: watch::Receiver, pub_sub: Option, - last_sealed_l2_block: SealedL2BlockNumber, local_addr_sender: oneshot::Sender, ) -> anyhow::Result<()> { let transport = self.transport; @@ -640,7 +636,7 @@ impl ApiServer { tracing::info!("Enabled extended call tracing for {transport_str} API server; this might negatively affect performance"); } - let rpc = self.build_rpc_module(pub_sub, last_sealed_l2_block).await?; + let rpc = self.build_rpc_module(pub_sub).await?; let registered_method_names = Arc::new(rpc.method_names().collect::>()); tracing::debug!( "Built RPC module for {transport_str} server with {} methods: {registered_method_names:?}", diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index 61456095d67..2192f11eb14 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -132,8 +132,8 @@ impl ZksNamespace { self.state.api_config.l2_testnet_paymaster_addr } - pub fn get_bridge_contracts_impl(&self) -> BridgeAddresses { - self.state.api_config.bridge_addresses.clone() + pub async fn get_bridge_contracts_impl(&self) -> BridgeAddresses { + self.state.bridge_addresses_handle.read().await } pub fn l1_chain_id_impl(&self) -> U64 { diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index 8cbb75103cd..723661ab908 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -4,13 +4,13 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::{Duration, Instant}, + time::Instant, }; use anyhow::Context as _; use futures::TryFutureExt; use lru::LruCache; -use tokio::sync::{watch, Mutex}; +use tokio::sync::{Mutex, RwLock}; use vise::GaugeGuard; use zksync_config::{ configs::{api::Web3JsonRpcConfig, ContractsConfig}, @@ -20,8 +20,9 @@ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; use zksync_metadata_calculator::api_server::TreeApiClient; use zksync_node_sync::SyncState; use zksync_types::{ - api, commitment::L1BatchCommitmentMode, l2::L2Tx, transaction_request::CallRequest, Address, - L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, H256, U256, U64, + api, api::BridgeAddresses, commitment::L1BatchCommitmentMode, l2::L2Tx, + transaction_request::CallRequest, Address, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, + H256, U256, U64, }; use zksync_web3_decl::{error::Web3Error, types::Filter}; @@ -173,51 +174,16 @@ impl InternalApiConfig { /// 
Thread-safe updatable information about the last sealed L2 block number. /// /// The information may be temporarily outdated and thus should only be used where this is OK -/// (e.g., for metrics reporting). The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`] -/// and on an interval specified when creating an instance. -#[derive(Debug, Clone)] -pub(crate) struct SealedL2BlockNumber(Arc); +/// (e.g., for metrics reporting). The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`]. +#[derive(Debug, Clone, Default)] +pub struct SealedL2BlockNumber(Arc); impl SealedL2BlockNumber { - /// Creates a handle to the last sealed L2 block number together with a task that will update - /// it on a schedule. - pub fn new( - connection_pool: ConnectionPool, - update_interval: Duration, - stop_receiver: watch::Receiver, - ) -> (Self, impl Future>) { - let this = Self(Arc::default()); - let number_updater = this.clone(); - - let update_task = async move { - loop { - if *stop_receiver.borrow() { - tracing::debug!("Stopping latest sealed L2 block updates"); - return Ok(()); - } - - let mut connection = connection_pool.connection_tagged("api").await.unwrap(); - let Some(last_sealed_l2_block) = - connection.blocks_dal().get_sealed_l2_block_number().await? - else { - tokio::time::sleep(update_interval).await; - continue; - }; - drop(connection); - - number_updater.update(last_sealed_l2_block); - tokio::time::sleep(update_interval).await; - } - }; - - (this, update_task) - } - /// Potentially updates the last sealed L2 block number by comparing it to the provided /// sealed L2 block number (not necessarily the last one). /// /// Returns the last sealed L2 block number after the update. - fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { + pub fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { let prev_value = self .0 .fetch_max(maybe_newer_l2_block_number.0, Ordering::Relaxed); @@ -231,7 +197,7 @@ impl SealedL2BlockNumber { /// Returns the difference between the latest L2 block number and the resolved L2 block number /// from `block_args`. - pub fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { + pub(crate) fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { // We compute the difference in any case, since it may update the stored value. let diff = self.diff(block_args.resolved_block_number()); @@ -243,6 +209,23 @@ impl SealedL2BlockNumber { } } +#[derive(Debug, Clone)] +pub struct BridgeAddressesHandle(Arc>); + +impl BridgeAddressesHandle { + pub fn new(bridge_addresses: BridgeAddresses) -> Self { + Self(Arc::new(RwLock::new(bridge_addresses))) + } + + pub async fn update(&self, bridge_addresses: BridgeAddresses) { + *self.0.write().await = bridge_addresses; + } + + pub async fn read(&self) -> BridgeAddresses { + self.0.read().await.clone() + } +} + /// Holder for the data required for the API to be functional. 
#[derive(Debug, Clone)] pub(crate) struct RpcState { @@ -258,6 +241,7 @@ pub(crate) struct RpcState { pub(super) start_info: BlockStartInfo, pub(super) mempool_cache: Option, pub(super) last_sealed_l2_block: SealedL2BlockNumber, + pub(super) bridge_addresses_handle: BridgeAddressesHandle, } impl RpcState { diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 93309fc09cf..3b05e235c6d 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -181,6 +181,8 @@ async fn spawn_server( let mut namespaces = Namespace::DEFAULT.to_vec(); namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = BridgeAddressesHandle::new(api_config.bridge_addresses.clone()); let server_builder = match transport { ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), @@ -202,6 +204,8 @@ async fn spawn_server( .with_pub_sub_events(pub_sub_events_sender) .with_method_tracer(method_tracer) .enable_api_namespaces(namespaces) + .with_sealed_l2_block_handle(sealed_l2_block_handle) + .with_bridge_addresses_handle(bridge_addresses_handle) .build() .expect("Unable to build API server") .run(stop_receiver) diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs new file mode 100644 index 00000000000..4ba8098c839 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs @@ -0,0 +1,48 @@ +use std::time::Duration; + +use zksync_node_api_server::web3::state::BridgeAddressesHandle; +use zksync_web3_decl::{ + client::{DynClient, L2}, + namespaces::ZksNamespaceClient, +}; + +use crate::{StopReceiver, Task, TaskId}; + +#[derive(Debug)] +pub struct BridgeAddressesUpdaterTask { + pub bridge_address_updater: BridgeAddressesHandle, + pub main_node_client: Box>, + pub update_interval: Option, +} + +#[async_trait::async_trait] +impl Task for BridgeAddressesUpdaterTask { + fn id(&self) -> TaskId { + "bridge_addresses_updater_task".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + const DEFAULT_INTERVAL: Duration = Duration::from_secs(30); + + let update_interval = self.update_interval.unwrap_or(DEFAULT_INTERVAL); + while !*stop_receiver.0.borrow_and_update() { + match self.main_node_client.get_bridge_contracts().await { + Ok(bridge_addresses) => { + self.bridge_address_updater.update(bridge_addresses).await; + } + Err(err) => { + tracing::error!("Failed to query `get_bridge_contracts`, error: {err:?}"); + } + } + + if tokio::time::timeout(update_interval, stop_receiver.0.changed()) + .await + .is_ok() + { + break; + } + } + + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs similarity index 81% rename from core/node/node_framework/src/implementations/layers/web3_api/server.rs rename to core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs index 0a39ae747c7..390d321647c 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs @@ -3,15 +3,24 @@ use std::{num::NonZeroU32, time::Duration}; use tokio::{sync::oneshot, 
task::JoinHandle}; use zksync_circuit_breaker::replication_lag::ReplicationLagChecker; use zksync_config::configs::api::MaxResponseSize; -use zksync_node_api_server::web3::{state::InternalApiConfig, ApiBuilder, ApiServer, Namespace}; +use zksync_node_api_server::web3::{ + state::{BridgeAddressesHandle, InternalApiConfig, SealedL2BlockNumber}, + ApiBuilder, ApiServer, Namespace, +}; use crate::{ - implementations::resources::{ - circuit_breakers::CircuitBreakersResource, - healthcheck::AppHealthCheckResource, - pools::{PoolResource, ReplicaPool}, - sync_state::SyncStateResource, - web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, + implementations::{ + layers::web3_api::server::{ + bridge_addresses::BridgeAddressesUpdaterTask, sealed_l2_block::SealedL2BlockUpdaterTask, + }, + resources::{ + circuit_breakers::CircuitBreakersResource, + healthcheck::AppHealthCheckResource, + main_node_client::MainNodeClientResource, + pools::{PoolResource, ReplicaPool}, + sync_state::SyncStateResource, + web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, + }, }, service::StopReceiver, task::{Task, TaskId}, @@ -19,6 +28,9 @@ use crate::{ FromContext, IntoContext, }; +mod bridge_addresses; +mod sealed_l2_block; + /// Set of optional variables that can be altered to modify the behavior of API builder. #[derive(Debug, Default)] pub struct Web3ServerOptionalConfig { @@ -33,6 +45,8 @@ pub struct Web3ServerOptionalConfig { pub replication_lag_limit: Option, // Used by the external node. pub pruning_info_refresh_interval: Option, + // Used by the external node. + pub bridge_addresses_refresh_interval: Option, pub polling_interval: Option, } @@ -61,6 +75,10 @@ impl Web3ServerOptionalConfig { if let Some(polling_interval) = self.polling_interval { api_builder = api_builder.with_polling_interval(polling_interval); } + if let Some(pruning_info_refresh_interval) = self.pruning_info_refresh_interval { + api_builder = + api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval); + } api_builder = api_builder.with_extended_tracing(self.with_extended_tracing); api_builder } @@ -109,6 +127,7 @@ pub struct Input { pub circuit_breakers: CircuitBreakersResource, #[context(default)] pub app_health: AppHealthCheckResource, + pub main_node_client: Option, } #[derive(Debug, IntoContext)] @@ -118,6 +137,10 @@ pub struct Output { pub web3_api_task: Web3ApiTask, #[context(task)] pub garbage_collector_task: ApiTaskGarbageCollector, + #[context(task)] + pub sealed_l2_block_updater_task: SealedL2BlockUpdaterTask, + #[context(task)] + pub bridge_addresses_updater_task: Option, } impl Web3ServerLayer { @@ -163,20 +186,39 @@ impl WiringLayer for Web3ServerLayer { async fn wire(self, input: Self::Input) -> Result { // Get required resources. 
let replica_resource_pool = input.replica_pool; - let updaters_pool = replica_resource_pool.get_custom(2).await?; + let updaters_pool = replica_resource_pool.get_custom(1).await?; let replica_pool = replica_resource_pool.get().await?; let TxSenderResource(tx_sender) = input.tx_sender; let MempoolCacheResource(mempool_cache) = input.mempool_cache; let sync_state = input.sync_state.map(|state| state.0); let tree_api_client = input.tree_api_client.map(|client| client.0); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = + BridgeAddressesHandle::new(self.internal_api_config.bridge_addresses.clone()); + + let sealed_l2_block_updater_task = SealedL2BlockUpdaterTask { + number_updater: sealed_l2_block_handle.clone(), + pool: updaters_pool, + }; + // Bridge addresses updater task must be started for ENs and only for ENs. + let bridge_addresses_updater_task = + input + .main_node_client + .map(|main_node_client| BridgeAddressesUpdaterTask { + bridge_address_updater: bridge_addresses_handle.clone(), + main_node_client: main_node_client.0, + update_interval: self.optional_config.bridge_addresses_refresh_interval, + }); + // Build server. let mut api_builder = ApiBuilder::jsonrpsee_backend(self.internal_api_config, replica_pool.clone()) - .with_updaters_pool(updaters_pool) .with_tx_sender(tx_sender) .with_mempool_cache(mempool_cache) - .with_extended_tracing(self.optional_config.with_extended_tracing); + .with_extended_tracing(self.optional_config.with_extended_tracing) + .with_sealed_l2_block_handle(sealed_l2_block_handle) + .with_bridge_addresses_handle(bridge_addresses_handle); if let Some(client) = tree_api_client { api_builder = api_builder.with_tree_api(client); } @@ -191,14 +233,9 @@ impl WiringLayer for Web3ServerLayer { if let Some(sync_state) = sync_state { api_builder = api_builder.with_sync_state(sync_state); } - if let Some(pruning_info_refresh_interval) = - self.optional_config.pruning_info_refresh_interval - { - api_builder = - api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval); - } let replication_lag_limit = self.optional_config.replication_lag_limit; api_builder = self.optional_config.apply(api_builder); + let server = api_builder.build()?; // Insert healthcheck. 
@@ -230,6 +267,8 @@ impl WiringLayer for Web3ServerLayer { Ok(Output { web3_api_task, garbage_collector_task, + sealed_l2_block_updater_task, + bridge_addresses_updater_task, }) } } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs new file mode 100644 index 00000000000..02552e212cd --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs @@ -0,0 +1,50 @@ +use std::time::Duration; + +use zksync_dal::{Core, CoreDal}; +use zksync_db_connection::connection_pool::ConnectionPool; +use zksync_node_api_server::web3::state::SealedL2BlockNumber; + +use crate::{StopReceiver, Task, TaskId}; + +#[derive(Debug)] +pub struct SealedL2BlockUpdaterTask { + pub number_updater: SealedL2BlockNumber, + pub pool: ConnectionPool, +} + +#[async_trait::async_trait] +impl Task for SealedL2BlockUpdaterTask { + fn id(&self) -> TaskId { + "api_sealed_l2_block_updater_task".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // Chosen to be significantly smaller than the interval between L2 blocks, but larger than + // the latency of getting the latest sealed L2 block number from Postgres. If the API server + // processes enough requests, information about the latest sealed L2 block will be updated + // by reporting block difference metrics, so the actual update lag would be much smaller than this value. + const UPDATE_INTERVAL: Duration = Duration::from_millis(25); + + while !*stop_receiver.0.borrow_and_update() { + let mut connection = self.pool.connection_tagged("api").await.unwrap(); + let Some(last_sealed_l2_block) = + connection.blocks_dal().get_sealed_l2_block_number().await? + else { + tokio::time::sleep(UPDATE_INTERVAL).await; + continue; + }; + drop(connection); + + self.number_updater.update(last_sealed_l2_block); + + if tokio::time::timeout(UPDATE_INTERVAL, stop_receiver.0.changed()) + .await + .is_ok() + { + break; + } + } + + Ok(()) + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs index defbbd12d40..5ab859d17f0 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs @@ -76,6 +76,7 @@ fn prepare_configs( )?, main_node_rate_limit_rps: None, gateway_url: None, + bridge_addresses_refresh_interval_sec: None, }; let mut general_en = general.clone(); From 4640c4233af46c97f207d2dbce5fedd1bcb66c43 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:52:22 +0300 Subject: [PATCH 009/140] feat(eth-watch): catch another reth error (#3026) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
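The diff below extends the eth-watch client's error classification: reth can report an oversized log query either as "length limit exceeded" (now kept under `TOO_MANY_RESULTS_RETH`) or as "query exceeds max block range" (the new `TOO_BIG_RANGE_RETH`), and both messages now take the same handling path as the analogous Infura, Alchemy, and Chainstack errors. A minimal sketch of this substring-based classification, with the constant values taken from the diff and a hypothetical helper name that is not part of the crate's API:

```rust
// Known fragments of reth's "result set too large / range too wide" errors.
const TOO_MANY_RESULTS_RETH: &str = "length limit exceeded";
const TOO_BIG_RANGE_RETH: &str = "query exceeds max block range";

/// Illustrative helper (not crate API): true if the RPC error message indicates
/// that the log query should be retried over a smaller block range.
fn is_retriable_range_error(err_message: &str) -> bool {
    err_message.contains(TOO_MANY_RESULTS_RETH) || err_message.contains(TOO_BIG_RANGE_RETH)
}
```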
--- core/node/eth_watch/src/client.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 54376bae82e..ac5fc86c6e9 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -46,7 +46,8 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { pub const RETRY_LIMIT: usize = 5; const TOO_MANY_RESULTS_INFURA: &str = "query returned more than"; const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded"; -const TOO_MANY_RESULTS_RETH: &str = "query exceeds max block range"; +const TOO_MANY_RESULTS_RETH: &str = "length limit exceeded"; +const TOO_BIG_RANGE_RETH: &str = "query exceeds max block range"; const TOO_MANY_RESULTS_CHAINSTACK: &str = "range limit exceeded"; /// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]). @@ -149,6 +150,7 @@ impl EthHttpQueryClient { if err_message.contains(TOO_MANY_RESULTS_INFURA) || err_message.contains(TOO_MANY_RESULTS_ALCHEMY) || err_message.contains(TOO_MANY_RESULTS_RETH) + || err_message.contains(TOO_BIG_RANGE_RETH) || err_message.contains(TOO_MANY_RESULTS_CHAINSTACK) { // get the numeric block ids From 314076909dae73d171eb56b489936f36b402befe Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 7 Oct 2024 14:52:42 +0300 Subject: [PATCH 010/140] test(api): Add more `TxSender` tests (#3001) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Covers more `TxSender` functionality with unit tests. ## Why ❔ More test coverage is good. Also, it will help with new VM integration (by running the added tests with a shadowed VM). ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- core/lib/types/src/api/state_override.rs | 11 + .../api_server/src/execution_sandbox/tests.rs | 24 +- core/node/api_server/src/testonly.rs | 376 +++++++- core/node/api_server/src/tx_sender/tests.rs | 805 ------------------ .../api_server/src/tx_sender/tests/call.rs | 253 ++++++ .../src/tx_sender/tests/gas_estimation.rs | 466 ++++++++++ .../api_server/src/tx_sender/tests/mod.rs | 161 ++++ .../api_server/src/tx_sender/tests/send_tx.rs | 293 +++++++ 8 files changed, 1533 insertions(+), 856 deletions(-) delete mode 100644 core/node/api_server/src/tx_sender/tests.rs create mode 100644 core/node/api_server/src/tx_sender/tests/call.rs create mode 100644 core/node/api_server/src/tx_sender/tests/gas_estimation.rs create mode 100644 core/node/api_server/src/tx_sender/tests/mod.rs create mode 100644 core/node/api_server/src/tx_sender/tests/send_tx.rs diff --git a/core/lib/types/src/api/state_override.rs b/core/lib/types/src/api/state_override.rs index a2497a65c53..f2986610840 100644 --- a/core/lib/types/src/api/state_override.rs +++ b/core/lib/types/src/api/state_override.rs @@ -21,6 +21,11 @@ impl StateOverride { self.0.get(address) } + /// Gets mutable overrides for the specified account. + pub fn get_mut(&mut self, address: &Address) -> Option<&mut OverrideAccount> { + self.0.get_mut(address) + } + /// Iterates over all account overrides. 
pub fn iter(&self) -> impl Iterator + '_ { self.0.iter() @@ -48,6 +53,12 @@ impl Bytecode { } } +impl AsRef<[u8]> for Bytecode { + fn as_ref(&self) -> &[u8] { + &self.0 .0 + } +} + impl Serialize for Bytecode { fn serialize(&self, serializer: S) -> Result { self.0.serialize(serializer) diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index c4c1ee314db..306018e1543 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -11,6 +11,7 @@ use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; use zksync_state::PostgresStorageCaches; use zksync_types::{ api::state_override::{OverrideAccount, StateOverride}, + fee::Fee, fee_model::BatchFeeInput, K256PrivateKey, ProtocolVersionId, Transaction, U256, }; @@ -210,11 +211,16 @@ async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args let fee_input = BatchFeeInput::l1_pegged(55, 555); let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = Transaction::from(K256PrivateKey::random().create_transfer( + let tx = K256PrivateKey::random().create_transfer_with_fee( 0.into(), - base_fee, - gas_per_pubdata, - )); + Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }, + ); + let tx = Transaction::from(tx); let (limiter, _) = VmConcurrencyLimiter::new(1); let vm_permit = limiter.acquire().await.unwrap(); @@ -253,7 +259,15 @@ async fn validating_transaction(set_balance: bool) { let fee_input = BatchFeeInput::l1_pegged(55, 555); let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = K256PrivateKey::random().create_transfer(0.into(), base_fee, gas_per_pubdata); + let tx = K256PrivateKey::random().create_transfer_with_fee( + 0.into(), + Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }, + ); let (limiter, _) = VmConcurrencyLimiter::new(1); let vm_permit = limiter.acquire().await.unwrap(); diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 5ee9cfb8ef1..8dc7915385a 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -1,53 +1,44 @@ //! Test utils shared among multiple modules. 
-use std::iter; +use std::{collections::HashMap, iter}; use zk_evm_1_5_0::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_contracts::{ - get_loadnext_contract, load_contract, read_bytecode, + eth_contract, get_loadnext_contract, load_contract, read_bytecode, test_contracts::LoadnextContractExecutionParams, }; +use zksync_dal::{Connection, Core, CoreDal}; +use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; +use zksync_node_fee_model::BatchFeeModelInputProvider; +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ - ethabi::Token, fee::Fee, l2::L2Tx, transaction_request::PaymasterParams, Address, - K256PrivateKey, L2ChainId, Nonce, H256, U256, + api::state_override::{Bytecode, OverrideAccount, OverrideState, StateOverride}, + ethabi, + ethabi::Token, + fee::Fee, + fee_model::FeeParams, + get_code_key, get_known_code_key, + l2::L2Tx, + transaction_request::{CallRequest, PaymasterParams}, + utils::storage_key_for_eth_balance, + AccountTreeId, Address, K256PrivateKey, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, + StorageKey, StorageLog, H256, U256, }; - -pub(crate) const LOAD_TEST_ADDRESS: Address = Address::repeat_byte(1); +use zksync_utils::{address_to_u256, u256_to_h256}; const EXPENSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; -pub(crate) const EXPENSIVE_CONTRACT_ADDRESS: Address = Address::repeat_byte(2); - const PRECOMPILES_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json"; -pub(crate) const PRECOMPILES_CONTRACT_ADDRESS: Address = Address::repeat_byte(3); - const COUNTER_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json"; -pub(crate) const COUNTER_CONTRACT_ADDRESS: Address = Address::repeat_byte(4); - const INFINITE_LOOP_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/infinite/infinite.sol/InfiniteLoop.json"; -pub(crate) const INFINITE_LOOP_CONTRACT_ADDRESS: Address = Address::repeat_byte(5); - -pub(crate) fn read_expensive_contract_bytecode() -> Vec { - read_bytecode(EXPENSIVE_CONTRACT_PATH) -} - -pub(crate) fn read_precompiles_contract_bytecode() -> Vec { - read_bytecode(PRECOMPILES_CONTRACT_PATH) -} - -pub(crate) fn read_counter_contract_bytecode() -> Vec { - read_bytecode(COUNTER_CONTRACT_PATH) -} - -pub(crate) fn read_infinite_loop_contract_bytecode() -> Vec { - read_bytecode(INFINITE_LOOP_CONTRACT_PATH) -} +const MULTICALL3_CONTRACT_PATH: &str = + "contracts/l2-contracts/artifacts-zk/contracts/dev-contracts/Multicall3.sol/Multicall3.json"; /// Inflates the provided bytecode by appending the specified amount of NOP instructions at the end. 
-pub(crate) fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { +fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { bytecode.extend( iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) .take(nop_count) @@ -56,25 +47,270 @@ pub(crate) fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { } fn default_fee() -> Fee { + let fee_input = ::default_batch_fee_input_scaled( + FeeParams::sensible_v1_default(), + 1.0, + 1.0, + ); + let (max_fee_per_gas, gas_per_pubdata_limit) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::default().into()); Fee { - gas_limit: 200_000.into(), - max_fee_per_gas: 55.into(), + gas_limit: 10_000_000.into(), + max_fee_per_gas: max_fee_per_gas.into(), max_priority_fee_per_gas: 0_u64.into(), - gas_per_pubdata_limit: 555.into(), + gas_per_pubdata_limit: gas_per_pubdata_limit.into(), } } +#[derive(Debug, Default)] +pub(crate) struct StateBuilder { + inner: HashMap, +} + +impl StateBuilder { + pub(crate) const LOAD_TEST_ADDRESS: Address = Address::repeat_byte(1); + pub(crate) const EXPENSIVE_CONTRACT_ADDRESS: Address = Address::repeat_byte(2); + pub(crate) const PRECOMPILES_CONTRACT_ADDRESS: Address = Address::repeat_byte(3); + const COUNTER_CONTRACT_ADDRESS: Address = Address::repeat_byte(4); + const INFINITE_LOOP_CONTRACT_ADDRESS: Address = Address::repeat_byte(5); + const MULTICALL3_ADDRESS: Address = Address::repeat_byte(6); + + pub fn with_contract(mut self, address: Address, bytecode: Vec) -> Self { + self.inner.insert( + address, + OverrideAccount { + code: Some(Bytecode::new(bytecode).unwrap()), + ..OverrideAccount::default() + }, + ); + self + } + + pub fn inflate_bytecode(mut self, address: Address, nop_count: usize) -> Self { + let account_override = self.inner.get_mut(&address).expect("no contract"); + let bytecode = account_override.code.take().expect("no code override"); + let mut bytecode = bytecode.into_bytes(); + inflate_bytecode(&mut bytecode, nop_count); + account_override.code = Some(Bytecode::new(bytecode).unwrap()); + self + } + + pub fn with_load_test_contract(mut self) -> Self { + // Set the array length in the load test contract to 100, so that reads don't fail. 
+ let state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); + self.inner.insert( + Self::LOAD_TEST_ADDRESS, + OverrideAccount { + code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), + state: Some(OverrideState::State(state)), + ..OverrideAccount::default() + }, + ); + self + } + + pub fn with_balance(mut self, address: Address, balance: U256) -> Self { + self.inner.entry(address).or_default().balance = Some(balance); + self + } + + pub fn with_expensive_contract(self) -> Self { + self.with_contract( + Self::EXPENSIVE_CONTRACT_ADDRESS, + read_bytecode(EXPENSIVE_CONTRACT_PATH), + ) + } + + pub fn with_precompiles_contract(self) -> Self { + self.with_contract( + Self::PRECOMPILES_CONTRACT_ADDRESS, + read_bytecode(PRECOMPILES_CONTRACT_PATH), + ) + } + + pub fn with_counter_contract(self, initial_value: u64) -> Self { + let mut this = self.with_contract( + Self::COUNTER_CONTRACT_ADDRESS, + read_bytecode(COUNTER_CONTRACT_PATH), + ); + if initial_value != 0 { + let state = HashMap::from([(H256::zero(), H256::from_low_u64_be(initial_value))]); + this.inner + .get_mut(&Self::COUNTER_CONTRACT_ADDRESS) + .unwrap() + .state = Some(OverrideState::State(state)); + } + this + } + + pub fn with_infinite_loop_contract(self) -> Self { + self.with_contract( + Self::INFINITE_LOOP_CONTRACT_ADDRESS, + read_bytecode(INFINITE_LOOP_CONTRACT_PATH), + ) + } + + pub fn with_multicall3_contract(self) -> Self { + self.with_contract( + Self::MULTICALL3_ADDRESS, + read_bytecode(MULTICALL3_CONTRACT_PATH), + ) + } + + pub fn build(self) -> StateOverride { + StateOverride::new(self.inner) + } + + /// Applies these state overrides to Postgres storage, which is assumed to be empty (other than genesis data). + pub async fn apply(self, connection: &mut Connection<'_, Core>) { + let mut storage_logs = vec![]; + let mut factory_deps = HashMap::new(); + for (address, account) in self.inner { + if let Some(balance) = account.balance { + let balance_key = storage_key_for_eth_balance(&address); + storage_logs.push(StorageLog::new_write_log( + balance_key, + u256_to_h256(balance), + )); + } + if let Some(code) = account.code { + let code_hash = code.hash(); + storage_logs.extend([ + StorageLog::new_write_log(get_code_key(&address), code_hash), + StorageLog::new_write_log( + get_known_code_key(&code_hash), + H256::from_low_u64_be(1), + ), + ]); + factory_deps.insert(code_hash, code.into_bytes()); + } + if let Some(state) = account.state { + let state_slots = match state { + OverrideState::State(slots) | OverrideState::StateDiff(slots) => slots, + }; + let state_logs = state_slots.into_iter().map(|(key, value)| { + let key = StorageKey::new(AccountTreeId::new(address), key); + StorageLog::new_write_log(key, value) + }); + storage_logs.extend(state_logs); + } + } + + connection + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &storage_logs) + .await + .unwrap(); + connection + .factory_deps_dal() + .insert_factory_deps(L2BlockNumber(0), &factory_deps) + .await + .unwrap(); + } +} + +#[derive(Debug)] +pub(crate) struct Call3Value { + target: Address, + allow_failure: bool, + value: U256, + calldata: Vec, +} + +impl Call3Value { + pub fn allow_failure(mut self) -> Self { + self.allow_failure = true; + self + } + + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::Address(self.target), + Token::Bool(self.allow_failure), + Token::Uint(self.value), + Token::Bytes(self.calldata.clone()), + ]) + } +} + +impl From for Call3Value { + fn from(req: CallRequest) -> Self { + Self { + target: 
req.to.unwrap(), + allow_failure: false, + value: req.value.unwrap_or_default(), + calldata: req.data.unwrap_or_default().0, + } + } +} + +impl From for Call3Value { + fn from(tx: L2Tx) -> Self { + Self { + target: tx.recipient_account().unwrap(), + allow_failure: false, + value: tx.execute.value, + calldata: tx.execute.calldata, + } + } +} + +#[derive(Debug)] +pub(crate) struct Call3Result { + pub success: bool, + pub return_data: Vec, +} + +impl Call3Result { + pub fn parse(raw: &[u8]) -> Vec { + let mut tokens = load_contract(MULTICALL3_CONTRACT_PATH) + .function("aggregate3Value") + .expect("no `aggregate3Value` function") + .decode_output(raw) + .expect("failed decoding `aggregate3Value` output"); + assert_eq!(tokens.len(), 1, "Invalid output length"); + let Token::Array(results) = tokens.pop().unwrap() else { + panic!("Invalid token type, expected an array"); + }; + results.into_iter().map(Self::parse_single).collect() + } + + fn parse_single(token: Token) -> Self { + let Token::Tuple(mut tokens) = token else { + panic!("Invalid token type, expected a tuple"); + }; + assert_eq!(tokens.len(), 2); + let return_data = tokens.pop().unwrap().into_bytes().expect("expected bytes"); + let success = tokens.pop().unwrap().into_bool().expect("expected bool"); + Self { + success, + return_data, + } + } + + pub fn as_u256(&self) -> U256 { + decode_u256_output(&self.return_data) + } +} + +pub(crate) fn decode_u256_output(raw_output: &[u8]) -> U256 { + let mut tokens = ethabi::decode_whole(&[ethabi::ParamType::Uint(256)], raw_output) + .expect("unexpected return data"); + assert_eq!(tokens.len(), 1); + tokens.pop().unwrap().into_uint().unwrap() +} + pub(crate) trait TestAccount { - fn create_transfer(&self, value: U256, fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { + fn create_transfer(&self, value: U256) -> L2Tx { let fee = Fee { gas_limit: 200_000.into(), - max_fee_per_gas: fee_per_gas.into(), - max_priority_fee_per_gas: 0_u64.into(), - gas_per_pubdata_limit: gas_per_pubdata.into(), + ..default_fee() }; self.create_transfer_with_fee(value, fee) } + fn query_base_token_balance(&self) -> CallRequest; + fn create_transfer_with_fee(&self, value: U256, fee: Fee) -> L2Tx; fn create_load_test_tx(&self, params: LoadnextContractExecutionParams) -> L2Tx; @@ -85,9 +321,13 @@ pub(crate) trait TestAccount { fn create_code_oracle_tx(&self, bytecode_hash: H256, expected_keccak_hash: H256) -> L2Tx; - fn create_reverting_counter_tx(&self) -> L2Tx; + fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx; + + fn query_counter_value(&self) -> CallRequest; fn create_infinite_loop_tx(&self) -> L2Tx; + + fn multicall_with_value(&self, value: U256, calls: &[Call3Value]) -> CallRequest; } impl TestAccount for K256PrivateKey { @@ -106,9 +346,23 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn query_base_token_balance(&self) -> CallRequest { + let data = eth_contract() + .function("balanceOf") + .expect("No `balanceOf` function in contract") + .encode_input(&[Token::Uint(address_to_u256(&self.address()))]) + .expect("failed encoding `balanceOf` function"); + CallRequest { + from: Some(self.address()), + to: Some(L2_BASE_TOKEN_ADDRESS), + data: Some(data.into()), + ..CallRequest::default() + } + } + fn create_load_test_tx(&self, params: LoadnextContractExecutionParams) -> L2Tx { L2Tx::new_signed( - Some(LOAD_TEST_ADDRESS), + Some(StateBuilder::LOAD_TEST_ADDRESS), params.to_bytes(), Nonce(0), default_fee(), @@ -132,7 +386,7 @@ impl TestAccount for K256PrivateKey { 
.encode_input(&[Token::Uint(write_count.into())]) .expect("failed encoding `expensive` function"); L2Tx::new_signed( - Some(EXPENSIVE_CONTRACT_ADDRESS), + Some(StateBuilder::EXPENSIVE_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -152,7 +406,7 @@ impl TestAccount for K256PrivateKey { .encode_input(&[]) .expect("failed encoding `cleanUp` input"); L2Tx::new_signed( - Some(EXPENSIVE_CONTRACT_ADDRESS), + Some(StateBuilder::EXPENSIVE_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -175,7 +429,7 @@ impl TestAccount for K256PrivateKey { ]) .expect("failed encoding `callCodeOracle` input"); L2Tx::new_signed( - Some(PRECOMPILES_CONTRACT_ADDRESS), + Some(StateBuilder::PRECOMPILES_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -188,14 +442,14 @@ impl TestAccount for K256PrivateKey { .unwrap() } - fn create_reverting_counter_tx(&self) -> L2Tx { + fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx { let calldata = load_contract(COUNTER_CONTRACT_PATH) .function("incrementWithRevert") .expect("no `incrementWithRevert` function") - .encode_input(&[Token::Uint(1.into()), Token::Bool(true)]) + .encode_input(&[Token::Uint(increment), Token::Bool(revert)]) .expect("failed encoding `incrementWithRevert` input"); L2Tx::new_signed( - Some(COUNTER_CONTRACT_ADDRESS), + Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -208,6 +462,20 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn query_counter_value(&self) -> CallRequest { + let calldata = load_contract(COUNTER_CONTRACT_PATH) + .function("get") + .expect("no `get` function") + .encode_input(&[]) + .expect("failed encoding `get` input"); + CallRequest { + from: Some(self.address()), + to: Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), + data: Some(calldata.into()), + ..CallRequest::default() + } + } + fn create_infinite_loop_tx(&self) -> L2Tx { let calldata = load_contract(INFINITE_LOOP_CONTRACT_PATH) .function("infiniteLoop") @@ -215,7 +483,7 @@ impl TestAccount for K256PrivateKey { .encode_input(&[]) .expect("failed encoding `infiniteLoop` input"); L2Tx::new_signed( - Some(INFINITE_LOOP_CONTRACT_ADDRESS), + Some(StateBuilder::INFINITE_LOOP_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -227,4 +495,20 @@ impl TestAccount for K256PrivateKey { ) .unwrap() } + + fn multicall_with_value(&self, value: U256, calls: &[Call3Value]) -> CallRequest { + let call_tokens = calls.iter().map(Call3Value::to_token).collect(); + let calldata = load_contract(MULTICALL3_CONTRACT_PATH) + .function("aggregate3Value") + .expect("no `aggregate3Value` function") + .encode_input(&[Token::Array(call_tokens)]) + .expect("failed encoding `aggregate3Value` input"); + CallRequest { + from: Some(self.address()), + to: Some(StateBuilder::MULTICALL3_ADDRESS), + value: Some(value), + data: Some(calldata.into()), + ..CallRequest::default() + } + } } diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests.rs deleted file mode 100644 index 36c95fa5db0..00000000000 --- a/core/node/api_server/src/tx_sender/tests.rs +++ /dev/null @@ -1,805 +0,0 @@ -//! Tests for the transaction sender. 
- -use std::{collections::HashMap, time::Duration}; - -use assert_matches::assert_matches; -use test_casing::{test_casing, Product, TestCases}; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_multivm::interface::ExecutionResult; -use zksync_node_fee_model::MockBatchFeeParamsProvider; -use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; -use zksync_system_constants::CODE_ORACLE_ADDRESS; -use zksync_types::{ - api, - api::state_override::{Bytecode, OverrideAccount, OverrideState}, - get_nonce_key, - web3::keccak256, - K256PrivateKey, L1BatchNumber, L2BlockNumber, StorageLog, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use zksync_vm_executor::oneshot::MockOneshotExecutor; - -use super::{gas_estimation::GasEstimator, *}; -use crate::{ - execution_sandbox::BlockStartInfo, - testonly::{ - inflate_bytecode, read_counter_contract_bytecode, read_expensive_contract_bytecode, - read_infinite_loop_contract_bytecode, read_precompiles_contract_bytecode, TestAccount, - COUNTER_CONTRACT_ADDRESS, EXPENSIVE_CONTRACT_ADDRESS, INFINITE_LOOP_CONTRACT_ADDRESS, - LOAD_TEST_ADDRESS, PRECOMPILES_CONTRACT_ADDRESS, - }, - web3::testonly::create_test_tx_sender, -}; - -/// Initial pivot multiplier empirically sufficient for most tx types. -const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0; - -#[tokio::test] -async fn getting_nonce_for_account() { - let l2_chain_id = L2ChainId::default(); - let test_address = Address::repeat_byte(1); - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) - .await - .unwrap(); - // Manually insert a nonce for the address. - let nonce_key = get_nonce_key(&test_address); - let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(123)); - storage - .storage_logs_dal() - .append_storage_logs(L2BlockNumber(0), &[nonce_log]) - .await - .unwrap(); - - let tx_executor = MockOneshotExecutor::default(); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - - let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); - assert_eq!(nonce, Nonce(123)); - - // Insert another L2 block with a new nonce log. 
- storage - .blocks_dal() - .insert_l2_block(&create_l2_block(1)) - .await - .unwrap(); - let nonce_log = StorageLog { - value: H256::from_low_u64_be(321), - ..nonce_log - }; - storage - .storage_logs_dal() - .insert_storage_logs(L2BlockNumber(1), &[nonce_log]) - .await - .unwrap(); - - let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); - assert_eq!(nonce, Nonce(321)); - let missing_address = Address::repeat_byte(0xff); - let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); - assert_eq!(nonce, Nonce(0)); -} - -#[tokio::test] -async fn getting_nonce_for_account_after_snapshot_recovery() { - const SNAPSHOT_L2_BLOCK_NUMBER: L2BlockNumber = L2BlockNumber(42); - - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.connection().await.unwrap(); - let test_address = Address::repeat_byte(1); - let other_address = Address::repeat_byte(2); - let nonce_logs = [ - StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)), - StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)), - ]; - prepare_recovery_snapshot( - &mut storage, - L1BatchNumber(23), - SNAPSHOT_L2_BLOCK_NUMBER, - &nonce_logs, - ) - .await; - - let l2_chain_id = L2ChainId::default(); - let tx_executor = MockOneshotExecutor::default(); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - - storage - .blocks_dal() - .insert_l2_block(&create_l2_block(SNAPSHOT_L2_BLOCK_NUMBER.0 + 1)) - .await - .unwrap(); - let new_nonce_logs = vec![StorageLog::new_write_log( - get_nonce_key(&test_address), - H256::from_low_u64_be(321), - )]; - storage - .storage_logs_dal() - .insert_storage_logs(SNAPSHOT_L2_BLOCK_NUMBER + 1, &new_nonce_logs) - .await - .unwrap(); - - let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); - assert_eq!(nonce, Nonce(321)); - let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap(); - assert_eq!(nonce, Nonce(25)); - let missing_address = Address::repeat_byte(0xff); - let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); - assert_eq!(nonce, Nonce(0)); -} - -#[tokio::test] -async fn submitting_tx_requires_one_connection() { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) - .await - .unwrap(); - - let l2_chain_id = L2ChainId::default(); - let fee_input = MockBatchFeeParamsProvider::default() - .get_batch_fee_input_scaled(1.0, 1.0) - .await - .unwrap(); - let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = create_l2_transaction(base_fee, gas_per_pubdata); - let tx_hash = tx.hash(); - - // Manually set sufficient balance for the tx initiator. 
- let balance_key = storage_key_for_eth_balance(&tx.initiator_account()); - let storage_log = StorageLog::new_write_log(balance_key, u256_to_h256(U256::one() << 64)); - storage - .storage_logs_dal() - .append_storage_logs(L2BlockNumber(0), &[storage_log]) - .await - .unwrap(); - drop(storage); - - let mut tx_executor = MockOneshotExecutor::default(); - tx_executor.set_tx_responses(move |received_tx, _| { - assert_eq!(received_tx.hash(), tx_hash); - ExecutionResult::Success { output: vec![] } - }); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - - let submission_result = tx_sender.submit_tx(tx).await.unwrap(); - assert_matches!(submission_result.0, L2TxSubmissionResult::Added); - - let mut storage = pool.connection().await.unwrap(); - storage - .transactions_web3_dal() - .get_transaction_by_hash(tx_hash, l2_chain_id) - .await - .unwrap() - .expect("transaction is not persisted"); -} - -#[tokio::test] -async fn eth_call_requires_single_connection() { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - let genesis_params = GenesisParams::mock(); - insert_genesis_batch(&mut storage, &genesis_params) - .await - .unwrap(); - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) - .await - .unwrap(); - let block_id = api::BlockId::Number(api::BlockNumber::Latest); - let block_args = BlockArgs::new(&mut storage, block_id, &start_info) - .await - .unwrap(); - drop(storage); - - let tx = create_l2_transaction(10, 100); - let tx_hash = tx.hash(); - - let mut tx_executor = MockOneshotExecutor::default(); - tx_executor.set_call_responses(move |received_tx, _| { - assert_eq!(received_tx.hash(), tx_hash); - ExecutionResult::Success { - output: b"success!".to_vec(), - } - }); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender( - pool.clone(), - genesis_params.config().l2_chain_id, - tx_executor, - ) - .await; - let call_overrides = CallOverrides { - enforced_base_fee: None, - }; - let output = tx_sender - .eth_call(block_args, call_overrides, tx, None) - .await - .unwrap(); - assert_eq!(output, b"success!"); -} - -async fn create_real_tx_sender() -> TxSender { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - let genesis_params = GenesisParams::mock(); - insert_genesis_batch(&mut storage, &genesis_params) - .await - .unwrap(); - drop(storage); - - let genesis_config = genesis_params.config(); - let executor_options = SandboxExecutorOptions::new( - genesis_config.l2_chain_id, - AccountTreeId::new(genesis_config.fee_account), - u32::MAX, - ) - .await - .unwrap(); - - let pg_caches = PostgresStorageCaches::new(1, 1); - let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); - create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor) - .await - .0 -} - -#[tokio::test] -async fn initial_gas_estimation_is_somewhat_accurate() { - let tx_sender = create_real_tx_sender().await; - - let alice = K256PrivateKey::random(); - let transfer_value = U256::from(1_000_000_000); - let account_overrides = OverrideAccount { - balance: Some(transfer_value * 2), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); - // fee params don't matter; we adjust via `adjust_transaction_fee()` - let tx = 
alice.create_transfer(transfer_value, 55, 555); - - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - let initial_estimate = estimator.initialize().await.unwrap(); - assert!(initial_estimate.gas_charged_for_pubdata > 0); - assert!(initial_estimate.operator_overhead > 0); - let total_gas_charged = initial_estimate.total_gas_charged.unwrap(); - assert!( - total_gas_charged - > initial_estimate.gas_charged_for_pubdata + initial_estimate.operator_overhead, - "{initial_estimate:?}" - ); - - // Check that a transaction fails if supplied with the lower bound. - let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() - + initial_estimate.operator_overhead; - assert!(lower_bound < total_gas_charged, "{initial_estimate:?}"); - let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); - assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); - - // A slightly larger limit should work. - let initial_pivot = total_gas_charged * 64 / 63; - let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); -} - -const LOAD_TEST_CASES: TestCases = test_casing::cases! {[ - LoadnextContractExecutionParams::default(), - // No storage modification - LoadnextContractExecutionParams { - writes: 0, - events: 0, - ..LoadnextContractExecutionParams::default() - }, - // Moderately deep recursion (very deep recursion is tested separately) - LoadnextContractExecutionParams { - recursive_calls: 10, - ..LoadnextContractExecutionParams::default() - }, - // No deploys - LoadnextContractExecutionParams { - deploys: 0, - ..LoadnextContractExecutionParams::default() - }, - // Lots of deploys - LoadnextContractExecutionParams { - deploys: 10, - ..LoadnextContractExecutionParams::default() - }, -]}; - -#[test_casing(5, LOAD_TEST_CASES)] -#[tokio::test] -async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractExecutionParams) { - let alice = K256PrivateKey::random(); - // Set the array length in the load test contract to 100, so that reads don't fail. - let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(tx_params); - - test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; -} - -#[test_casing(2, [false, true])] -#[tokio::test] -async fn initial_estimate_for_deep_recursion(with_reads: bool) { - let alice = K256PrivateKey::random(); - let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - - // Reads are chosen because they represent the worst case. Reads don't influence the amount of pubdata; - // i.e., they don't make it easier to execute a transaction because of additional gas reserved for pubdata. 
- // OTOH, reads still increase the amount of computational gas used on each nested call. - // - // Initial pivot multipliers below are the smallest ones with 0.1 precision. `DEFAULT_MULTIPLIER` works for smaller - // recursion depths because the transaction emits enough pubdata to cover gas deductions due to the 63/64 rule. - let depths_and_multipliers: &[_] = if with_reads { - &[(25, DEFAULT_MULTIPLIER), (50, 1.2), (75, 1.4), (100, 1.7)] - } else { - &[ - (50, DEFAULT_MULTIPLIER), - (75, 1.2), - (100, 1.4), - (125, 1.7), - (150, 2.1), - ] - }; - for &(recursion_depth, multiplier) in depths_and_multipliers { - println!("Testing recursion depth {recursion_depth}"); - let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { - recursive_calls: recursion_depth, - reads: if with_reads { 10 } else { 0 }, - ..LoadnextContractExecutionParams::empty() - }); - test_initial_estimate(state_override.clone(), tx, multiplier).await; - } -} - -#[tokio::test] -async fn initial_estimate_for_deep_recursion_with_large_bytecode() { - let alice = K256PrivateKey::random(); - let mut contract_bytecode = get_loadnext_contract().bytecode; - inflate_bytecode(&mut contract_bytecode, 50_000); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { - recursive_calls: 100, - ..LoadnextContractExecutionParams::empty() - }); - - test_initial_estimate(state_override, tx, 1.35).await; -} - -/// Tests the lower bound and initial pivot extracted from the initial estimate (one with effectively infinite gas amount). -/// Returns the VM result for a VM run with the initial pivot. -async fn test_initial_estimate( - state_override: StateOverride, - tx: L2Tx, - initial_pivot_multiplier: f64, -) -> VmExecutionResultAndLogs { - let tx_sender = create_real_tx_sender().await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - let initial_estimate = estimator.initialize().await.unwrap(); - - let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() - + initial_estimate.operator_overhead; - let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); - assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); - - // A slightly larger limit should work. - let initial_pivot = - (initial_estimate.total_gas_charged.unwrap() as f64 * initial_pivot_multiplier) as u64; - let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); - vm_result -} - -async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError { - let tx_sender = create_real_tx_sender().await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - estimator.initialize().await.unwrap_err() -} - -/// Estimates both transactions with initial writes and cleanup. 
-#[test_casing(4, [10, 50, 200, 1_000])] -#[tokio::test] -async fn initial_estimate_for_expensive_contract(write_count: usize) { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_expensive_contract_bytecode(); - let mut contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides.clone(), - )])); - let tx = alice.create_expensive_tx(write_count); - - let vm_result = test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; - - let contract_logs = vm_result.logs.storage_logs.into_iter().filter_map(|log| { - (*log.log.key.address() == EXPENSIVE_CONTRACT_ADDRESS) - .then_some((*log.log.key.key(), log.log.value)) - }); - let contract_logs: HashMap<_, _> = contract_logs.collect(); - assert!(contract_logs.len() >= write_count, "{contract_logs:?}"); - contract_overrides.state = Some(OverrideState::StateDiff(contract_logs)); - - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides, - )])); - let tx = alice.create_expensive_cleanup_tx(); - - test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; -} - -#[tokio::test] -async fn initial_estimate_for_code_oracle_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_precompiles_contract_bytecode(); - let contract_bytecode_hash = hash_bytecode(&contract_bytecode); - let contract_keccak_hash = H256(keccak256(&contract_bytecode)); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - // Add another contract that is never executed, but has a large bytecode. - let huge_contact_address = Address::repeat_byte(23); - let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; - let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); - let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); - let huge_contract_overrides = OverrideAccount { - code: Some(Bytecode::new(huge_contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([ - (PRECOMPILES_CONTRACT_ADDRESS, contract_overrides), - (huge_contact_address, huge_contract_overrides), - ])); - - // Test contracts that are already decommitted when requested from the precompiles test contract. 
- let genesis_params = GenesisParams::mock(); - let code_oracle_bytecode = genesis_params - .system_contracts() - .iter() - .find_map(|contract| { - (*contract.account_id.address() == CODE_ORACLE_ADDRESS).then_some(&contract.bytecode) - }) - .expect("no code oracle"); - let code_oracle_bytecode_hash = hash_bytecode(code_oracle_bytecode); - let code_oracle_keccak_hash = H256(keccak256(code_oracle_bytecode)); - - let warm_bytecode_hashes = [ - (code_oracle_bytecode_hash, code_oracle_keccak_hash), - (contract_bytecode_hash, contract_keccak_hash), - ]; - let mut decomitter_stats = 0.0; - for (hash, keccak_hash) in warm_bytecode_hashes { - println!("Testing bytecode: {hash:?}"); - let tx = alice.create_code_oracle_tx(hash, keccak_hash); - let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; - let stats = &vm_result.statistics.circuit_statistic; - decomitter_stats = stats.code_decommitter.max(decomitter_stats); - } - assert!(decomitter_stats > 0.0); - - println!("Testing large bytecode"); - let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); - let vm_result = test_initial_estimate(state_override, tx, 1.05).await; - // Sanity check: the transaction should spend significantly more on decommitment compared to previous ones - let new_decomitter_stats = vm_result.statistics.circuit_statistic.code_decommitter; - assert!( - new_decomitter_stats > decomitter_stats * 1.5, - "old={decomitter_stats}, new={new_decomitter_stats}" - ); -} - -#[tokio::test] -async fn initial_estimate_with_large_free_bytecode() { - let alice = K256PrivateKey::random(); - let mut contract_bytecode = read_precompiles_contract_bytecode(); - inflate_bytecode(&mut contract_bytecode, 50_000); - let contract_bytecode_hash = hash_bytecode(&contract_bytecode); - let contract_keccak_hash = H256(keccak256(&contract_bytecode)); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([( - PRECOMPILES_CONTRACT_ADDRESS, - contract_overrides, - )])); - // Ask the test contract to decommit itself. This should refund the decommit costs, but it will be charged at first. 
- let tx = alice.create_code_oracle_tx(contract_bytecode_hash, contract_keccak_hash); - test_initial_estimate(state_override, tx, 1.05).await; -} - -#[tokio::test] -async fn revert_during_initial_estimate() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_counter_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - COUNTER_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_reverting_counter_tx(); - let err = test_initial_estimate_error(state_override, tx).await; - let SubmitTxError::ExecutionReverted(err, _) = err else { - panic!("Unexpected error: {err:?}"); - }; - assert_eq!(err, "This method always reverts"); -} - -#[tokio::test] -async fn out_of_gas_during_initial_estimate() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_infinite_loop_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - INFINITE_LOOP_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_infinite_loop_tx(); - let err = test_initial_estimate_error(state_override, tx).await; - // Unfortunately, we don't provide human-readable out-of-gas errors at the time - assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); -} - -#[tokio::test] -async fn insufficient_funds_error_for_transfer() { - let tx_sender = create_real_tx_sender().await; - - let alice = K256PrivateKey::random(); - let transfer_value = 1_000_000_000.into(); - // fee params don't matter; they should be overwritten by the estimation logic - let tx = alice.create_transfer(transfer_value, 55, 555); - let fee_scale_factor = 1.0; - // Without overrides, the transaction should fail because of insufficient balance. 
- let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - 1_000, - None, - BinarySearchKind::Full, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::InsufficientFundsForTransfer); -} - -async fn test_estimating_gas( - state_override: StateOverride, - tx: L2Tx, - acceptable_overestimation: u64, -) { - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let fee = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - BinarySearchKind::Full, - ) - .await - .unwrap(); - // Sanity-check gas limit - let gas_limit_after_full_search = u64::try_from(fee.gas_limit).unwrap(); - assert!( - (10_000..10_000_000).contains(&gas_limit_after_full_search), - "{fee:?}" - ); - - let fee = tx_sender - .get_txs_fee_in_wei( - tx.into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - BinarySearchKind::Optimized, - ) - .await - .unwrap(); - let gas_limit_after_optimized_search = u64::try_from(fee.gas_limit).unwrap(); - - let diff = gas_limit_after_full_search.abs_diff(gas_limit_after_optimized_search); - assert!( - diff <= acceptable_overestimation, - "full={gas_limit_after_full_search}, optimized={gas_limit_after_optimized_search}" - ); -} - -#[test_casing(3, [0, 100, 1_000])] -#[tokio::test] -async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { - let alice = K256PrivateKey::random(); - let transfer_value = 1_000_000_000.into(); - let account_overrides = OverrideAccount { - balance: Some(transfer_value * 2), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); - // fee params don't matter; they should be overwritten by the estimation logic - let tx = alice.create_transfer(transfer_value, 55, 555); - - test_estimating_gas(state_override, tx, acceptable_overestimation).await; -} - -#[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] -#[tokio::test] -async fn estimating_gas_for_load_test_tx( - tx_params: LoadnextContractExecutionParams, - acceptable_overestimation: u64, -) { - let alice = K256PrivateKey::random(); - // Set the array length in the load test contract to 100, so that reads don't fail. 
- let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(tx_params); - - test_estimating_gas(state_override, tx, acceptable_overestimation).await; -} - -#[test_casing(4, [10, 50, 100, 200])] -#[tokio::test] -async fn estimating_gas_for_expensive_txs(write_count: usize) { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_expensive_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides.clone(), - )])); - let tx = alice.create_expensive_tx(write_count); - - test_estimating_gas(state_override, tx, 0).await; -} - -#[tokio::test] -async fn estimating_gas_for_code_oracle_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_precompiles_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - // Add another contract that is never executed, but has a large bytecode. - let huge_contact_address = Address::repeat_byte(23); - let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; - let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); - let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); - let huge_contract_overrides = OverrideAccount { - code: Some(Bytecode::new(huge_contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([ - (PRECOMPILES_CONTRACT_ADDRESS, contract_overrides), - (huge_contact_address, huge_contract_overrides), - ])); - let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); - - test_estimating_gas(state_override, tx, 0).await; -} - -#[tokio::test] -async fn estimating_gas_for_reverting_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_counter_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - COUNTER_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_reverting_counter_tx(); - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let acceptable_overestimation = 0; - for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { - let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - binary_search_kind, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::ExecutionReverted(..)); - } -} - -#[tokio::test] -async fn estimating_gas_for_infinite_loop_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_infinite_loop_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = 
StateOverride::new(HashMap::from([( - INFINITE_LOOP_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_infinite_loop_tx(); - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let acceptable_overestimation = 0; - for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { - let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - binary_search_kind, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); - } -} diff --git a/core/node/api_server/src/tx_sender/tests/call.rs b/core/node/api_server/src/tx_sender/tests/call.rs new file mode 100644 index 00000000000..bdddb8e3895 --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/call.rs @@ -0,0 +1,253 @@ +//! Tests for `eth_call`. + +use std::collections::HashMap; + +use assert_matches::assert_matches; +use zksync_multivm::interface::ExecutionResult; +use zksync_node_test_utils::create_l2_transaction; +use zksync_types::{ + api::state_override::OverrideAccount, transaction_request::CallRequest, K256PrivateKey, +}; + +use super::*; +use crate::testonly::{decode_u256_output, Call3Result, Call3Value, StateBuilder, TestAccount}; + +#[tokio::test] +async fn eth_call_requires_single_connection() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + let genesis_params = GenesisParams::mock(); + insert_genesis_batch(&mut storage, &genesis_params) + .await + .unwrap(); + let block_args = BlockArgs::pending(&mut storage).await.unwrap(); + drop(storage); + + let tx = create_l2_transaction(10, 100); + let tx_hash = tx.hash(); + + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_call_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { + output: b"success!".to_vec(), + } + }); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender( + pool.clone(), + genesis_params.config().l2_chain_id, + tx_executor, + ) + .await; + let call_overrides = CallOverrides { + enforced_base_fee: None, + }; + let output = tx_sender + .eth_call(block_args, call_overrides, tx, None) + .await + .unwrap(); + assert_eq!(output, b"success!"); +} + +async fn test_call( + tx_sender: &TxSender, + state_override: StateOverride, + mut call: CallRequest, +) -> Result, SubmitTxError> { + call.gas = call.gas.max(Some(10_000_000.into())); + let call = L2Tx::from_request(call.into(), usize::MAX).unwrap(); + + let mut storage = tx_sender + .0 + .replica_connection_pool + .connection() + .await + .unwrap(); + let block_args = BlockArgs::pending(&mut storage).await.unwrap(); + drop(storage); + let call_overrides = CallOverrides { + enforced_base_fee: None, + }; + + tx_sender + .eth_call(block_args, call_overrides, call, Some(state_override)) + .await +} + +#[tokio::test] +async fn eth_call_with_balance() { + let alice = K256PrivateKey::random(); + let initial_balance = 123_456_789.into(); + let account_overrides = OverrideAccount { + balance: Some(initial_balance), + ..OverrideAccount::default() + }; + let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let call = alice.query_base_token_balance(); + let output = 
test_call(&tx_sender, state_override, call).await.unwrap(); + assert_eq!(decode_u256_output(&output), initial_balance); +} + +#[tokio::test] +async fn eth_call_with_transfer() { + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + let initial_balance = transfer_value * 5 / 3; + let state_override = StateBuilder::default() + .with_multicall3_contract() + .with_balance(alice.address(), initial_balance) + .build(); + + let transfer = alice.create_transfer(transfer_value); + let multicall = alice.multicall_with_value( + transfer_value, + &[transfer.into(), alice.query_base_token_balance().into()], + ); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call(&tx_sender, state_override, multicall) + .await + .unwrap(); + let call_results = Call3Result::parse(&output); + assert_eq!(call_results.len(), 2); + assert!( + call_results[0].success && call_results[1].success, + "{call_results:?}" + ); + assert!(call_results[0].return_data.is_empty(), "{call_results:?}"); + + let balance = call_results[1].as_u256(); + // The bootloader doesn't compute gas refunds in the call mode, so the equality is exact + assert_eq!(balance, initial_balance - transfer_value); +} + +#[tokio::test] +async fn eth_call_with_counter() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(42).build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call( + &tx_sender, + state_override.clone(), + alice.query_counter_value(), + ) + .await + .unwrap(); + assert_eq!(decode_u256_output(&output), 42.into()); + + let tx_as_call = alice.create_counter_tx(3.into(), false).into(); + let output = test_call(&tx_sender, state_override.clone(), tx_as_call) + .await + .unwrap(); + assert_eq!(decode_u256_output(&output), 45.into()); + + let tx_as_call = alice.create_counter_tx(3.into(), true).into(); + let err = test_call(&tx_sender, state_override, tx_as_call) + .await + .unwrap_err(); + assert_matches!( + err, + SubmitTxError::ExecutionReverted(msg, _) if msg.contains("This method always reverts") + ); +} + +#[tokio::test] +async fn eth_call_with_counter_transactions() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_multicall3_contract() + .with_counter_contract(0) + .build(); + + let multicall = alice.multicall_with_value( + 0.into(), + &[ + alice.create_counter_tx(1.into(), false).into(), + Call3Value::from(alice.create_counter_tx(2.into(), true)).allow_failure(), + alice.query_counter_value().into(), + alice.create_counter_tx(3.into(), false).into(), + ], + ); + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call(&tx_sender, state_override, multicall) + .await + .unwrap(); + let call_results = Call3Result::parse(&output); + + assert_eq!( + call_results + .iter() + .map(|result| result.success) + .collect::>(), + [true, false, true, true] + ); + let counter_values: Vec<_> = call_results + .iter() + .filter_map(|result| { + if !result.success { + return None; + } + Some(decode_u256_output(&result.return_data).as_u32()) + }) + .collect(); + assert_eq!(counter_values, [1, 1, 4]); +} + +#[tokio::test] +async fn eth_call_out_of_gas() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + 
.with_infinite_loop_contract() + .build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let tx_as_call = alice.create_infinite_loop_tx().into(); + let err = test_call(&tx_sender, state_override, tx_as_call) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::ExecutionReverted(..)); +} + +#[tokio::test] +async fn eth_call_with_load_test_transactions() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + // Deploys (naturally) don't work for calls, hence a separate set of test cases. + let load_test_cases_for_call = [ + LoadnextContractExecutionParams { + deploys: 0, + ..LoadnextContractExecutionParams::default() + }, + LoadnextContractExecutionParams { + deploys: 0, + recursive_calls: 20, + ..LoadnextContractExecutionParams::default() + }, + LoadnextContractExecutionParams { + reads: 100, + writes: 100, + ..LoadnextContractExecutionParams::empty() + }, + ]; + + for tx_params in load_test_cases_for_call { + println!("Executing {tx_params:?}"); + let tx_as_call = alice.create_load_test_tx(tx_params).into(); + test_call(&tx_sender, state_override.clone(), tx_as_call) + .await + .unwrap(); + } +} diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs new file mode 100644 index 00000000000..086313a8562 --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -0,0 +1,466 @@ +//! Tests for gas estimation (mostly with the real oneshot VM executor). + +use std::collections::HashMap; + +use assert_matches::assert_matches; +use test_casing::{test_casing, Product}; +use zksync_system_constants::CODE_ORACLE_ADDRESS; +use zksync_types::{ + api::state_override::{OverrideAccount, OverrideState}, + web3::keccak256, + K256PrivateKey, +}; +use zksync_utils::bytecode::hash_bytecode; + +use super::*; +use crate::{ + testonly::{StateBuilder, TestAccount}, + tx_sender::gas_estimation::GasEstimator, +}; + +/// Initial pivot multiplier empirically sufficient for most tx types. +const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0; + +#[tokio::test] +async fn initial_gas_estimation_is_somewhat_accurate() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + let alice = K256PrivateKey::random(); + let transfer_value = U256::from(1_000_000_000); + let account_overrides = OverrideAccount { + balance: Some(transfer_value * 2), + ..OverrideAccount::default() + }; + let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + let tx = alice.create_transfer(transfer_value); + + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + assert!(initial_estimate.gas_charged_for_pubdata > 0); + assert!(initial_estimate.operator_overhead > 0); + let total_gas_charged = initial_estimate.total_gas_charged.unwrap(); + assert!( + total_gas_charged + > initial_estimate.gas_charged_for_pubdata + initial_estimate.operator_overhead, + "{initial_estimate:?}" + ); + + // Check that a transaction fails if supplied with the lower bound. 
+ let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() + + initial_estimate.operator_overhead; + assert!(lower_bound < total_gas_charged, "{initial_estimate:?}"); + let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + + // A slightly larger limit should work. + let initial_pivot = total_gas_charged * 64 / 63; + let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[test_casing(5, LOAD_TEST_CASES)] +#[tokio::test] +async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractExecutionParams) { + let alice = K256PrivateKey::random(); + // Set the array length in the load test contract to 100, so that reads don't fail. + let state_override = StateBuilder::default().with_load_test_contract().build(); + let tx = alice.create_load_test_tx(tx_params); + + test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn initial_estimate_for_deep_recursion(with_reads: bool) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + + // Reads are chosen because they represent the worst case. Reads don't influence the amount of pubdata; + // i.e., they don't make it easier to execute a transaction because of additional gas reserved for pubdata. + // OTOH, reads still increase the amount of computational gas used on each nested call. + // + // Initial pivot multipliers below are the smallest ones with 0.1 precision. `DEFAULT_MULTIPLIER` works for smaller + // recursion depths because the transaction emits enough pubdata to cover gas deductions due to the 63/64 rule. + let depths_and_multipliers: &[_] = if with_reads { + &[(25, DEFAULT_MULTIPLIER), (50, 1.2), (75, 1.4), (100, 1.7)] + } else { + &[ + (50, DEFAULT_MULTIPLIER), + (75, 1.2), + (100, 1.4), + (125, 1.7), + (150, 2.1), + ] + }; + for &(recursion_depth, multiplier) in depths_and_multipliers { + println!("Testing recursion depth {recursion_depth}"); + let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { + recursive_calls: recursion_depth, + reads: if with_reads { 10 } else { 0 }, + ..LoadnextContractExecutionParams::empty() + }); + test_initial_estimate(state_override.clone(), tx, multiplier).await; + } +} + +#[tokio::test] +async fn initial_estimate_for_deep_recursion_with_large_bytecode() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_load_test_contract() + .inflate_bytecode(StateBuilder::LOAD_TEST_ADDRESS, 50_000) + .build(); + let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { + recursive_calls: 100, + ..LoadnextContractExecutionParams::empty() + }); + + test_initial_estimate(state_override, tx, 1.35).await; +} + +/// Tests the lower bound and initial pivot extracted from the initial estimate (one with effectively infinite gas amount). +/// Returns the VM result for a VM run with the initial pivot. 
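The multipliers in the table above can be sanity-checked against the 63/64 rule. A minimal back-of-the-envelope sketch (an illustration with assumptions, not the estimator's logic; `naive_pivot_multiplier` is a made-up helper): if every nested frame can receive at most 63/64 of its caller's remaining gas, then gas that has to survive `depth` nested calls must be over-provisioned at the top level by roughly `(64/63)^depth`.

```rust
/// Naive upper bound on the pivot multiplier needed for `depth` nested calls
/// under the 63/64 rule (illustrative only, not part of the codebase).
fn naive_pivot_multiplier(depth: u32) -> f64 {
    (64.0_f64 / 63.0).powi(depth as i32)
}
```

This naive bound gives roughly 2.2 for depth 50 and 4.8 for depth 100, noticeably larger than the empirical multipliers above (about 1.0-1.2 for depth 50 and 1.4-1.7 for depth 100, depending on whether reads are included); as the comment above explains, gas reserved for pubdata absorbs most of the 63/64 deductions, so the real pivot grows much more slowly.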
+async fn test_initial_estimate( + state_override: StateOverride, + tx: L2Tx, + initial_pivot_multiplier: f64, +) -> VmExecutionResultAndLogs { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + + let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() + + initial_estimate.operator_overhead; + let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + + // A slightly larger limit should work. + let initial_pivot = + (initial_estimate.total_gas_charged.unwrap() as f64 * initial_pivot_multiplier) as u64; + let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + vm_result +} + +async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + estimator.initialize().await.unwrap_err() +} + +/// Estimates both transactions with initial writes and cleanup. +#[test_casing(4, [10, 50, 200, 1_000])] +#[tokio::test] +async fn initial_estimate_for_expensive_contract(write_count: usize) { + let alice = K256PrivateKey::random(); + let mut state_override = StateBuilder::default().with_expensive_contract().build(); + let tx = alice.create_expensive_tx(write_count); + + let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; + + let contract_logs = vm_result.logs.storage_logs.into_iter().filter_map(|log| { + (*log.log.key.address() == StateBuilder::EXPENSIVE_CONTRACT_ADDRESS) + .then_some((*log.log.key.key(), log.log.value)) + }); + let contract_logs: HashMap<_, _> = contract_logs.collect(); + assert!(contract_logs.len() >= write_count, "{contract_logs:?}"); + + state_override + .get_mut(&StateBuilder::EXPENSIVE_CONTRACT_ADDRESS) + .unwrap() + .state = Some(OverrideState::StateDiff(contract_logs)); + let tx = alice.create_expensive_cleanup_tx(); + test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; +} + +#[tokio::test] +async fn initial_estimate_for_code_oracle_tx() { + let alice = K256PrivateKey::random(); + // Add another contract that is never executed, but has a large bytecode. + let huge_contact_address = Address::repeat_byte(23); + let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; + let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); + let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); + + let state_override = StateBuilder::default() + .with_precompiles_contract() + .with_contract(huge_contact_address, huge_contract_bytecode) + .build(); + + let contract_override = state_override + .get(&StateBuilder::PRECOMPILES_CONTRACT_ADDRESS) + .unwrap(); + let contract_bytecode = contract_override.code.as_ref().unwrap(); + let contract_bytecode_hash = contract_bytecode.hash(); + let contract_keccak_hash = H256(keccak256(contract_bytecode.as_ref())); + + // Test contracts that are already decommitted when requested from the precompiles test contract. 
+ let genesis_params = GenesisParams::mock(); + let code_oracle_bytecode = genesis_params + .system_contracts() + .iter() + .find_map(|contract| { + (*contract.account_id.address() == CODE_ORACLE_ADDRESS).then_some(&contract.bytecode) + }) + .expect("no code oracle"); + let code_oracle_bytecode_hash = hash_bytecode(code_oracle_bytecode); + let code_oracle_keccak_hash = H256(keccak256(code_oracle_bytecode)); + + let warm_bytecode_hashes = [ + (code_oracle_bytecode_hash, code_oracle_keccak_hash), + (contract_bytecode_hash, contract_keccak_hash), + ]; + let mut decomitter_stats = 0.0; + for (hash, keccak_hash) in warm_bytecode_hashes { + println!("Testing bytecode: {hash:?}"); + let tx = alice.create_code_oracle_tx(hash, keccak_hash); + let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; + let stats = &vm_result.statistics.circuit_statistic; + decomitter_stats = stats.code_decommitter.max(decomitter_stats); + } + assert!(decomitter_stats > 0.0); + + println!("Testing large bytecode"); + let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); + let vm_result = test_initial_estimate(state_override, tx, 1.05).await; + // Sanity check: the transaction should spend significantly more on decommitment compared to previous ones + let new_decomitter_stats = vm_result.statistics.circuit_statistic.code_decommitter; + assert!( + new_decomitter_stats > decomitter_stats * 1.5, + "old={decomitter_stats}, new={new_decomitter_stats}" + ); +} + +#[tokio::test] +async fn initial_estimate_with_large_free_bytecode() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_precompiles_contract() + .inflate_bytecode(StateBuilder::PRECOMPILES_CONTRACT_ADDRESS, 50_000) + .build(); + let contract_override = state_override + .get(&StateBuilder::PRECOMPILES_CONTRACT_ADDRESS) + .unwrap(); + let contract_bytecode = contract_override.code.as_ref().unwrap(); + let contract_bytecode_hash = contract_bytecode.hash(); + let contract_keccak_hash = H256(keccak256(contract_bytecode.as_ref())); + + // Ask the test contract to decommit itself. This should refund the decommit costs, but it will be charged at first. 
+ let tx = alice.create_code_oracle_tx(contract_bytecode_hash, contract_keccak_hash); + test_initial_estimate(state_override, tx, 1.05).await; +} + +#[tokio::test] +async fn revert_during_initial_estimate() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + + let tx = alice.create_counter_tx(1.into(), true); + let err = test_initial_estimate_error(state_override, tx).await; + let SubmitTxError::ExecutionReverted(err, _) = err else { + panic!("Unexpected error: {err:?}"); + }; + assert_eq!(err, "This method always reverts"); +} + +#[tokio::test] +async fn out_of_gas_during_initial_estimate() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_infinite_loop_contract() + .build(); + + let tx = alice.create_infinite_loop_tx(); + let err = test_initial_estimate_error(state_override, tx).await; + // Unfortunately, we don't provide human-readable out-of-gas errors at the time + assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); +} + +#[tokio::test] +async fn insufficient_funds_error_for_transfer() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + let alice = K256PrivateKey::random(); + let tx = alice.create_transfer(1_000_000_000.into()); + let fee_scale_factor = 1.0; + // Without overrides, the transaction should fail because of insufficient balance. + let err = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + fee_scale_factor, + 1_000, + None, + BinarySearchKind::Full, + ) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::InsufficientFundsForTransfer); +} + +async fn test_estimating_gas( + state_override: StateOverride, + tx: L2Tx, + acceptable_overestimation: u64, +) { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + let fee_scale_factor = 1.0; + let fee = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + BinarySearchKind::Full, + ) + .await + .unwrap(); + // Sanity-check gas limit + let gas_limit_after_full_search = u64::try_from(fee.gas_limit).unwrap(); + assert!( + (10_000..10_000_000).contains(&gas_limit_after_full_search), + "{fee:?}" + ); + + let fee = tx_sender + .get_txs_fee_in_wei( + tx.into(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + BinarySearchKind::Optimized, + ) + .await + .unwrap(); + let gas_limit_after_optimized_search = u64::try_from(fee.gas_limit).unwrap(); + + let diff = gas_limit_after_full_search.abs_diff(gas_limit_after_optimized_search); + assert!( + diff <= acceptable_overestimation, + "full={gas_limit_after_full_search}, optimized={gas_limit_after_optimized_search}" + ); +} + +#[test_casing(3, [0, 100, 1_000])] +#[tokio::test] +async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + let account_overrides = OverrideAccount { + balance: Some(transfer_value * 2), + ..OverrideAccount::default() + }; + let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + let tx = alice.create_transfer(transfer_value); + + test_estimating_gas(state_override, tx, acceptable_overestimation).await; +} + +#[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] +#[tokio::test] +async fn 
estimating_gas_for_load_test_tx( + tx_params: LoadnextContractExecutionParams, + acceptable_overestimation: u64, +) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + let tx = alice.create_load_test_tx(tx_params); + + test_estimating_gas(state_override, tx, acceptable_overestimation).await; +} + +#[test_casing(4, [10, 50, 100, 200])] +#[tokio::test] +async fn estimating_gas_for_expensive_txs(write_count: usize) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_expensive_contract().build(); + let tx = alice.create_expensive_tx(write_count); + + test_estimating_gas(state_override, tx, 0).await; +} + +#[tokio::test] +async fn estimating_gas_for_code_oracle_tx() { + let alice = K256PrivateKey::random(); + // Add another contract that is never executed, but has a large bytecode. + let huge_contact_address = Address::repeat_byte(23); + let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; + let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); + let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); + + let state_override = StateBuilder::default() + .with_precompiles_contract() + .with_contract(huge_contact_address, huge_contract_bytecode) + .build(); + let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); + + test_estimating_gas(state_override, tx, 0).await; +} + +#[tokio::test] +async fn estimating_gas_for_reverting_tx() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + + let tx = alice.create_counter_tx(1.into(), true); + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + let fee_scale_factor = 1.0; + let acceptable_overestimation = 0; + for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { + let err = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + binary_search_kind, + ) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::ExecutionReverted(..)); + } +} + +#[tokio::test] +async fn estimating_gas_for_infinite_loop_tx() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_infinite_loop_contract() + .build(); + + let tx = alice.create_infinite_loop_tx(); + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + let fee_scale_factor = 1.0; + let acceptable_overestimation = 0; + for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { + let err = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + binary_search_kind, + ) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); + } +} diff --git a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs new file mode 100644 index 00000000000..3d48e320abc --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -0,0 +1,161 @@ +//! Tests for the transaction sender. 
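For context on the `acceptable_overestimation` parameter used throughout the gas-estimation tests above: it bounds how far the returned gas limit may sit above the smallest limit that still lets the transaction succeed, which is why the full and optimized binary searches are only required to agree within that tolerance. A minimal sketch of the general idea (illustrative only; `estimate_gas_limit` and `simulate` are stand-ins, not the actual `TxSender` / `GasEstimator` API):

```rust
/// Illustrative binary search for a passing gas limit that stops once the
/// remaining search interval fits into the acceptable overestimation.
/// Precondition: `lo` is a known-failing limit and `hi` a known-passing one.
fn estimate_gas_limit(
    mut lo: u64,
    mut hi: u64,
    acceptable_overestimation: u64,
    simulate: impl Fn(u64) -> bool, // true if execution succeeds with this limit
) -> u64 {
    while hi - lo > acceptable_overestimation.max(1) {
        let mid = lo + (hi - lo) / 2;
        if simulate(mid) {
            hi = mid; // still succeeds: tighten the upper bound
        } else {
            lo = mid; // fails: the answer is above `mid`
        }
    }
    hi // overestimates the true minimum by at most the tolerance
}
```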
+ +use test_casing::TestCases; +use zksync_contracts::test_contracts::LoadnextContractExecutionParams; +use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; +use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_types::{get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog}; +use zksync_vm_executor::oneshot::MockOneshotExecutor; + +use super::*; +use crate::web3::testonly::create_test_tx_sender; + +mod call; +mod gas_estimation; +mod send_tx; + +const LOAD_TEST_CASES: TestCases = test_casing::cases! {[ + LoadnextContractExecutionParams::default(), + // No storage modification + LoadnextContractExecutionParams { + writes: 0, + events: 0, + ..LoadnextContractExecutionParams::default() + }, + // Moderately deep recursion (very deep recursion is tested separately) + LoadnextContractExecutionParams { + recursive_calls: 10, + ..LoadnextContractExecutionParams::default() + }, + // No deploys + LoadnextContractExecutionParams { + deploys: 0, + ..LoadnextContractExecutionParams::default() + }, + // Lots of deploys + LoadnextContractExecutionParams { + deploys: 10, + ..LoadnextContractExecutionParams::default() + }, +]}; + +#[tokio::test] +async fn getting_nonce_for_account() { + let l2_chain_id = L2ChainId::default(); + let test_address = Address::repeat_byte(1); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + // Manually insert a nonce for the address. + let nonce_key = get_nonce_key(&test_address); + let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(123)); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[nonce_log]) + .await + .unwrap(); + + let tx_executor = MockOneshotExecutor::default(); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(123)); + + // Insert another L2 block with a new nonce log. 
+ storage + .blocks_dal() + .insert_l2_block(&create_l2_block(1)) + .await + .unwrap(); + let nonce_log = StorageLog { + value: H256::from_low_u64_be(321), + ..nonce_log + }; + storage + .storage_logs_dal() + .insert_storage_logs(L2BlockNumber(1), &[nonce_log]) + .await + .unwrap(); + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(321)); + let missing_address = Address::repeat_byte(0xff); + let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); + assert_eq!(nonce, Nonce(0)); +} + +#[tokio::test] +async fn getting_nonce_for_account_after_snapshot_recovery() { + const SNAPSHOT_L2_BLOCK_NUMBER: L2BlockNumber = L2BlockNumber(42); + + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + let test_address = Address::repeat_byte(1); + let other_address = Address::repeat_byte(2); + let nonce_logs = [ + StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)), + StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)), + ]; + prepare_recovery_snapshot( + &mut storage, + L1BatchNumber(23), + SNAPSHOT_L2_BLOCK_NUMBER, + &nonce_logs, + ) + .await; + + let l2_chain_id = L2ChainId::default(); + let tx_executor = MockOneshotExecutor::default(); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + storage + .blocks_dal() + .insert_l2_block(&create_l2_block(SNAPSHOT_L2_BLOCK_NUMBER.0 + 1)) + .await + .unwrap(); + let new_nonce_logs = vec![StorageLog::new_write_log( + get_nonce_key(&test_address), + H256::from_low_u64_be(321), + )]; + storage + .storage_logs_dal() + .insert_storage_logs(SNAPSHOT_L2_BLOCK_NUMBER + 1, &new_nonce_logs) + .await + .unwrap(); + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(321)); + let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap(); + assert_eq!(nonce, Nonce(25)); + let missing_address = Address::repeat_byte(0xff); + let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); + assert_eq!(nonce, Nonce(0)); +} + +async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { + let mut storage = pool.connection().await.unwrap(); + let genesis_params = GenesisParams::mock(); + insert_genesis_batch(&mut storage, &genesis_params) + .await + .unwrap(); + drop(storage); + + let genesis_config = genesis_params.config(); + let executor_options = SandboxExecutorOptions::new( + genesis_config.l2_chain_id, + AccountTreeId::new(genesis_config.fee_account), + u32::MAX, + ) + .await + .unwrap(); + + let pg_caches = PostgresStorageCaches::new(1, 1); + let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); + create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor) + .await + .0 +} diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs new file mode 100644 index 00000000000..678b88dab94 --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -0,0 +1,293 @@ +//! Tests for sending raw transactions. 
+ +use assert_matches::assert_matches; +use test_casing::test_casing; +use zksync_multivm::interface::ExecutionResult; +use zksync_node_fee_model::MockBatchFeeParamsProvider; +use zksync_node_test_utils::create_l2_transaction; +use zksync_types::K256PrivateKey; + +use super::*; +use crate::testonly::{StateBuilder, TestAccount}; + +#[tokio::test] +async fn submitting_tx_requires_one_connection() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + let tx_hash = tx.hash(); + + // Manually set sufficient balance for the tx initiator. + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_tx_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { output: vec![] } + }); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + let submission_result = tx_sender.submit_tx(tx).await.unwrap(); + assert_matches!(submission_result.0, L2TxSubmissionResult::Added); + + let mut storage = pool.connection().await.unwrap(); + storage + .transactions_web3_dal() + .get_transaction_by_hash(tx_hash, l2_chain_id) + .await + .unwrap() + .expect("transaction is not persisted"); +} + +#[tokio::test] +async fn nonce_validation_errors() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + drop(storage); + + let l2_chain_id = L2ChainId::default(); + let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let mut tx = create_l2_transaction(55, 555); + + tx_sender.validate_account_nonce(&tx).await.unwrap(); + // There should be some leeway with the nonce validation. 
+ tx.common_data.nonce = Nonce(1); + tx_sender.validate_account_nonce(&tx).await.unwrap(); + + tx.common_data.nonce = Nonce(10_000); + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooHigh(from, _, actual) if actual == 10_000 && from == 0 + ); + + let mut storage = pool.connection().await.unwrap(); + let nonce_key = get_nonce_key(&tx.initiator_account()); + let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(42)); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[nonce_log]) + .await + .unwrap(); + drop(storage); + + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooHigh(from, _, actual) if actual == 10_000 && from == 42 + ); + + tx.common_data.nonce = Nonce(5); + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooLow(from, _, actual) if actual == 5 && from == 42 + ); +} + +#[tokio::test] +async fn fee_validation_errors() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + // Sanity check: validation should succeed with reasonable fee params. + tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap(); + + { + let mut tx = tx.clone(); + tx.common_data.fee.gas_limit = 100.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::IntrinsicGas); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.gas_limit = u64::MAX.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::GasLimitIsTooBig); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.max_fee_per_gas = 1.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::MaxFeePerGasTooLow); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.max_priority_fee_per_gas = tx.common_data.fee.max_fee_per_gas * 2; + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::MaxPriorityFeeGreaterThanMaxFee); + } +} + +#[tokio::test] +async fn sending_transfer() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + + // Manually set sufficient balance for the tx initiator. 
+ let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let transfer = alice.create_transfer(1_000_000_000.into()); + let (sub_result, vm_result) = tx_sender.submit_tx(transfer).await.unwrap(); + assert_matches!(sub_result, L2TxSubmissionResult::Added); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[tokio::test] +async fn sending_transfer_with_insufficient_balance() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + + let transfer = alice.create_transfer(transfer_value); + let err = tx_sender.submit_tx(transfer).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NotEnoughBalanceForFeeValue(balance, _, value) if balance.is_zero() + && value == transfer_value + ); +} + +#[tokio::test] +async fn sending_transfer_with_incorrect_signature() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut transfer = alice.create_transfer(transfer_value); + transfer.execute.value = transfer_value / 2; // This should invalidate tx signature + let err = tx_sender.submit_tx(transfer).await.unwrap_err(); + assert_matches!(err, SubmitTxError::ValidationFailed(_)); +} + +#[test_casing(5, LOAD_TEST_CASES)] +#[tokio::test] +async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParams) { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_load_test_contract() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_load_test_tx(tx_params); + let (sub_result, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + assert_matches!(sub_result, L2TxSubmissionResult::Added); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[tokio::test] +async fn sending_reverting_transaction() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_counter_contract(0) + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_counter_tx(1.into(), true); + let (_, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } if output.to_string().contains("This method always reverts") + ); +} + +#[tokio::test] +async fn sending_transaction_out_of_gas() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + + let mut storage = 
tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_infinite_loop_contract() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_infinite_loop_tx(); + let (_, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + assert_matches!(vm_result.result, ExecutionResult::Revert { .. }); +} From f6d86bd7935a1cdbb528b13437424031fda3cb8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Mon, 7 Oct 2024 14:28:00 +0200 Subject: [PATCH 011/140] feat(eth-sender): add time_in_mempool_cap config (#3018) Configuration parameter for time_in_mempool_cap, cap for time_in_mempool in eth-sender fee model + default values for parameter_a and parameter_b for GasAdjuster --- core/lib/config/src/configs/eth_sender.rs | 22 +++++++++++++++++++ core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/eth_sender.rs | 2 ++ core/lib/protobuf_config/src/eth.rs | 8 +++++-- .../src/proto/config/eth_sender.proto | 1 + core/node/eth_sender/src/eth_fees_oracle.rs | 20 +++++++++++------ core/node/eth_sender/src/eth_tx_manager.rs | 14 ++++++++---- .../src/l1_gas_price/gas_adjuster/mod.rs | 6 ++--- core/node/fee_model/src/l1_gas_price/mod.rs | 2 +- etc/env/base/eth_sender.toml | 4 ++-- 10 files changed, 61 insertions(+), 19 deletions(-) diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 7e6ef2244cb..3a1a0505728 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -42,6 +42,7 @@ impl EthConfig { pubdata_sending_mode: PubdataSendingMode::Calldata, tx_aggregation_paused: false, tx_aggregation_only_prove_and_execute: false, + time_in_mempool_in_l1_blocks_cap: 1800, }), gas_adjuster: Some(GasAdjusterConfig { default_priority_fee_per_gas: 1000000000, @@ -127,6 +128,10 @@ pub struct SenderConfig { /// special mode specifically for gateway migration to decrease number of non-executed batches #[serde(default = "SenderConfig::default_tx_aggregation_only_prove_and_execute")] pub tx_aggregation_only_prove_and_execute: bool, + + /// Cap of time in mempool for price calculations + #[serde(default = "SenderConfig::default_time_in_mempool_in_l1_blocks_cap")] + pub time_in_mempool_in_l1_blocks_cap: u32, } impl SenderConfig { @@ -168,6 +173,13 @@ impl SenderConfig { const fn default_tx_aggregation_only_prove_and_execute() -> bool { false } + + pub const fn default_time_in_mempool_in_l1_blocks_cap() -> u32 { + let blocks_per_hour = 3600 / 12; + // we cap it at 6h to not allow nearly infinite values when a tx is stuck for a long time + // 1,001 ^ 1800 ~= 6, so by default we cap exponential price formula at roughly median * 6 + blocks_per_hour * 6 + } } #[derive(Debug, Deserialize, Copy, Clone, PartialEq, Default)] @@ -177,8 +189,10 @@ pub struct GasAdjusterConfig { /// Number of blocks collected by GasAdjuster from which base_fee median is taken pub max_base_fee_samples: usize, /// Parameter of the transaction base_fee_per_gas pricing formula + #[serde(default = "GasAdjusterConfig::default_pricing_formula_parameter_a")] pub pricing_formula_parameter_a: f64, /// Parameter of the transaction base_fee_per_gas pricing formula + #[serde(default = "GasAdjusterConfig::default_pricing_formula_parameter_b")] pub pricing_formula_parameter_b: f64, /// Parameter by which the base fee will be multiplied for internal purposes pub internal_l1_pricing_multiplier: f64, @@ -225,4 +239,12 @@ impl GasAdjusterConfig { pub 
const fn default_internal_pubdata_pricing_multiplier() -> f64 { 1.0 } + + pub const fn default_pricing_formula_parameter_a() -> f64 { + 1.1 + } + + pub const fn default_pricing_formula_parameter_b() -> f64 { + 1.001 + } } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 86d9545b0fb..1d90034410b 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -419,6 +419,7 @@ impl Distribution for EncodeDist { pubdata_sending_mode: PubdataSendingMode::Calldata, tx_aggregation_paused: false, tx_aggregation_only_prove_and_execute: false, + time_in_mempool_in_l1_blocks_cap: self.sample(rng), } } } diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs index 64e0a89d5a4..e5132eb7d91 100644 --- a/core/lib/env_config/src/eth_sender.rs +++ b/core/lib/env_config/src/eth_sender.rs @@ -72,6 +72,7 @@ mod tests { pubdata_sending_mode: PubdataSendingMode::Calldata, tx_aggregation_only_prove_and_execute: false, tx_aggregation_paused: false, + time_in_mempool_in_l1_blocks_cap: 2000, }), gas_adjuster: Some(GasAdjusterConfig { default_priority_fee_per_gas: 20000000000, @@ -131,6 +132,7 @@ mod tests { ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG="30" ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS="4000000" ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE="120000" + ETH_SENDER_SENDER_TIME_IN_MEMPOOL_IN_L1_BLOCKS_CAP="2000" ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000" ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000" ETH_SENDER_SENDER_PUBDATA_SENDING_MODE="Calldata" diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index 273b7f4e344..c1d95bd30d2 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -115,6 +115,9 @@ impl ProtoRepr for proto::Sender { .parse(), tx_aggregation_only_prove_and_execute: self.tx_aggregation_paused.unwrap_or(false), tx_aggregation_paused: self.tx_aggregation_only_prove_and_execute.unwrap_or(false), + time_in_mempool_in_l1_blocks_cap: self + .time_in_mempool_in_l1_blocks_cap + .unwrap_or(Self::Type::default_time_in_mempool_in_l1_blocks_cap()), }) } @@ -147,6 +150,7 @@ impl ProtoRepr for proto::Sender { ), tx_aggregation_only_prove_and_execute: Some(this.tx_aggregation_only_prove_and_execute), tx_aggregation_paused: Some(this.tx_aggregation_paused), + time_in_mempool_in_l1_blocks_cap: Some(this.time_in_mempool_in_l1_blocks_cap), } } } @@ -161,9 +165,9 @@ impl ProtoRepr for proto::GasAdjuster { .and_then(|x| Ok((*x).try_into()?)) .context("max_base_fee_samples")?, pricing_formula_parameter_a: *required(&self.pricing_formula_parameter_a) - .context("pricing_formula_parameter_a")?, + .unwrap_or(&Self::Type::default_pricing_formula_parameter_a()), pricing_formula_parameter_b: *required(&self.pricing_formula_parameter_b) - .context("pricing_formula_parameter_b")?, + .unwrap_or(&Self::Type::default_pricing_formula_parameter_b()), internal_l1_pricing_multiplier: *required(&self.internal_l1_pricing_multiplier) .context("internal_l1_pricing_multiplier")?, internal_enforced_l1_gas_price: self.internal_enforced_l1_gas_price, diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto index b102a08be04..6438573e08d 100644 --- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto +++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto @@ -48,6 +48,7 @@ message Sender { reserved 19; reserved "proof_loading_mode"; 
optional bool tx_aggregation_paused = 20; // required optional bool tx_aggregation_only_prove_and_execute = 21; // required + optional uint32 time_in_mempool_in_l1_blocks_cap = 22; // optional } message GasAdjuster { diff --git a/core/node/eth_sender/src/eth_fees_oracle.rs b/core/node/eth_sender/src/eth_fees_oracle.rs index 2c87848dcc3..ebd1568edb6 100644 --- a/core/node/eth_sender/src/eth_fees_oracle.rs +++ b/core/node/eth_sender/src/eth_fees_oracle.rs @@ -23,7 +23,7 @@ pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { fn calculate_fees( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, operator_type: OperatorType, ) -> Result; } @@ -32,6 +32,7 @@ pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { pub(crate) struct GasAdjusterFeesOracle { pub gas_adjuster: Arc, pub max_acceptable_priority_fee_in_gwei: u64, + pub time_in_mempool_in_l1_blocks_cap: u32, } impl GasAdjusterFeesOracle { @@ -80,11 +81,16 @@ impl GasAdjusterFeesOracle { fn calculate_fees_no_blob_sidecar( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, ) -> Result { - // cap it at 6h to not allow nearly infinite values when a tx is stuck for a long time - let capped_time_in_mempool = min(time_in_mempool, 1800); - let mut base_fee_per_gas = self.gas_adjuster.get_base_fee(capped_time_in_mempool); + // we cap it to not allow nearly infinite values when a tx is stuck for a long time + let capped_time_in_mempool_in_l1_blocks = min( + time_in_mempool_in_l1_blocks, + self.time_in_mempool_in_l1_blocks_cap, + ); + let mut base_fee_per_gas = self + .gas_adjuster + .get_base_fee(capped_time_in_mempool_in_l1_blocks); self.assert_fee_is_not_zero(base_fee_per_gas, "base"); if let Some(previous_sent_tx) = previous_sent_tx { self.verify_base_fee_not_too_low_on_resend( @@ -162,14 +168,14 @@ impl EthFeesOracle for GasAdjusterFeesOracle { fn calculate_fees( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, operator_type: OperatorType, ) -> Result { let has_blob_sidecar = operator_type == OperatorType::Blob; if has_blob_sidecar { self.calculate_fees_with_blob_sidecar(previous_sent_tx) } else { - self.calculate_fees_no_blob_sidecar(previous_sent_tx, time_in_mempool) + self.calculate_fees_no_blob_sidecar(previous_sent_tx, time_in_mempool_in_l1_blocks) } } } diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index 0d78ab71c62..7de91a3b773 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -48,6 +48,7 @@ impl EthTxManager { let fees_oracle = GasAdjusterFeesOracle { gas_adjuster, max_acceptable_priority_fee_in_gwei: config.max_acceptable_priority_fee_in_gwei, + time_in_mempool_in_l1_blocks_cap: config.time_in_mempool_in_l1_blocks_cap, }; let l1_interface = Box::new(RealL1Interface { ethereum_gateway, @@ -111,7 +112,7 @@ impl EthTxManager { &mut self, storage: &mut Connection<'_, Core>, tx: &EthTx, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, current_block: L1BlockNumber, ) -> Result { let previous_sent_tx = storage @@ -127,7 +128,7 @@ impl EthTxManager { pubdata_price: _, } = self.fees_oracle.calculate_fees( &previous_sent_tx, - time_in_mempool, + time_in_mempool_in_l1_blocks, self.operator_type(tx), )?; @@ -601,13 +602,18 @@ impl EthTxManager { .await? { // New gas price depends on the time this tx spent in mempool. 
- let time_in_mempool = l1_block_numbers.latest.0 - sent_at_block; + let time_in_mempool_in_l1_blocks = l1_block_numbers.latest.0 - sent_at_block; // We don't want to return early in case resend does not succeed - // the error is logged anyway, but early returns will prevent // sending new operations. let _ = self - .send_eth_tx(storage, &tx, time_in_mempool, l1_block_numbers.latest) + .send_eth_tx( + storage, + &tx, + time_in_mempool_in_l1_blocks, + l1_block_numbers.latest, + ) .await?; } Ok(()) diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index e6842b92fdb..27cdc7f5d5e 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -317,14 +317,14 @@ impl TxParamsProvider for GasAdjuster { // smooth out base_fee increases in general. // In other words, in order to pay less fees, we are ready to wait longer. // But the longer we wait, the more we are ready to pay. - fn get_base_fee(&self, time_in_mempool: u32) -> u64 { + fn get_base_fee(&self, time_in_mempool_in_l1_blocks: u32) -> u64 { let a = self.config.pricing_formula_parameter_a; let b = self.config.pricing_formula_parameter_b; // Currently we use an exponential formula. // The alternative is a linear one: - // `let scale_factor = a + b * time_in_mempool as f64;` - let scale_factor = a * b.powf(time_in_mempool as f64); + // `let scale_factor = a + b * time_in_mempool_in_l1_blocks as f64;` + let scale_factor = a * b.powf(time_in_mempool_in_l1_blocks as f64); let median = self.base_fee_statistics.median(); METRICS.median_base_fee_per_gas.set(median); let new_fee = median as f64 * scale_factor; diff --git a/core/node/fee_model/src/l1_gas_price/mod.rs b/core/node/fee_model/src/l1_gas_price/mod.rs index 2a5d63089ca..e23bccf27ee 100644 --- a/core/node/fee_model/src/l1_gas_price/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/mod.rs @@ -16,7 +16,7 @@ mod main_node_fetcher; /// This trait, as a bound, should only be used in components that actually sign and send transactions. pub trait TxParamsProvider: fmt::Debug + 'static + Send + Sync { /// Returns the recommended `max_fee_per_gas` value (EIP1559). - fn get_base_fee(&self, time_in_mempool: u32) -> u64; + fn get_base_fee(&self, time_in_mempool_in_l1_blocks: u32) -> u64; /// Returns the recommended `max_priority_fee_per_gas` value (EIP1559). fn get_priority_fee(&self) -> u64; diff --git a/etc/env/base/eth_sender.toml b/etc/env/base/eth_sender.toml index 31fe626c87f..ad5709551c4 100644 --- a/etc/env/base/eth_sender.toml +++ b/etc/env/base/eth_sender.toml @@ -55,8 +55,8 @@ default_priority_fee_per_gas = 1_000_000_000 max_base_fee_samples = 10_000 # These two are parameters of the base_fee_per_gas formula in GasAdjuster. # The possible formulas are: -# 1. base_fee_median * (A + B * time_in_mempool) -# 2. base_fee_median * A * B ^ time_in_mempool +# 1. base_fee_median * (A + B * time_in_mempool_in_l1_blocks) +# 2. base_fee_median * A * B ^ time_in_mempool_in_l1_blocks # Currently the second is used. 
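Concretely, formula 2 together with the new cap behaves like the following standalone sketch (mirroring `GasAdjuster::get_base_fee` plus the cap applied in `GasAdjusterFeesOracle`; the function name is illustrative, and the numbers below are the defaults introduced in this change, used here only as example values):

```rust
/// Sketch of the capped exponential fee-escalation formula (illustrative only).
fn scaled_base_fee(
    median_base_fee: u64,
    a: f64,                                // pricing_formula_parameter_a
    b: f64,                                // pricing_formula_parameter_b
    time_in_mempool_in_l1_blocks: u32,
    time_in_mempool_in_l1_blocks_cap: u32,
) -> u64 {
    let capped = time_in_mempool_in_l1_blocks.min(time_in_mempool_in_l1_blocks_cap);
    let scale_factor = a * b.powf(capped as f64);
    (median_base_fee as f64 * scale_factor) as u64
}
```

With `a = 1.1`, `b = 1.001` and the default cap of 1800 L1 blocks (~6 h of 12-second blocks): a tx that has just been sent pays about 1.1 x median, after ~2 h in the mempool (600 blocks) about 2 x median (1.001^600 ≈ 1.8), and once the cap is reached the multiplier stops growing at about a x 6 ≈ 6.6 x median (1.001^1800 ≈ 6, matching the "roughly median * 6" comment in `default_time_in_mempool_in_l1_blocks_cap`).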
# To confirm, see core/bin/zksync_core/src/eth_sender/gas_adjuster/mod.rs pricing_formula_parameter_a = 1.5 From 57d658ebed017127ae9b373980218bcb1e804831 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Mon, 7 Oct 2024 10:37:00 -0300 Subject: [PATCH 012/140] fix(zkstack_cli): Fix fmt --check (#3027) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ `fmt --check` wasn't returning an error code when failing --- .../src/commands/chain/args/init/configs.rs | 2 +- .../zk_inception/src/commands/chain/init/configs.rs | 6 ++---- .../zk_inception/src/commands/chain/init/mod.rs | 4 ++-- .../zk_inception/src/commands/ecosystem/args/init.rs | 4 ++-- .../zk_inception/src/commands/ecosystem/init.rs | 2 +- zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs | 11 +++-------- 6 files changed, 11 insertions(+), 18 deletions(-) diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs index b4a49f29d21..c26ad647524 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs @@ -13,7 +13,7 @@ use crate::{ defaults::LOCAL_RPC_URL, messages::{ MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, - MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP + MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, }, }; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs index e6b9fa7117d..d0897473b83 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs @@ -1,10 +1,8 @@ use anyhow::Context; use common::logger; use config::{ - copy_configs, set_l1_rpc_url, update_from_chain_config, - ChainConfig, ContractsConfig, EcosystemConfig, - traits::SaveConfigWithBasePath, - DEFAULT_CONSENSUS_PORT, + copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, + ChainConfig, ContractsConfig, EcosystemConfig, DEFAULT_CONSENSUS_PORT, }; use ethers::types::Address; use xshell::Shell; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs index 8a36f4e32b2..ac80a5b98f7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs @@ -80,7 +80,7 @@ pub async fn init( let init_configs_args = InitConfigsArgsFinal::from_chain_init_args(init_args); let mut contracts_config = init_configs(&init_configs_args, shell, ecosystem_config, chain_config).await?; - + // Fund some wallet addresses with ETH or base token (only for Localhost) distribute_eth(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; mint_base_token(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; @@ -178,6 +178,6 @@ pub async fn init( genesis(init_args.genesis_args.clone(), shell, chain_config) .await .context(MSG_GENESIS_DATABASE_ERR)?; - + Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs index 6d6ed2f3fd9..830b7b25e47 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs +++ 
b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs @@ -12,8 +12,8 @@ use crate::{ messages::{ MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEV_ARG_HELP, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, - MSG_L1_RPC_URL_PROMPT, MSG_OBSERVABILITY_HELP, MSG_OBSERVABILITY_PROMPT, - MSG_NO_PORT_REALLOCATION_HELP, + MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP, + MSG_OBSERVABILITY_PROMPT, }, }; diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 67ae3162842..6b64b740aed 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -366,7 +366,7 @@ async fn init_chains( genesis_args: genesis_args.clone(), deploy_paymaster, l1_rpc_url: Some(final_init_args.ecosystem.l1_rpc_url.clone()), - no_port_reallocation: final_init_args.no_port_reallocation + no_port_reallocation: final_init_args.no_port_reallocation, }; let final_chain_init_args = chain_init_args.fill_values_with_prompt(&chain_config); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs index 3aefc15aba7..a6db4643c30 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs @@ -101,14 +101,9 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> { ))); tasks.push(tokio::spawn(prettier_contracts(shell.clone(), args.check))); - futures::future::join_all(tasks) - .await - .iter() - .for_each(|res| { - if let Err(err) = res { - logger::error(err) - } - }); + for result in futures::future::join_all(tasks).await { + result??; + } } Some(Formatter::Prettier { mut targets }) => { if targets.is_empty() { From 9636e0104cfff98cdd4b4a52abc20a3e7e0d5035 Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Mon, 7 Oct 2024 15:40:16 +0200 Subject: [PATCH 013/140] docs(zk_toolbox): remove generate-sk subcommand references (#3016) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Remove `generate-sk` subcommand which doesn’t exist anymore. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
--- zk_toolbox/README.md | 6 ------ zk_toolbox/crates/zk_inception/README.md | 1 - 2 files changed, 7 deletions(-) diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md index a3b44fa98b3..6197a79eec9 100644 --- a/zk_toolbox/README.md +++ b/zk_toolbox/README.md @@ -183,12 +183,6 @@ Initialize the prover: zk_inception prover init ``` -Generate setup keys: - -```bash -zk_inception prover generate-sk -``` - Run the prover: ```bash diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 904b1421e3a..7923afe4e98 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -423,7 +423,6 @@ Prover related commands ###### **Subcommands:** - `init` — Initialize prover -- `generate-sk` — Generate setup keys - `run` — Run prover - `init-bellman-cuda` — Initialize bellman-cuda From f0bfd2a88a09b0d026f128c43b4e0e7b922b5a2d Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 7 Oct 2024 17:56:31 +0300 Subject: [PATCH 014/140] chore(vm): Remove tests for old VM versions (#3015) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Removes unit test files for old VM versions in the `multivm` crate. ## Why ❔ These tests are not built (not included into the module tree of the crate), so they are just deadweight. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). --- .../src/versions/vm_1_4_1/tests/block_tip.rs | 284 ---------- .../src/versions/vm_1_4_1/tests/bootloader.rs | 56 -- .../vm_1_4_1/tests/bytecode_publishing.rs | 43 -- .../versions/vm_1_4_1/tests/call_tracer.rs | 92 ---- .../src/versions/vm_1_4_1/tests/circuits.rs | 69 --- .../src/versions/vm_1_4_1/tests/default_aa.rs | 78 --- .../src/versions/vm_1_4_1/tests/gas_limit.rs | 45 -- .../vm_1_4_1/tests/get_used_contracts.rs | 109 ---- .../vm_1_4_1/tests/invalid_bytecode.rs | 120 ----- .../vm_1_4_1/tests/is_write_initial.rs | 48 -- .../vm_1_4_1/tests/l1_tx_execution.rs | 189 ------- .../src/versions/vm_1_4_1/tests/l2_blocks.rs | 437 --------------- .../src/versions/vm_1_4_1/tests/mod.rs | 23 - .../versions/vm_1_4_1/tests/nonce_holder.rs | 188 ------- .../versions/vm_1_4_1/tests/precompiles.rs | 136 ----- .../src/versions/vm_1_4_1/tests/refunds.rs | 166 ------ .../versions/vm_1_4_1/tests/require_eip712.rs | 165 ------ .../src/versions/vm_1_4_1/tests/rollbacks.rs | 263 --------- .../vm_1_4_1/tests/simple_execution.rs | 81 --- .../vm_1_4_1/tests/tester/inner_state.rs | 131 ----- .../src/versions/vm_1_4_1/tests/tester/mod.rs | 9 - .../tests/tester/transaction_test_info.rs | 217 -------- .../vm_1_4_1/tests/tester/vm_tester.rs | 298 ----------- .../vm_1_4_1/tests/tracing_execution_error.rs | 54 -- .../src/versions/vm_1_4_1/tests/upgrade.rs | 355 ------------- .../src/versions/vm_1_4_1/tests/utils.rs | 121 ----- .../src/versions/vm_1_4_2/tests/block_tip.rs | 399 -------------- .../src/versions/vm_1_4_2/tests/bootloader.rs | 55 -- .../vm_1_4_2/tests/bytecode_publishing.rs | 40 -- .../versions/vm_1_4_2/tests/call_tracer.rs | 92 ---- .../src/versions/vm_1_4_2/tests/circuits.rs | 69 --- .../src/versions/vm_1_4_2/tests/default_aa.rs | 77 --- .../src/versions/vm_1_4_2/tests/gas_limit.rs | 44 -- .../vm_1_4_2/tests/get_used_contracts.rs | 109 ---- .../vm_1_4_2/tests/invalid_bytecode.rs | 120 ----- .../vm_1_4_2/tests/is_write_initial.rs | 45 -- .../vm_1_4_2/tests/l1_tx_execution.rs | 188 ------- .../src/versions/vm_1_4_2/tests/l2_blocks.rs | 437 --------------- 
.../src/versions/vm_1_4_2/tests/mod.rs | 23 - .../versions/vm_1_4_2/tests/nonce_holder.rs | 187 ------- .../versions/vm_1_4_2/tests/precompiles.rs | 135 ----- .../vm_1_4_2/tests/prestate_tracer.rs | 143 ----- .../src/versions/vm_1_4_2/tests/refunds.rs | 169 ------ .../versions/vm_1_4_2/tests/require_eip712.rs | 164 ------ .../src/versions/vm_1_4_2/tests/rollbacks.rs | 263 --------- .../vm_1_4_2/tests/simple_execution.rs | 78 --- .../vm_1_4_2/tests/tester/inner_state.rs | 131 ----- .../src/versions/vm_1_4_2/tests/tester/mod.rs | 9 - .../tests/tester/transaction_test_info.rs | 217 -------- .../vm_1_4_2/tests/tester/vm_tester.rs | 298 ----------- .../vm_1_4_2/tests/tracing_execution_error.rs | 51 -- .../src/versions/vm_1_4_2/tests/upgrade.rs | 352 ------------ .../src/versions/vm_1_4_2/tests/utils.rs | 121 ----- .../vm_boojum_integration/tests/bootloader.rs | 56 -- .../tests/bytecode_publishing.rs | 43 -- .../tests/call_tracer.rs | 92 ---- .../vm_boojum_integration/tests/circuits.rs | 66 --- .../vm_boojum_integration/tests/default_aa.rs | 76 --- .../vm_boojum_integration/tests/gas_limit.rs | 45 -- .../tests/get_used_contracts.rs | 109 ---- .../tests/invalid_bytecode.rs | 120 ----- .../tests/is_write_initial.rs | 48 -- .../tests/l1_tx_execution.rs | 139 ----- .../vm_boojum_integration/tests/l2_blocks.rs | 437 --------------- .../vm_boojum_integration/tests/mod.rs | 22 - .../tests/nonce_holder.rs | 188 ------- .../tests/precompiles.rs | 136 ----- .../vm_boojum_integration/tests/refunds.rs | 167 ------ .../tests/require_eip712.rs | 165 ------ .../vm_boojum_integration/tests/rollbacks.rs | 263 --------- .../tests/simple_execution.rs | 81 --- .../tests/tester/inner_state.rs | 130 ----- .../vm_boojum_integration/tests/tester/mod.rs | 7 - .../tests/tester/transaction_test_info.rs | 217 -------- .../tests/tester/vm_tester.rs | 295 ---------- .../tests/tracing_execution_error.rs | 54 -- .../vm_boojum_integration/tests/upgrade.rs | 362 ------------- .../vm_boojum_integration/tests/utils.rs | 111 ---- .../tests/bootloader.rs | 54 -- .../tests/bytecode_publishing.rs | 37 -- .../tests/call_tracer.rs | 87 --- .../tests/default_aa.rs | 70 --- .../vm_refunds_enhancement/tests/gas_limit.rs | 45 -- .../tests/get_used_contracts.rs | 104 ---- .../tests/invalid_bytecode.rs | 120 ----- .../tests/is_write_initial.rs | 42 -- .../tests/l1_tx_execution.rs | 125 ----- .../vm_refunds_enhancement/tests/l2_blocks.rs | 498 ----------------- .../vm_refunds_enhancement/tests/mod.rs | 20 - .../tests/nonce_holder.rs | 181 ------- .../vm_refunds_enhancement/tests/refunds.rs | 172 ------ .../tests/require_eip712.rs | 163 ------ .../vm_refunds_enhancement/tests/rollbacks.rs | 259 --------- .../tests/simple_execution.rs | 77 --- .../tests/tester/inner_state.rs | 127 ----- .../tests/tester/mod.rs | 7 - .../tests/tester/transaction_test_info.rs | 217 -------- .../tests/tester/vm_tester.rs | 300 ----------- .../tests/tracing_execution_error.rs | 53 -- .../vm_refunds_enhancement/tests/upgrade.rs | 342 ------------ .../vm_refunds_enhancement/tests/utils.rs | 106 ---- .../vm_virtual_blocks/tests/bootloader.rs | 53 -- .../tests/bytecode_publishing.rs | 37 -- .../vm_virtual_blocks/tests/call_tracer.rs | 88 --- .../vm_virtual_blocks/tests/default_aa.rs | 70 --- .../vm_virtual_blocks/tests/gas_limit.rs | 45 -- .../tests/get_used_contracts.rs | 106 ---- .../tests/invalid_bytecode.rs | 120 ----- .../tests/is_write_initial.rs | 42 -- .../tests/l1_tx_execution.rs | 125 ----- .../vm_virtual_blocks/tests/l2_blocks.rs | 502 ------------------ 
.../versions/vm_virtual_blocks/tests/mod.rs | 20 - .../vm_virtual_blocks/tests/nonce_holder.rs | 182 ------- .../vm_virtual_blocks/tests/refunds.rs | 152 ------ .../vm_virtual_blocks/tests/require_eip712.rs | 161 ------ .../vm_virtual_blocks/tests/rollbacks.rs | 146 ----- .../tests/simple_execution.rs | 77 --- .../tests/tester/inner_state.rs | 119 ----- .../vm_virtual_blocks/tests/tester/mod.rs | 7 - .../tests/tester/transaction_test_info.rs | 216 -------- .../tests/tester/vm_tester.rs | 291 ---------- .../tests/tracing_execution_error.rs | 52 -- .../vm_virtual_blocks/tests/upgrade.rs | 344 ------------ .../versions/vm_virtual_blocks/tests/utils.rs | 106 ---- 124 files changed, 17489 deletions(-) delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs delete mode 100644 
core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs delete mode 100644 
core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs delete mode 100644 
core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs deleted file mode 100644 index ba699e7558b..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs +++ /dev/null @@ -1,284 +0,0 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use zk_evm_1_4_1::{ - aux_structures::Timestamp, zkevm_opcode_defs::system_params::MAX_PUBDATA_PER_BLOCK, -}; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log, - writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BOOTLOADER_BATCH_TIP_OVERHEAD, - tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder}, - tracers::PubdataTracer, - HistoryEnabled, TracerDispatcher, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - 
.unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .collect::>(); - - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(encoded_calls)]) - .unwrap() -} - -fn execute_test(test_data: L1MessengerTestData) -> u32 { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, - calldata: data, - value: U256::zero(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs, - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!(!result.result.is_failed(), "Batch wasn't successful"); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - ergs_before - ergs_after -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -#[test] -fn test_dry_run_upper_bound() { - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. 
Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let max_logs = execute_test(L1MessengerTestData { - l2_to_l1_logs: L2ToL1Log::MIN_L2_L1_LOGS_TREE_SIZE, - ..Default::default() - }); - - let max_messages = execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; 0]; MAX_PUBDATA_PER_BLOCK as usize / L2ToL1Log::SERIALIZED_SIZE], - ..Default::default() - }); - - let long_message = execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize]; 1], - ..Default::default() - }); - - let max_bytecodes = execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long - bytecodes: vec![vec![0; 32]; MAX_PUBDATA_PER_BLOCK as usize / 32], - ..Default::default() - }); - - let long_bytecode = execute_test(L1MessengerTestData { - // We have to add 48 since a valid bytecode must have an odd number of 32 byte words - bytecodes: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize + 48]; 1], - ..Default::default() - }); - - let lots_of_small_repeated_writes = execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_PUBDATA_PER_BLOCK as usize / 5), - ..Default::default() - }); - - let lots_of_big_repeated_writes = execute_test(L1MessengerTestData { - // Each big write will approximately require 32 bytes to encode - state_diffs: generate_state_diffs(true, false, MAX_PUBDATA_PER_BLOCK as usize / 32), - ..Default::default() - }); - - let lots_of_small_initial_writes = execute_test(L1MessengerTestData { - // Each initial write will take at least 32 bytes for derived key + 5 bytes for value - state_diffs: generate_state_diffs(false, true, MAX_PUBDATA_PER_BLOCK as usize / 37), - ..Default::default() - }); - - let lots_of_large_initial_writes = execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 32 bytes for value - state_diffs: generate_state_diffs(false, false, MAX_PUBDATA_PER_BLOCK as usize / 64), - ..Default::default() - }); - - let max_used_gas = vec![ - max_logs, - max_messages, - long_message, - max_bytecodes, - long_bytecode, - lots_of_small_repeated_writes, - lots_of_big_repeated_writes, - lots_of_small_initial_writes, - lots_of_large_initial_writes, - ] - .into_iter() - .max() - .unwrap(); - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
- assert!( - max_used_gas * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs deleted file mode 100644 index 47e047ebbf7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs +++ /dev/null @@ -1,56 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs deleted file mode 100644 index 9db5e7326e7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs deleted file mode 100644 index 1a4c026a23f..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs deleted file mode 100644 index ecc2fdfe6c0..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled}, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.00226, 0.00077, 0.1195, 0.1429, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs deleted file mode 100644 index be8e253c6d8..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs +++ /dev/null @@ -1,78 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - HistoryEnabled, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs deleted file mode 100644 index 9dfda9e1a68..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
-#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs deleted file mode 100644 index a7cbcd8e295..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - 
assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs deleted file mode 100644 index 75517138db3..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_1_4_1::tests::tester::VmTesterBuilder; -use crate::vm_1_4_1::types::inputs::system_env::TxExecutionMode; -use crate::vm_1_4_1::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs deleted file mode 100644 index 7644064f4af..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs deleted file mode 100644 index e98fc23c6eb..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs +++ /dev/null @@ -1,189 +0,0 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - Execute, ExecuteTransactionCommon, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} - -#[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
- - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: L1_MESSENGER_ADDRESS, - value: 0.into(), - factory_deps: None, - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs deleted file mode 100644 index 073d9ce5800..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! - -use zk_evm_1_4_1::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. 
- vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. - - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - 
l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + 
TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs deleted file mode 100644 index a07608121bc..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// `mod invalid_bytecode;` -mod block_tip; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs deleted file mode 100644 index 915a802b1e8..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs +++ /dev/null @@ -1,188 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_1_4_1::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
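The dozen cases below are easier to follow with the nonce rules spelled out. A rough, self-contained model of those rules for sequential ordering only (the real NonceHolder system contract keeps this state per account, supports switching to arbitrary ordering, which roughly drops the previous-nonce requirement, and uses its own constants; the increment bound below is illustrative):

use std::collections::BTreeSet;

struct NonceModel {
    min_nonce: u64,
    used: BTreeSet<u64>,
}

impl NonceModel {
    // Nonces below the minimal nonce count as used.
    fn is_used(&self, nonce: u64) -> bool {
        nonce < self.min_nonce || self.used.contains(&nonce)
    }

    // "Reusing the same nonce twice" / "Previous nonce has not been used"
    fn mark_used(&mut self, nonce: u64) -> Result<(), &'static str> {
        if self.is_used(nonce) {
            return Err("Reusing the same nonce twice");
        }
        if nonce > 0 && !self.is_used(nonce - 1) {
            return Err("Previous nonce has not been used");
        }
        self.used.insert(nonce);
        Ok(())
    }

    // "The value for incrementing the nonce is too high"
    fn increase_min_nonce(&mut self, by: u64) -> Result<(), &'static str> {
        if by > 1u64 << 32 {
            return Err("The value for incrementing the nonce is too high");
        }
        self.min_nonce += by;
        Ok(())
    }
}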
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs deleted file mode 100644 index 37e871fbc70..00000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs +++ /dev/null @@ -1,136 +0,0 @@ -use zk_evm_1_4_1::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs deleted file mode 100644 index 8700eb14b53..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs +++ /dev/null @@ -1,166 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs deleted file mode 100644 index aebc956e673..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs deleted file mode 100644 index 2ae942c2652..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_1::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct 
nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for calculating the number 
of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer> for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs deleted file mode 100644 index 384bc4cf325..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, -}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - 
let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs deleted file mode 100644 index 11e9d7fd6df..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - vm_1_4_1::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d980..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs deleted file mode 100644 index 443acf71676..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - 
VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_1_4_1::{tests::tester::vm_tester::VmTester, HistoryEnabled}, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - 
TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs deleted file mode 100644 index 24bd0b4d0bc..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - 
self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs deleted file mode 100644 index 02c7590c1be..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_1_4_1::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs deleted file mode 100644 index af3701d919f..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs +++ /dev/null @@ -1,355 +0,0 @@ -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_1_4_1::{ - tests::{ - 
tester::VmTesterBuilder, - utils::{read_complex_upgrade, verify_required_storage}, - }, - HistoryEnabled, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an 
address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs deleted file mode 100644 index da69c107a20..00000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs
+++ /dev/null
@@ -1,121 +0,0 @@
-use ethabi::Contract;
-use once_cell::sync::Lazy;
-use zksync_contracts::{
-    load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode,
-};
-use crate::interface::storage::{StoragePtr, WriteStorage};
-use zksync_types::{
-    utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256,
-};
-use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
-
-use crate::vm_1_4_1::{
-    tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode,
-};
-
-pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
-    Lazy::new(BaseSystemContracts::load_from_disk);
-
-// Probably make it a part of vm tester
-pub(crate) fn verify_required_storage<H: HistoryMode>(
-    state: &ZkSyncVmState<InMemoryStorageView, H>,
-    required_values: Vec<(H256, StorageKey)>,
-) {
-    for (required_value, key) in required_values {
-        let current_value = state.storage.storage.read_from_storage(&key);
-
-        assert_eq!(
-            u256_to_h256(current_value),
-            required_value,
-            "Invalid value at key {key:?}"
-        );
-    }
-}
-
-pub(crate) fn verify_required_memory<H: HistoryMode>(
-    state: &ZkSyncVmState<InMemoryStorageView, H>,
-    required_values: Vec<(U256, u32, u32)>,
-) {
-    for (required_value, memory_page, cell) in required_values {
-        let current_value = state
-            .memory
-            .read_slot(memory_page as usize, cell as usize)
-            .value;
-        assert_eq!(current_value, required_value);
-    }
-}
-
-pub(crate) fn get_balance<S: WriteStorage>(
-    token_id: AccountTreeId,
-    account: &Address,
-    main_storage: StoragePtr<S>,
-) -> U256 {
-    let key = storage_key_for_standard_token_balance(token_id, account);
-    h256_to_u256(main_storage.borrow_mut().read_value(&key))
-}
-
-pub(crate) fn read_test_contract() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json")
-}
-
-pub(crate) fn get_bootloader(test: &str) -> SystemContractCode {
-    let bootloader_code = read_zbin_bytecode(format!(
-        "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin",
-        test
-    ));
-
-    let bootloader_hash = hash_bytecode(&bootloader_code);
-    SystemContractCode {
-        code: bytes_to_be_words(bootloader_code),
-        hash: bootloader_hash,
-    }
-}
-
-pub(crate) fn read_nonce_holder_tester() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json")
-}
-
-pub(crate) fn read_error_contract() -> Vec<u8> {
-    read_bytecode(
-        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
-    )
-}
-
-pub(crate) fn get_execute_error_calldata() -> Vec<u8> {
-    let test_contract = load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
-    );
-
-    let function = test_contract.function("require_short").unwrap();
-
-    function
-        .encode_input(&[])
-        .expect("failed to encode parameters")
-}
-
-pub(crate) fn read_many_owners_custom_account_contract() -> (Vec<u8>, Contract) {
-    let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json";
-    (read_bytecode(path), load_contract(path))
-}
-
-pub(crate) fn read_max_depth_contract() -> Vec<u8> {
-    read_zbin_bytecode(
-        "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin",
-    )
-}
-
-pub(crate) fn read_precompiles_contract() -> Vec<u8> {
-    read_bytecode(
-        "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json",
-    )
-}
-
-pub(crate) fn read_complex_upgrade() -> 
Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs deleted file mode 100644 index 8578b73ccfa..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs +++ /dev/null @@ -1,399 +0,0 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log, - writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder}, - tracers::PubdataTracer, - TracerDispatcher, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .collect::>(); - - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(encoded_calls)]) - .unwrap() -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn 
execute_test(test_data: L1MessengerTestData) -> TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, - calldata: data, - value: U256::zero(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful for input: {:?}", - test_data - ); - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used - ); - - TestStatistics { - max_used_gas: ergs_before - ergs_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has odd number of 32 byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned 
by division by 32 is odd - if length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} - -#[test] -#[allow(clippy::vec_init_then_push)] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let mut statistics = Vec::new(); - - // max logs - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }); - - // max messages - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }); - - // long message - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }); - - // max bytecodes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
- // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }); - - // long bytecode - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; 1], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }); - - // lots of small repeated writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }); - - // lots of big repeated writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs(true, false, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }); - - // lots of small initial writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs(false, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }); - - // lots of large initial writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs(false, false, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }); - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
- let max_used_gas = statistics - .iter() - .map(|s| (s.statistics.max_used_gas, s.tag.clone())) - .max() - .unwrap(); - assert!( - max_used_gas.0 * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", - max_used_gas.1, - max_used_gas.0, - BOOTLOADER_BATCH_TIP_OVERHEAD - ); - - let circuit_statistics = statistics - .iter() - .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) - .max() - .unwrap(); - assert!( - circuit_statistics.0 * 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", - circuit_statistics.1, - circuit_statistics.0, - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD - ); - - let execution_metrics_size = statistics - .iter() - .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) - .max() - .unwrap(); - assert!( - execution_metrics_size.0 * 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", - execution_metrics_size.1, - execution_metrics_size.0, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs deleted file mode 100644 index 8d69d05c444..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs +++ /dev/null @@ -1,55 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_bootloader_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs deleted file mode 100644 index dd91d6d94a9..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs +++ /dev/null @@ -1,40 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use 
zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs deleted file mode 100644 index 2fafb7e51aa..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. 
-#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. - let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs deleted file mode 100644 index 7d0dfd1ed0e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder}, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. 
-#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.003438, 0.00077, 0.1195, 0.1429, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs deleted file mode 100644 index b0717a57c56..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs +++ /dev/null @@ -1,77 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. 
- let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs deleted file mode 100644 index b84e9d32126..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs +++ /dev/null @@ -1,44 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs deleted file mode 100644 index cfe3e1bfc23..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that 
`get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs deleted file mode 100644 index c79fcd8ba8e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_1_4_2::tests::tester::VmTesterBuilder; -use crate::vm_1_4_2::types::inputs::system_env::TxExecutionMode; -use crate::vm_1_4_2::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs deleted file mode 100644 index 7da250ef7a9..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs deleted file mode 100644 index 021f5554873..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs +++ /dev/null @@ -1,188 +0,0 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - Execute, ExecuteTransactionCommon, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} - -#[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
- - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: L1_MESSENGER_ADDRESS, - value: 0.into(), - factory_deps: None, - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs deleted file mode 100644 index f722890f474..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! - -use zk_evm_1_4_1::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. 
- vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. - - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
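[Editor's aside] The two checks exercised just above (the first L2 block's timestamp must be at least the batch timestamp, and its number must be non-zero) can be summarized in a small standalone validation sketch. The struct and the error strings below are illustrative stand-ins, not the bootloader's actual types or messages beyond what the tests assert.

struct L2BlockInfo {
    number: u32,
    timestamp: u64,
}

// Simplified form of the first-block checks the deleted tests expect to fail
// with `Halt::FailedToSetL2Block`.
fn check_first_l2_block(block: &L2BlockInfo, batch_timestamp: u64) -> Result<(), String> {
    if block.number == 0 {
        return Err("L2 block number is never expected to be zero".to_string());
    }
    if block.timestamp < batch_timestamp {
        return Err(
            "The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch"
                .to_string(),
        );
    }
    Ok(())
}

fn main() {
    assert!(check_first_l2_block(&L2BlockInfo { number: 1, timestamp: 0 }, 1).is_err());
    assert!(check_first_l2_block(&L2BlockInfo { number: 0, timestamp: 1 }, 1).is_err());
    assert!(check_first_l2_block(&L2BlockInfo { number: 1, timestamp: 1 }, 1).is_ok());
}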
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = 
default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - 
TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs deleted file mode 100644 index a07608121bc..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// `mod invalid_bytecode;` -mod block_tip; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs deleted file mode 100644 index 9f1be4ec947..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs +++ /dev/null @@ -1,187 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_1_4_2::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
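[Editor's aside] The `set_manual_l2_block_info` helper shown earlier in this hunk writes four words per transaction (number, timestamp, previous block hash, max virtual blocks) into the bootloader heap at a fixed per-transaction offset. A standalone sketch of that slot arithmetic follows; the constant values are placeholders, not the real values from `vm_1_4_2::constants`.

// Placeholder values standing in for TX_OPERATOR_L2_BLOCK_INFO_OFFSET and
// TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO.
const BLOCK_INFO_OFFSET: usize = 1_000;
const SLOTS_PER_BLOCK_INFO: usize = 4;

// Returns the heap slots holding (number, timestamp, prev_block_hash,
// max_virtual_blocks_to_create) for the transaction with the given index.
fn l2_block_info_slots(tx_number: usize) -> [usize; 4] {
    let base = BLOCK_INFO_OFFSET + SLOTS_PER_BLOCK_INFO * tx_number;
    [base, base + 1, base + 2, base + 3]
}

fn main() {
    assert_eq!(l2_block_info_slots(0), [1_000, 1_001, 1_002, 1_003]);
    assert_eq!(l2_block_info_slots(2), [1_008, 1_009, 1_010, 1_011]);
}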
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs deleted file mode 100644 index 0a799288204..00000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs +++ /dev/null @@ -1,135 +0,0 @@ -use zk_evm_1_4_1::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
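[Editor's aside] The keccak and sha256 tests above count precompile invocations by filtering a recorded cycle history by precompile address. A simplified standalone counter is sketched below; the enum and the shape of the history are assumptions for illustration, not the actual zk_evm types.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Precompile {
    Keccak256,
    Sha256,
    Ecrecover,
}

// Counts how many recorded cycle entries belong to the given precompile.
fn count_calls(history: &[(Precompile, u32)], target: Precompile) -> usize {
    history.iter().filter(|(p, _)| *p == target).count()
}

fn main() {
    let history = vec![
        (Precompile::Keccak256, 10),
        (Precompile::Ecrecover, 1),
        (Precompile::Keccak256, 12),
    ];
    assert_eq!(count_calls(&history, Precompile::Keccak256), 2);
    assert_eq!(count_calls(&history, Precompile::Ecrecover), 1);
    assert_eq!(count_calls(&history, Precompile::Sha256), 0);
}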
- let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs deleted file mode 100644 index 5586450f34b..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_test_account::TxType; -use zksync_types::{utils::deployed_address_create, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::PrestateTracer, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, -}; - -#[test] -fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - vm.deploy_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm.test_contract.unwrap(), - false, - Default::default(), - true, - TxType::L2, - ); - vm.vm.push_transaction(tx1); - - let contract_address = vm.test_contract.unwrap(); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - assert!(prestate_result.1.contains_key(&contract_address)); -} - -#[test] -fn test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); - vm.test_contract = Some(deployed_address); - - // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - 
.get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce2 = tx2.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); - - let account = &mut vm.rich_accounts[0]; - - //enter ether to contract to see difference in the balance post execution - let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), - calldata: Default::default(), - value: U256::from(100000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); - - let tx1 = Execute { - contract_address: deployed_address2, - calldata: Default::default(), - value: U256::from(200000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx1, None)); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm - .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - //assert that the pre-state contains both deployed contracts with balance zero - assert!(prestate_result.0.contains_key(&deployed_address)); - assert!(prestate_result.0.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.0[&deployed_address].balance, - Some(U256::zero()) - ); - assert_eq!( - prestate_result.0[&deployed_address2].balance, - Some(U256::zero()) - ); - - //assert that the post-state contains both deployed contracts with the correct balance - assert!(prestate_result.1.contains_key(&deployed_address)); - assert!(prestate_result.1.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.1[&deployed_address].balance, - Some(U256::from(100000)) - ); - assert_eq!( - prestate_result.1[&deployed_address2].balance, - Some(U256::from(200000)) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs deleted file mode 100644 index 401c2c12a43..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. 
- assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm.push_raw_transaction( - tx.clone(), - overhead, - result.refunds.gas_refunded as u32, - true, - ); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund as u32, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs deleted file mode 100644 index 15f4504d6e1..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs +++ /dev/null @@ -1,164 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs deleted file mode 100644 index 2ce18cc0136..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_1::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct 
nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for 
calculating the number of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer> for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs deleted file mode 100644 index 57b37e67b76..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs +++ /dev/null @@ -1,78 +0,0 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::tester::{TxType, VmTesterBuilder}, -}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - 
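[Editor's aside] The `MaxRecursionTracer` shown earlier in this hunk stops VM execution once the call-stack depth exceeds a configured limit. A standalone sketch of that decision logic follows; the enum only loosely mirrors `TracerExecutionStatus`, and the types are illustrative assumptions.

#[derive(Debug, PartialEq)]
enum ExecutionStatus {
    Continue,
    Stop,
}

struct MaxRecursionGuard {
    max_recursion_depth: usize,
}

impl MaxRecursionGuard {
    // Called once per VM cycle with the current call-stack depth.
    fn finish_cycle(&self, current_depth: usize) -> ExecutionStatus {
        if current_depth > self.max_recursion_depth {
            ExecutionStatus::Stop
        } else {
            ExecutionStatus::Continue
        }
    }
}

fn main() {
    let guard = MaxRecursionGuard { max_recursion_depth: 15 };
    assert_eq!(guard.finish_cycle(10), ExecutionStatus::Continue);
    assert_eq!(guard.finish_cycle(16), ExecutionStatus::Stop);
}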
vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs deleted file mode 100644 index d6c072d1b1e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - vm_1_4_2::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
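[Editor's aside] `ModifiedKeysMap` above compares two maps while treating a missing key as equal to a zero value. A standalone sketch of that symmetric comparison over plain integer keys and values is given below; the types are simplified assumptions.

use std::collections::HashMap;

// Two maps are "equal" if every key present in either map has the same value
// in both, counting a missing entry as the default (zero) value.
fn maps_equal_with_default(a: &HashMap<u64, u64>, b: &HashMap<u64, u64>) -> bool {
    a.iter().all(|(k, v)| *v == b.get(k).copied().unwrap_or_default())
        && b.iter().all(|(k, v)| *v == a.get(k).copied().unwrap_or_default())
}

fn main() {
    let mut a = HashMap::new();
    a.insert(1, 5);
    a.insert(2, 0); // explicit zero

    let mut b = HashMap::new();
    b.insert(1, 5); // key 2 is absent, which counts as zero

    assert!(maps_equal_with_default(&a, &b));

    b.insert(3, 7);
    assert!(!maps_equal_with_default(&a, &b));
}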
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d980..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs deleted file mode 100644 index cb81c4c5ed7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - 
VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_1_4_2::tests::tester::vm_tester::VmTester, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - 
TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs deleted file mode 100644 index 44f861f8d33..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> 
Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
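As a usage sketch (assuming the builder methods and the tests' `utils` helpers such as `read_error_contract` and `get_execute_error_calldata` are in scope; this is not runnable outside the crate's test code), a test that needs a pre-deployed contract injects it through `with_custom_contracts`, which routes the bytecode into `insert_contracts` below and so bypasses the deployer system contract:

```rust
// Illustrative only: mirrors how the tests removed in this patch wire in a
// custom contract and then call it without a deployment transaction.
let contract_address = H160::random();
let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
    .with_empty_in_memory_storage()
    .with_custom_contracts(vec![(read_error_contract(), contract_address, false)])
    .with_execution_mode(TxExecutionMode::VerifyExecute)
    .with_random_rich_accounts(1)
    .build();

// The contract's code hash and factory dependency are already in storage.
let account = &mut vm.rich_accounts[0];
let tx = account.get_l2_tx_for_execute(
    Execute {
        contract_address,
        calldata: get_execute_error_calldata(),
        value: Default::default(),
        factory_deps: Some(vec![]),
    },
    None,
);
vm.vm.push_transaction(tx);
```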
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs deleted file mode 100644 index 138e8041e6a..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs +++ /dev/null @@ -1,51 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_1_4_2::tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs deleted file mode 100644 index 2af2928b1c4..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs +++ /dev/null @@ -1,352 +0,0 @@ -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_upgrade::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_1_4_2::tests::{ - tester::VmTesterBuilder, 
- utils::{read_complex_upgrade, verify_required_storage}, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
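The protocol-upgrade test above leans on the same snapshot/rollback facilities that `execute_tx_and_verify` uses; a condensed sketch of that pattern (crate-internal helpers assumed in scope, not runnable standalone):

```rust
// Capture the full VM state, run a transaction, then restore the snapshot.
// Comparing `dump_inner_state()` before and after proves the rollback is exact.
let state_before = vm.vm.dump_inner_state();
vm.vm.make_snapshot();
vm.vm.push_transaction(normal_l1_transaction.clone());
let result = vm.vm.execute(VmExecutionMode::OneTx);
assert!(!result.result.is_failed());

vm.vm.rollback_to_the_latest_snapshot();
assert_eq!(state_before, vm.vm.dump_inner_state());
```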
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - 
// The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs deleted file mode 100644 
index 5655e90fb4e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs +++ /dev/null @@ -1,121 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_1_4_2::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - 
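The helpers in this `utils` module are typically combined as below when a test asserts the storage effects of a deployment; the pattern matches the account-abstraction and L1-execution tests later in this patch, and `bytecode_hash`/`address` are assumed to come from a `DeployContractsTx` as in those tests (crate-internal, sketch only):

```rust
// After executing a deploy transaction, check that the "known code" marker
// and the account's code-hash slot hold the expected values.
let expected_slots = vec![
    (u256_to_h256(U256::from(1u32)), get_known_code_key(&bytecode_hash)),
    (bytecode_hash, get_code_key(&address)),
];
verify_required_storage(&vm.vm.state, expected_slots);
```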
-pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs deleted file mode 100644 index 57229abb097..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs +++ /dev/null @@ -1,56 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs deleted file mode 100644 index ad1b0f26036..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs deleted file mode 100644 index e9df4fa80ff..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs deleted file mode 100644 index b0cffa7d3c8..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs +++ /dev/null @@ -1,66 +0,0 @@ -use circuit_sequencer_api_1_4_0::geometry_config::get_geometry_config; -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let statistic = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - assert!(statistic.main_vm > f32::EPSILON); - assert!(statistic.ram_permutation > f32::EPSILON); - assert!(statistic.storage_application > f32::EPSILON); - assert!(statistic.storage_sorter > f32::EPSILON); - assert!(statistic.code_decommitter > f32::EPSILON); - assert!(statistic.code_decommitter_sorter > f32::EPSILON); - assert!(statistic.log_demuxer > f32::EPSILON); - assert!(statistic.events_sorter > f32::EPSILON); - assert!(statistic.keccak256 > f32::EPSILON); - // Single `ecrecover` should be used to validate tx signature. - assert_eq!( - statistic.ecrecover, - 1.0 / get_geometry_config().cycles_per_ecrecover_circuit as f32 - ); - // `sha256` shouldn't be used. 
- assert_eq!(statistic.sha256, 0.0); - - const EXPECTED_CIRCUITS_USED: f32 = 4.6363; - let delta = (statistic.total_f32() - EXPECTED_CIRCUITS_USED) / EXPECTED_CIRCUITS_USED; - - if delta.abs() > 0.1 { - panic!( - "Estimation differs from expected result by too much: {}%, expected value: {}, got {}", - delta * 100.0, - EXPECTED_CIRCUITS_USED, - statistic.total_f32(), - ); - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs deleted file mode 100644 index a8c20cfebc1..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs +++ /dev/null @@ -1,76 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs deleted file mode 100644 index 637fd94c1c8..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs deleted file mode 100644 index 658bcd75b05..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let 
result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs deleted file mode 100644 index 079e6d61b6c..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_boojum_integration::tests::tester::VmTesterBuilder; -use crate::vm_boojum_integration::types::inputs::system_env::TxExecutionMode; -use crate::vm_boojum_integration::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
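The long hard-coded `data` vectors in this test (and in `transaction_test_info.rs` above) are ABI-encoded `Error(string)` payloads: the 4-byte selector `0x08c379a0`, a 32-byte offset word, a 32-byte length word, and the UTF-8 message right-padded to a 32-byte boundary. A standalone sketch that rebuilds such a payload (the helper name is made up for illustration):

```rust
// Reconstructs the revert-data layout used by the hard-coded vectors above.
fn encode_error_string(msg: &str) -> Vec<u8> {
    fn word(value: u64) -> [u8; 32] {
        let mut w = [0u8; 32];
        w[24..].copy_from_slice(&value.to_be_bytes());
        w
    }
    let mut out = vec![0x08, 0xc3, 0x79, 0xa0]; // selector of `Error(string)`
    out.extend_from_slice(&word(32)); // offset to the string data
    out.extend_from_slice(&word(msg.len() as u64)); // string length
    let mut padded = msg.as_bytes().to_vec();
    padded.resize((msg.len() + 31) / 32 * 32, 0); // right-pad to 32 bytes
    out.extend_from_slice(&padded);
    out
}

fn main() {
    let data = encode_error_string("Incorrect nonce");
    assert_eq!(data[..4], [8u8, 195, 121, 160]);
    assert_eq!(data.len(), 4 + 32 + 32 + 32);
    assert_eq!(data[67], 15); // length word ends with the message length
}
```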
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs deleted file mode 100644 index 67901490edf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs deleted file mode 100644 index b547f346d28..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs +++ /dev/null @@ -1,139 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rolling hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in L1Messenger - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs deleted file mode 100644 index d637d583c0e..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
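The l2_blocks tests removed below exercise the bootloader's L2 block header checks: a new block must increment the block number by exactly one, strictly increase the timestamp, and reference the hash of the previous block, while the first block of a batch must not have number zero and must not predate the batch timestamp. A minimal, self-contained sketch of the per-block invariants follows; `BlockHeader` and `validate_next` are hypothetical names introduced only for illustration and are not part of the multivm API.

    // Sketch of the header rules asserted (via bootloader halts) in the deleted tests.
    #[derive(Clone, Debug, PartialEq)]
    struct BlockHeader {
        number: u32,
        timestamp: u64,
        prev_block_hash: [u8; 32],
    }

    fn validate_next(prev: &BlockHeader, hash_of_prev: [u8; 32], next: &BlockHeader) -> Result<(), String> {
        if next.number != prev.number + 1 {
            return Err("Invalid new L2 block number".into());
        }
        if next.timestamp <= prev.timestamp {
            return Err("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".into());
        }
        if next.prev_block_hash != hash_of_prev {
            return Err("The current L2 block hash is incorrect".into());
        }
        Ok(())
    }

    fn main() {
        let prev = BlockHeader { number: 1, timestamp: 1, prev_block_hash: [0; 32] };
        let hash_of_prev = [7u8; 32]; // stands in for the stored hash of `prev`
        let next = BlockHeader { number: 2, timestamp: 2, prev_block_hash: hash_of_prev };
        assert!(validate_next(&prev, hash_of_prev, &next).is_ok());
        // Reusing a stale hash reproduces the "hash is incorrect" halt asserted in the tests.
        let stale = BlockHeader { prev_block_hash: [9u8; 32], ..next.clone() };
        assert!(validate_next(&prev, hash_of_prev, &stale).is_err());
    }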
- -use zk_evm_1_4_0::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - 
l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + 
TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs deleted file mode 100644 index 95377232b3e..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs deleted file mode 100644 index 44ba3e4e323..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs +++ /dev/null @@ -1,188 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_boojum_integration::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. 
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs deleted file mode 100644 index 516331d574f..00000000000 --- 
a/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs +++ /dev/null @@ -1,136 +0,0 @@ -use zk_evm_1_4_0::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 sha256 calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 ecrecover call was made (it's done during tx validation). 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs deleted file mode 100644 index 521bd81f2ef..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs +++ /dev/null @@ -1,167 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs deleted file mode 100644 index 90c3206b24b..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs deleted file mode 100644 index cfaf1952c70..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_0::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), 
TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} 
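The layered-rollback test further down relies on the `make_snapshot` / `rollback_to_the_latest_snapshot` pair: the tracer defined here stops execution after validation has already bumped the account nonce, and the rollback is expected to undo that write. Below is a minimal, self-contained sketch of that snapshot/rollback contract; `SnapshotStore` is a hypothetical stand-in for the VM's storage oracle, not the actual implementation.

    use std::collections::HashMap;

    #[derive(Default)]
    struct SnapshotStore {
        values: HashMap<String, u64>,
        snapshot: Option<HashMap<String, u64>>,
    }

    impl SnapshotStore {
        fn make_snapshot(&mut self) {
            self.snapshot = Some(self.values.clone());
        }
        fn rollback_to_the_latest_snapshot(&mut self) {
            if let Some(snap) = self.snapshot.take() {
                self.values = snap;
            }
        }
        fn set(&mut self, key: &str, value: u64) {
            self.values.insert(key.to_string(), value);
        }
        fn get(&self, key: &str) -> u64 {
            self.values.get(key).copied().unwrap_or_default()
        }
    }

    fn main() {
        let mut store = SnapshotStore::default();
        store.set("nonce", 0);
        store.make_snapshot();
        store.set("nonce", 1); // e.g. the nonce bump performed during tx validation
        assert_eq!(store.get("nonce"), 1);
        store.rollback_to_the_latest_snapshot();
        assert_eq!(store.get("nonce"), 0); // the rollback reverts all changes made after the snapshot
    }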
- -/// Tracer responsible for calculating the number of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer> for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs deleted file mode 100644 index f6b1d83e02a..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, -}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - 
vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs deleted file mode 100644 index 078a971e4bf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs +++ /dev/null @@ -1,130 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_0::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::{ - vm_boojum_integration::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e0..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs 
b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs deleted file mode 100644 index 4d6572fe78a..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_boojum_integration::{tests::tester::vm_tester::VmTester, HistoryEnabled}, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs deleted file mode 100644 index fcea03e12cc..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs +++ /dev/null @@ -1,295 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - 
self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs deleted file mode 100644 index 8c538dcf9bf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_boojum_integration::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs deleted file mode 100644 index bc3d62f62a1..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs +++ /dev/null @@ -1,362 +0,0 @@ -use zk_evm_1_4_0::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::read_test_contract; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, 
VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_boojum_integration::{ - tests::{tester::VmTesterBuilder, utils::verify_required_storage}, - HistoryEnabled, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an 
address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implenentaiton itself -// For the explanatation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - 
"etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs deleted file mode 100644 index 4fba188ac5b..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs +++ /dev/null @@ -1,111 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_boojum_integration::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - 
(read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs deleted file mode 100644 index 23b250d485b..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::U256; - -use crate::interface::{Halt, TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, -}; - -use crate::interface::ExecutionResult; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs deleted file mode 100644 index b2c126dea00..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs +++ /dev/null @@ -1,37 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account 
= &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs deleted file mode 100644 index fb2d3389407..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::BLOCK_GAS_LIMIT; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_refunds_enhancement::{CallTracer, HistoryEnabled}; -use once_cell::sync::OnceCell; -use std::sync::Arc; -use zksync_types::{Address, Execute}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs deleted file mode 100644 index 92e043ae96f..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs +++ /dev/null @@ -1,70 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{ - get_balance, read_test_contract, verify_required_storage, -}; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs deleted file mode 100644 index 1ff6ce12557..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; - -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; - -use crate::interface::TxExecutionMode; -use crate::vm_refunds_enhancement::HistoryDisabled; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs deleted file mode 100644 index 8c121db3e43..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs +++ /dev/null @@ -1,104 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; - -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_refunds_enhancement::{HistoryDisabled, HistoryMode, Vm}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = 
Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs deleted file mode 100644 index 88ed141630a..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::types::inputs::system_env::TxExecutionMode; -use crate::vm_refunds_enhancement::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs deleted file mode 100644 index d7b96133000..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::HistoryDisabled; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs deleted file mode 100644 index 138879cd7ed..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs +++ /dev/null @@ -1,125 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 3 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rollout hash - - let basic_initial_writes = 1; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }]; - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs deleted file mode 100644 index 269b6cf396c..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs +++ /dev/null @@ -1,498 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
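The removed `l2_blocks.rs` tests all follow the same shape: build a `VmTester`, override the pending `L2BlockEnv`, push a no-op L1 transaction so the bootloader runs its L2-block consistency checks, and assert on the resulting `Halt::FailedToSetL2Block` reason. A minimal sketch of that flow is below; it only reuses helpers visible in this removed module (`VmTesterBuilder`, `legacy_miniblock_hash`, the `get_l1_noop` helper defined just after these imports), the halt message is the one the original `test_l2_block_initialization_timestamp` asserts, and the test name here is an arbitrary illustrative choice rather than one from the codebase.

```rust
use zksync_types::{block::legacy_miniblock_hash, MiniblockNumber};

use crate::{
    interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode},
    vm_refunds_enhancement::{tests::tester::VmTesterBuilder, HistoryEnabled},
};

#[test]
fn l2_block_timestamp_must_not_precede_batch() {
    let mut vm = VmTesterBuilder::new(HistoryEnabled)
        .with_empty_in_memory_storage()
        .with_execution_mode(TxExecutionMode::VerifyExecute)
        .with_random_rich_accounts(1)
        .build();

    // Force the first miniblock of the batch to have timestamp 0, which is
    // older than the batch timestamp picked by `default_l1_batch`.
    vm.vm.bootloader_state.push_l2_block(L2BlockEnv {
        number: 1,
        timestamp: 0,
        prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)),
        max_virtual_blocks_to_create: 1,
    });

    // A no-op L1 transaction (see `get_l1_noop` in this module) is enough to
    // make the bootloader validate the L2 block info.
    vm.vm.push_transaction(get_l1_noop());
    let res = vm.vm.execute(VmExecutionMode::OneTx);

    assert_eq!(
        res.result,
        ExecutionResult::Halt {
            reason: Halt::FailedToSetL2Block(
                "The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch"
                    .to_string()
            )
        }
    );
}
```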
- -use crate::interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_refunds_enhancement::tests::tester::default_l1_batch; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_refunds_enhancement::{HistoryEnabled, HistoryMode, Vm}; -use zk_evm_1_3_3::aux_structures::Timestamp; -use crate::interface::storage::{ReadStorage, WriteStorage}; -use zksync_system_constants::{ - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -}; -use zksync_types::block::{pack_block_info, unpack_block_info}; -use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, - get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number 
+= 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash: miniblock_hash( - MiniblockNumber(1), - 1, - legacy_miniblock_hash(MiniblockNumber(0)), - H256::zero(), - ), - max_virtual_blocks_to_create: 1, - }, - None, - ); - - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), - max_virtual_blocks_to_create: 1 - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -#[test] -fn test_l2_block_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - vm.vm - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - let l1_tx = get_l1_noop(); - // Firstly we execute the first transaction - 
vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -#[test] -fn test_l2_block_upgrade_ending() { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch.clone()) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - let storage = vm.storage.clone(); - - storage - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - vm.vm.push_transaction(l1_tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed(), "No revert reason expected"); - - let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, - )); - - let (virtual_block_number, virtual_block_timestamp) = - unpack_block_info(h256_to_u256(virtual_block_info)); - - assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); - assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); - vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs deleted file mode 100644 index ffb38dd3725..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs deleted file mode 100644 index 21959461906..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs +++ /dev/null @@ -1,181 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::interface::TxExecutionMode; -use crate::interface::VmRevertReason; -use crate::interface::{ExecutionResult, Halt, TxRevertReason, 
VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_nonce_holder_tester; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. 
- run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs deleted file mode 100644 index 54c281a9939..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; - -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - 
bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.deduplicated_events_logs, - current_state_without_predefined_refunds.deduplicated_events_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.deduplicated_events_logs, - current_state_without_predefined_refunds.deduplicated_events_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs deleted file mode 100644 index 03a704841b0..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs +++ /dev/null @@ -1,163 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; - -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_eth_signer::EthereumSigner; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::TransactionRequest; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{ - AccountTreeId, Address, Eip712Domain, Execute, L2ChainId, Nonce, Transaction, U256, -}; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, VmTester, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_many_owners_custom_account_contract; -use crate::vm_refunds_enhancement::HistoryDisabled; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and 
EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs deleted file mode 100644 index 8107ddcdabf..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs +++ /dev/null @@ -1,259 +0,0 @@ -use ethabi::Token; - -use zksync_contracts::get_loadnext_contract; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; - -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{ - DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, -}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::{ - BootloaderState, DynTracer, HistoryEnabled, HistoryMode, TracerExecutionStatus, - TracerExecutionStopReason, VmTracer, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - 
TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct 
MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for calculating the number of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - vec![Box::new(MaxRecursionTracer { - max_recursion_depth: 15, - })], - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.inspect(vec![], VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs deleted file mode 100644 index eb5e3879837..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::interface::{ExecutionResult, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::HistoryDisabled; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = 
account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs deleted file mode 100644 index 3158fc49444..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs +++ /dev/null @@ -1,127 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_3_3::aux_structures::Timestamp; -use zk_evm_1_3_3::vm_state::VmLocalState; -use crate::interface::storage::WriteStorage; - -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::vm_refunds_enhancement::old_vm::event_sink::InMemoryEventSink; -use crate::vm_refunds_enhancement::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HistoryRecorder, -}; -use crate::vm_refunds_enhancement::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e0..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs deleted file mode 100644 index 8f7ecc0a733..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::interface::VmRevertReason; -use crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, -}; -use crate::vm_refunds_enhancement::tests::tester::vm_tester::VmTester; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs deleted file mode 100644 index 800af517ed3..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs +++ /dev/null @@ -1,300 +0,0 @@ -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; - -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; -use zksync_types::{ - get_code_key, get_is_account_key, Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, - ProtocolVersionId, U256, -}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::u256_to_h256; - -use crate::vm_refunds_enhancement::constants::BLOCK_GAS_LIMIT; - -use crate::interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, -}; -use crate::vm_refunds_enhancement::tests::tester::Account; -use crate::vm_refunds_enhancement::tests::tester::TxType; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::utils::l2_blocks::load_last_l2_block; -use crate::vm_refunds_enhancement::{HistoryMode, Vm}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - history_mode: H, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: legacy_miniblock_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new( - l1_batch, - self.vm.system_env.clone(), - self.storage.clone(), - self.history_mode.clone(), - ); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - history_mode: H, - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - history_mode: self.history_mode.clone(), - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(history_mode: H) -> Self { - Self { - history_mode, - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, 
system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new( - l1_batch_env, - self.system_env, - storage_ptr.clone(), - self.history_mode.clone(), - ); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - history_mode: self.history_mode, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs deleted file mode 100644 index a839f4708ad..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs +++ /dev/null @@ -1,53 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::interface::TxExecutionMode; -use crate::interface::{TxRevertReason, VmRevertReason}; -use crate::vm_refunds_enhancement::tests::tester::{ - ExpectedError, TransactionTestInfo, VmTesterBuilder, -}; -use crate::vm_refunds_enhancement::tests::utils::{ - get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs deleted file mode 100644 index cbbec9a83d5..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs +++ /dev/null @@ -1,342 +0,0 @@ -use zk_evm_1_3_3::aux_structures::Timestamp; - -use zksync_types::{ - ethabi::Contract, - Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, - {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, - {get_code_key, get_known_code_key, H160}, -}; - -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; - -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, 
VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::verify_required_storage; -use crate::vm_refunds_enhancement::HistoryEnabled; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; - -use super::utils::read_test_contract; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. -#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = 
deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(&params) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation of the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs deleted file mode 100644 index ffbb9d89260..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs +++
/dev/null @@ -1,106 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; - -use crate::vm_refunds_enhancement::tests::tester::InMemoryStorageView; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::HistoryMode; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs deleted file 
mode 100644 index a30b5a58f63..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs +++ /dev/null @@ -1,53 +0,0 @@ -use zksync_types::U256; - -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, -}; - -use crate::vm_latest::HistoryEnabled; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs deleted file mode 100644 index 773aa77e150..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs +++ /dev/null @@ -1,37 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs deleted file mode 100644 index 7ee647ee1f7..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::tracers::CallTracer; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::constants::BLOCK_GAS_LIMIT; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_virtual_blocks::tracers::traits::ToTracerPointer; -use once_cell::sync::OnceCell; -use std::sync::Arc; -use zksync_types::{Address, Execute}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect( - call_tracer.into_tracer_pointer().into(), - VmExecutionMode::OneTx, - ); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs deleted file mode 100644 index 02a69a6a5d2..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs +++ /dev/null @@ -1,70 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{ - get_balance, read_test_contract, verify_required_storage, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs deleted file mode 100644 index e51b8cab570..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; - -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; - -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs deleted file mode 100644 index 06d8191310b..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; - -use crate::HistoryMode; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_virtual_blocks::Vm; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = Account::random(); 
- let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs deleted file mode 100644 index f8074c1db10..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::interface::TxExecutionMode; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs deleted file mode 100644 index 2c7ef4a8d11..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs deleted file mode 100644 index 64d9f98ddb3..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs +++ /dev/null @@ -1,125 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 3 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rollout hash - - let basic_initial_writes = 1; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }]; - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs deleted file mode 100644 index cba534deeaf..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs +++ /dev/null @@ -1,502 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
- -use crate::interface::{ - ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface, -}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_virtual_blocks::tests::tester::default_l1_batch; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_virtual_blocks::Vm; -use crate::HistoryMode; -use zk_evm_1_3_3::aux_structures::Timestamp; -use crate::interface::storage::{ReadStorage, WriteStorage}; -use zksync_system_constants::{ - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -}; -use zksync_types::block::{pack_block_info, unpack_block_info}; -use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, - get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first block must have a timestamp that is greater than or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number cannot be zero.
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number 
+= 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash: miniblock_hash( - MiniblockNumber(1), - 1, - legacy_miniblock_hash(MiniblockNumber(0)), - H256::zero(), - ), - max_virtual_blocks_to_create: 1, - }, - None, - ); - - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), - max_virtual_blocks_to_create: 1 - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -#[test] -fn test_l2_block_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - vm.vm - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - let l1_tx = get_l1_noop(); - // Firstly we execute the first transaction - 
vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -#[test] -fn test_l2_block_upgrade_ending() { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch.clone()) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - let storage = vm.storage.clone(); - - storage - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - vm.vm.push_transaction(l1_tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed(), "No revert reason expected"); - - let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, - )); - - let (virtual_block_number, virtual_block_timestamp) = - unpack_block_info(h256_to_u256(virtual_block_info)); - - assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); - assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); - vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs deleted file mode 100644 index ffb38dd3725..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs deleted file mode 100644 index 162a3f46cb1..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs +++ /dev/null @@ -1,182 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, -}; -use crate::vm_latest::HistoryEnabled; -use 
crate::vm_virtual_blocks::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_nonce_holder_tester; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. 
- run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs deleted file mode 100644 index d0b3b7cbee3..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = 
account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.l2_to_l1_logs, - current_state_without_predefined_refunds.l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.l2_to_l1_logs, - current_state_without_predefined_refunds.l2_to_l1_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs deleted file mode 100644 index 988841e90ce..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; - -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_eth_signer::EthereumSigner; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::TransactionRequest; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, Eip712Domain, Execute, Nonce, Transaction, U256}; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{Account, VmTester, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_many_owners_custom_account_contract; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, 270.into()).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(chain_id.into()); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, chain_id.into()); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, chain_id.into()).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs deleted file mode 100644 index 240b7188377..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs +++ /dev/null @@ -1,146 +0,0 @@ -use ethabi::Token; - -use zksync_contracts::get_loadnext_contract; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; - -use zksync_types::{Execute, U256}; - -use crate::interface::TxExecutionMode; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{ - DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, -}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - 
TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs deleted file mode 100644 index c4eac73499f..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::interface::{ExecutionResult, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use 
crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs deleted file mode 100644 index a5c0db9468b..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs +++ /dev/null @@ -1,119 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_3_3::aux_structures::Timestamp; -use zk_evm_1_3_3::vm_state::VmLocalState; -use crate::interface::storage::WriteStorage; - -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::vm_virtual_blocks::old_vm::event_sink::InMemoryEventSink; -use crate::vm_virtual_blocks::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HistoryRecorder, -}; -use crate::vm_virtual_blocks::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; -use crate::HistoryMode as CommonHistoryMode; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. 
-impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e0..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git 
a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs deleted file mode 100644 index 15d3d98ab1d..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,216 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, -}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::vm_tester::VmTester; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs deleted file mode 100644 index 9fe0635eba3..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs +++ /dev/null @@ -1,291 +0,0 @@ -use std::marker::PhantomData; -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; - -use crate::HistoryMode; -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; -use zksync_types::{ - get_code_key, get_is_account_key, Address, L1BatchNumber, MiniblockNumber, Nonce, - ProtocolVersionId, U256, -}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::u256_to_h256; - -use crate::vm_virtual_blocks::constants::BLOCK_GAS_LIMIT; - -use crate::interface::{L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, VmExecutionMode}; -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::tests::tester::Account; -use crate::vm_virtual_blocks::tests::tester::TxType; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; -use crate::vm_virtual_blocks::utils::l2_blocks::load_last_l2_block; -use crate::vm_virtual_blocks::Vm; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: legacy_miniblock_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - _phantom: PhantomData, - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - _phantom: PhantomData, - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - _phantom: PhantomData, - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: 270.into(), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - self.system_env = system_env; - 
self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs deleted file mode 100644 index 8258abe0685..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::interface::{TxExecutionMode, TxRevertReason, VmRevertReason}; -use zksync_types::{Execute, H160}; - -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{ - ExpectedError, TransactionTestInfo, VmTesterBuilder, -}; -use crate::vm_virtual_blocks::tests::utils::{ - get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs deleted file mode 100644 index 8b3fa0ea291..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs +++ /dev/null @@ -1,344 +0,0 @@ -use zk_evm_1_3_3::aux_structures::Timestamp; - -use zksync_types::{ - ethabi::Contract, - Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, - {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, - {get_code_key, get_known_code_key, H160}, -}; - -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; - -use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, -}; -use 
crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::verify_required_storage; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; - -use super::utils::read_test_contract; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. -#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = 
deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implenentaiton itself -// For the explanatation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs deleted file mode 100644 index e3db232ffce..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs +++ /dev/null @@ -1,106 
+0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; - -use crate::vm_virtual_blocks::tests::tester::InMemoryStorageView; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_virtual_blocks::types::internals::ZkSyncVmState; -use crate::vm_virtual_blocks::HistoryMode; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} From b51c530672bdb679fbe720e2192ca153c9d10acd Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 7 Oct 2024 20:37:39 +0300 Subject: [PATCH 015/140] refactor(eth): Brush up `eth_signer` crate 
(#3014) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Brushes up the `eth_signer` crate so that its API is easier to use (e.g., it doesn't require an async runtime). ## Why ❔ Improved DevEx. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- Cargo.lock | 5 +- core/lib/eth_client/src/types.rs | 12 +--- core/lib/eth_signer/Cargo.toml | 9 ++- core/lib/eth_signer/src/error.rs | 1 - core/lib/eth_signer/src/lib.rs | 3 +- core/lib/eth_signer/src/pk_signer.rs | 65 +++++++++++-------- core/lib/eth_signer/src/raw_ethereum_tx.rs | 6 +- core/lib/multivm/Cargo.toml | 1 - .../versions/vm_fast/tests/require_eip712.rs | 9 ++- .../vm_latest/tests/require_eip712.rs | 9 ++- core/tests/test_account/src/lib.rs | 6 +- prover/Cargo.lock | 3 +- 12 files changed, 66 insertions(+), 63 deletions(-) delete mode 100644 core/lib/eth_signer/src/error.rs diff --git a/Cargo.lock b/Cargo.lock index 0873faae904..47d39f437c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10151,8 +10151,8 @@ dependencies = [ "async-trait", "rlp", "thiserror", - "tokio", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] @@ -10473,7 +10473,6 @@ dependencies = [ "once_cell", "pretty_assertions", "thiserror", - "tokio", "tracing", "vise", "zk_evm 0.131.0-rc.2", diff --git a/core/lib/eth_client/src/types.rs b/core/lib/eth_client/src/types.rs index 59fb1cdeddc..dd332351afb 100644 --- a/core/lib/eth_client/src/types.rs +++ b/core/lib/eth_client/src/types.rs @@ -320,7 +320,7 @@ pub struct FailureInfo { #[cfg(test)] mod tests { - use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; + use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_types::{ eth_sender::{EthTxBlobSidecarV1, SidecarBlobV1}, web3, K256PrivateKey, EIP_4844_TX_TYPE, H256, U256, U64, @@ -384,10 +384,7 @@ mod tests { .as_ref(), )]), }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction.clone()); let hash = web3::keccak256(&raw_tx).into(); // Transaction generated with https://github.com/inphi/blob-utils with @@ -493,10 +490,7 @@ mod tests { blob_versioned_hashes: Some(vec![versioned_hash_1, versioned_hash_2]), }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction); let hash = web3::keccak256(&raw_tx).into(); // Transaction generated with https://github.com/inphi/blob-utils with diff --git a/core/lib/eth_signer/Cargo.toml b/core/lib/eth_signer/Cargo.toml index f760134e09b..92bb47824f3 100644 --- a/core/lib/eth_signer/Cargo.toml +++ b/core/lib/eth_signer/Cargo.toml @@ -11,10 +11,9 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_types.workspace = true +zksync_basic_types.workspace = true +zksync_crypto_primitives.workspace = true + +async-trait.workspace = true rlp.workspace = true thiserror.workspace = true -async-trait.workspace = true - -[dev-dependencies] -tokio = { workspace = true, features = ["full"] } diff --git a/core/lib/eth_signer/src/error.rs b/core/lib/eth_signer/src/error.rs deleted file mode 100644 index 8b137891791..00000000000 --- a/core/lib/eth_signer/src/error.rs +++ /dev/null @@ 
-1 +0,0 @@ - diff --git a/core/lib/eth_signer/src/lib.rs b/core/lib/eth_signer/src/lib.rs index 3a92d47b062..8b6025eb15d 100644 --- a/core/lib/eth_signer/src/lib.rs +++ b/core/lib/eth_signer/src/lib.rs @@ -1,5 +1,6 @@ use async_trait::async_trait; -use zksync_types::{Address, EIP712TypedStructure, Eip712Domain, PackedEthSignature}; +use zksync_basic_types::Address; +use zksync_crypto_primitives::{EIP712TypedStructure, Eip712Domain, PackedEthSignature}; pub use crate::{pk_signer::PrivateKeySigner, raw_ethereum_tx::TransactionParameters}; diff --git a/core/lib/eth_signer/src/pk_signer.rs b/core/lib/eth_signer/src/pk_signer.rs index 47b0e110991..0f55425a0d5 100644 --- a/core/lib/eth_signer/src/pk_signer.rs +++ b/core/lib/eth_signer/src/pk_signer.rs @@ -1,5 +1,7 @@ -use zksync_types::{ - Address, EIP712TypedStructure, Eip712Domain, K256PrivateKey, PackedEthSignature, +use async_trait::async_trait; +use zksync_basic_types::Address; +use zksync_crypto_primitives::{ + EIP712TypedStructure, Eip712Domain, K256PrivateKey, PackedEthSignature, }; use crate::{ @@ -12,22 +14,20 @@ pub struct PrivateKeySigner { private_key: K256PrivateKey, } +// We define inherent methods duplicating `EthereumSigner` ones because they are sync and (other than `sign_typed_data`) infallible. impl PrivateKeySigner { pub fn new(private_key: K256PrivateKey) -> Self { Self { private_key } } -} -#[async_trait::async_trait] -impl EthereumSigner for PrivateKeySigner { - /// Get Ethereum address that matches the private key. - async fn get_address(&self) -> Result { - Ok(self.private_key.address()) + /// Gets an Ethereum address that matches this private key. + pub fn address(&self) -> Address { + self.private_key.address() } /// Signs typed struct using Ethereum private key by EIP-712 signature standard. /// Result of this function is the equivalent of RPC calling `eth_signTypedData`. - async fn sign_typed_data( + pub fn sign_typed_data( &self, domain: &Eip712Domain, typed_struct: &S, @@ -39,16 +39,11 @@ impl EthereumSigner for PrivateKeySigner { } /// Signs and returns the RLP-encoded transaction. 
- async fn sign_transaction( - &self, - raw_tx: TransactionParameters, - ) -> Result, SignerError> { + pub fn sign_transaction(&self, raw_tx: TransactionParameters) -> Vec { // According to the code in web3 // We should use `max_fee_per_gas` as `gas_price` if we use EIP1559 let gas_price = raw_tx.max_fee_per_gas; - let max_priority_fee_per_gas = raw_tx.max_priority_fee_per_gas; - let tx = Transaction { to: raw_tx.to, nonce: raw_tx.nonce, @@ -62,21 +57,42 @@ impl EthereumSigner for PrivateKeySigner { max_fee_per_blob_gas: raw_tx.max_fee_per_blob_gas, blob_versioned_hashes: raw_tx.blob_versioned_hashes, }; - let signed = tx.sign(&self.private_key, raw_tx.chain_id); - Ok(signed.raw_transaction.0) + signed.raw_transaction.0 + } +} + +#[async_trait] +impl EthereumSigner for PrivateKeySigner { + async fn get_address(&self) -> Result { + Ok(self.address()) + } + + async fn sign_typed_data( + &self, + domain: &Eip712Domain, + typed_struct: &S, + ) -> Result { + self.sign_typed_data(domain, typed_struct) + } + + async fn sign_transaction( + &self, + raw_tx: TransactionParameters, + ) -> Result, SignerError> { + Ok(self.sign_transaction(raw_tx)) } } #[cfg(test)] mod test { - use zksync_types::{K256PrivateKey, H160, H256, U256, U64}; + use zksync_basic_types::{H160, H256, U256, U64}; + use zksync_crypto_primitives::K256PrivateKey; - use super::PrivateKeySigner; - use crate::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; + use super::*; - #[tokio::test] - async fn test_generating_signed_raw_transaction() { + #[test] + fn test_generating_signed_raw_transaction() { let private_key = K256PrivateKey::from_bytes(H256::from([5; 32])).unwrap(); let signer = PrivateKeySigner::new(private_key); let raw_transaction = TransactionParameters { @@ -94,10 +110,7 @@ mod test { blob_versioned_hashes: None, max_fee_per_blob_gas: None, }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction); assert_ne!(raw_tx.len(), 1); // pre-calculated signature with right algorithm implementation let precalculated_raw_tx: Vec = vec![ diff --git a/core/lib/eth_signer/src/raw_ethereum_tx.rs b/core/lib/eth_signer/src/raw_ethereum_tx.rs index 9479b5bd9d7..bea64305b47 100644 --- a/core/lib/eth_signer/src/raw_ethereum_tx.rs +++ b/core/lib/eth_signer/src/raw_ethereum_tx.rs @@ -10,11 +10,11 @@ //! 
Link to @Deniallugo's PR to web3: https://github.com/tomusdrw/rust-web3/pull/630 use rlp::RlpStream; -use zksync_types::{ - ethabi::Address, +use zksync_basic_types::{ web3::{keccak256, AccessList, Signature, SignedTransaction}, - K256PrivateKey, H256, U256, U64, + Address, H256, U256, U64, }; +use zksync_crypto_primitives::K256PrivateKey; const LEGACY_TX_ID: u64 = 0; const ACCESSLISTS_TX_ID: u64 = 1; diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 2c2cd4f044b..7d604157d1a 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -41,7 +41,6 @@ vise.workspace = true [dev-dependencies] assert_matches.workspace = true pretty_assertions.workspace = true -tokio = { workspace = true, features = ["time"] } zksync_test_account.workspace = true ethabi.workspace = true zksync_eth_signer.workspace = true diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index e119cea0114..88fe2dab5c9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -1,5 +1,5 @@ use ethabi::Token; -use zksync_eth_signer::{EthereumSigner, TransactionParameters}; +use zksync_eth_signer::TransactionParameters; use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, @@ -38,8 +38,8 @@ impl VmTester<()> { /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy /// and EIP712 transactions. /// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -#[tokio::test] -async fn test_require_eip712() { +#[test] +fn test_require_eip712() { // Use 3 accounts: // - `private_address` - EOA account, where we have the key // - `account_address` - AA account, where the contract is deployed @@ -104,7 +104,7 @@ async fn test_require_eip712() { blob_versioned_hashes: None, }; - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); @@ -151,7 +151,6 @@ async fn test_require_eip712() { let signature = private_account .get_pk_signer() .sign_typed_data(&domain, &transaction_request) - .await .unwrap(); let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index cdd71354c8d..a6dc7118005 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -1,5 +1,5 @@ use ethabi::Token; -use zksync_eth_signer::{EthereumSigner, TransactionParameters}; +use zksync_eth_signer::TransactionParameters; use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, @@ -29,11 +29,11 @@ impl VmTester { } // TODO refactor this test it use too much internal details of the VM -#[tokio::test] +#[test] /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy /// and EIP712 transactions. 
/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -async fn test_require_eip712() { +fn test_require_eip712() { // Use 3 accounts: // - `private_address` - EOA account, where we have the key // - `account_address` - AA account, where the contract is deployed @@ -95,7 +95,7 @@ async fn test_require_eip712() { blob_versioned_hashes: None, }; - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); @@ -142,7 +142,6 @@ async fn test_require_eip712() { let signature = private_account .get_pk_signer() .sign_typed_data(&domain, &transaction_request) - .await .unwrap(); let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index d0c97abab72..999ea6eb6e0 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -2,7 +2,7 @@ use ethabi::Token; use zksync_contracts::{ deployer_contract, load_contract, test_contracts::LoadnextContractExecutionParams, }; -use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; +use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_system_constants::{ CONTRACT_DEPLOYER_ADDRESS, DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, @@ -255,8 +255,8 @@ impl Account { PrivateKeySigner::new(self.private_key.clone()) } - pub async fn sign_legacy_tx(&self, tx: TransactionParameters) -> Vec { + pub fn sign_legacy_tx(&self, tx: TransactionParameters) -> Vec { let pk_signer = self.get_pk_signer(); - pk_signer.sign_transaction(tx).await.unwrap() + pk_signer.sign_transaction(tx) } } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 4ea83108a42..bcca59763a8 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7629,7 +7629,8 @@ dependencies = [ "async-trait", "rlp", "thiserror", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] From 741b77e080f75c6a93d3ee779b1c9ce4297618f9 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 7 Oct 2024 20:56:39 +0300 Subject: [PATCH 016/140] fix(vm): Prepare new VM for use in API server and fix divergences (#2994) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Prepares the new VM for use in the API server (without tracers): - Fills in more statistics - Covers more statistic fields in divergence checks and fixes the corresponding divergence in `pubdata_published` - Fixes a divergence if revert reaches the bootloader call frame (happens in the call execution mode) - Fixes a panic in `ShadowVm` if the transaction hash is not set and makes `impl Debug for Transaction` non-panicking. (A hash is not set if a transaction is converted from a call.) ## Why ❔ Part of preparations for integrating the new VM into the API server. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--------- Co-authored-by: Joonatan Saarhelo --- Cargo.lock | 8 +- Cargo.toml | 2 +- core/lib/multivm/src/versions/vm_1_3_2/vm.rs | 36 ++-- .../vm_1_4_1/bootloader_state/utils.rs | 4 +- .../vm_1_4_1/implementation/statistics.rs | 2 +- core/lib/multivm/src/versions/vm_1_4_1/vm.rs | 5 - .../vm_1_4_2/bootloader_state/utils.rs | 4 +- .../vm_1_4_2/implementation/statistics.rs | 2 +- core/lib/multivm/src/versions/vm_1_4_2/vm.rs | 5 - .../bootloader_state/utils.rs | 4 +- .../implementation/statistics.rs | 2 +- .../src/versions/vm_boojum_integration/vm.rs | 5 - .../vm_fast/bootloader_state/utils.rs | 4 +- .../src/versions/vm_fast/tests/rollbacks.rs | 36 +++- core/lib/multivm/src/versions/vm_fast/vm.rs | 180 +++++++++++------- .../vm_latest/bootloader_state/utils.rs | 4 +- .../vm_latest/implementation/statistics.rs | 2 +- .../src/versions/vm_latest/tests/rollbacks.rs | 40 +++- core/lib/multivm/src/versions/vm_latest/vm.rs | 6 +- core/lib/multivm/src/versions/vm_m5/vm.rs | 17 +- core/lib/multivm/src/versions/vm_m6/vm.rs | 34 ++-- .../bootloader_state/utils.rs | 4 +- .../implementation/statistics.rs | 2 +- .../src/versions/vm_refunds_enhancement/vm.rs | 5 - .../bootloader_state/utils.rs | 4 +- .../implementation/statistics.rs | 2 +- .../src/versions/vm_virtual_blocks/vm.rs | 5 - core/lib/multivm/src/vm_instance.rs | 13 +- core/lib/types/src/lib.rs | 24 ++- .../src/types/outputs/statistic.rs | 3 +- core/lib/vm_interface/src/utils/dump.rs | 6 +- core/lib/vm_interface/src/utils/shadow.rs | 28 ++- core/lib/vm_interface/src/vm.rs | 5 +- prover/Cargo.lock | 8 +- 34 files changed, 301 insertions(+), 210 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 47d39f437c9..5073188d632 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11290,8 +11290,8 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "enum_dispatch", "primitive-types", @@ -11302,8 +11302,8 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "primitive-types", ] diff --git a/Cargo.toml b/Cargo.toml index 94fadb25968..691341f71ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -228,7 +228,7 @@ zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } # New VM; pinned to a specific commit because of instability -zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "74577d9be13b1bff9d1a712389731f669b179e47" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "a233d44bbe61dc6a758a754c3b78fe4f83e56699" } # Consensus dependencies. 
zksync_concurrency = "=0.3.0" diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 5692f103da3..89196788a76 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -22,6 +22,25 @@ pub struct Vm { pub(crate) system_env: SystemEnv, } +impl Vm { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics { + event_sink_inner: self.vm.state.event_sink.get_size(), + event_sink_history: self.vm.state.event_sink.get_history_size(), + memory_inner: self.vm.state.memory.get_size(), + memory_history: self.vm.state.memory.get_history_size(), + decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), + decommittment_processor_history: self + .vm + .state + .decommittment_processor + .get_history_size(), + storage_inner: self.vm.state.storage.get_size(), + storage_history: self.vm.state.storage.get_history_size(), + } + } +} + impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; @@ -160,23 +179,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: self.vm.state.event_sink.get_size(), - event_sink_history: self.vm.state.event_sink.get_history_size(), - memory_inner: self.vm.state.memory.get_size(), - memory_history: self.vm.state.memory.get_history_size(), - decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), - decommittment_processor_history: self - .vm - .state - .decommittment_processor - .get_history_size(), - storage_inner: self.vm.state.storage.get_size(), - storage_history: self.vm.state.storage.get_history_size(), - } - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs index 393eb043cb7..1acf75b27e1 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs index 71ae20d4406..3a3b22ea246 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 68c8e92a03a..4122ee94e66 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -11,7 +11,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_1::{ @@ -124,10 +123,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs index 600ab83bf48..182f6eff441 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs index 92a2eaa650c..754b8476182 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index d6e1fbc68a8..fe2015debd2 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -13,7 +13,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_2::{ @@ -126,10 +125,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs index 1a1c620c2b2..c97d3ff30e4 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs index 46f8bc2f400..015d5acd340 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index 17ce8365a0a..ebc0a511d20 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -11,7 +11,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_boojum_integration::{ @@ -125,10 +124,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs index f280f56a828..770f232019b 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs @@ -171,8 +171,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. 
} => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index e7b3f204338..cff72d8ec5a 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -1,9 +1,12 @@ +use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{Execute, U256}; +use zksync_types::{Address, Execute, U256}; +use zksync_vm_interface::VmInterfaceExt; use crate::{ - interface::TxExecutionMode, + interface::{ExecutionResult, TxExecutionMode}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, utils::read_test_contract, @@ -142,3 +145,32 @@ fn test_vm_loadnext_rollbacks() { assert_eq!(result_without_rollbacks, result_with_rollbacks); } + +#[test] +fn rollback_in_call_mode() { + let counter_bytecode = read_test_contract(); + let counter_address = Address::repeat_byte(1); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::EthCall) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) + .with_random_rich_accounts(1) + .build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); + + let (compression_result, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + compression_result.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } + if output.to_string().contains("This method always reverts") + ); + assert_eq!(vm_result.logs.storage_logs, []); +} diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 36698de105c..10be6d88b04 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -1,6 +1,8 @@ use std::{collections::HashMap, fmt, mem}; -use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION; +use zk_evm_1_5_0::{ + aux_structures::LogQuery, zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION, +}; use zksync_contracts::SystemContractCode; use zksync_types::{ l1::is_l1_tx_type, @@ -17,7 +19,7 @@ use zksync_types::{ use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use zksync_vm2::{ interface::{CallframeInterface, HeapId, StateInterface, Tracer}, - ExecutionEnd, FatPointer, Program, Settings, VirtualMachine, + ExecutionEnd, FatPointer, Program, Settings, StorageSlot, VirtualMachine, }; use super::{ @@ -35,8 +37,8 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, - VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, - VmRevertReason, VmTrackingContracts, + VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, + VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_fast::{ @@ -58,6 +60,31 @@ const VM_VERSION: 
MultiVMSubversion = MultiVMSubversion::IncreasedBootloaderMemo type FullTracer = (Tr, CircuitsTracer); +#[derive(Debug)] +struct VmRunResult { + execution_result: ExecutionResult, + /// `true` if VM execution has terminated (as opposed to being stopped on a hook, e.g. when executing a single transaction + /// in a batch). Used for `execution_result == Revert { .. }` to understand whether VM logs should be reverted. + execution_ended: bool, + refunds: Refunds, + /// This value is used in stats. It's defined in the old VM as the latest value used when computing refunds (see the refunds tracer for `vm_latest`). + /// This is **not** equal to the pubdata diff before and after VM execution; e.g., when executing a batch tip, + /// `pubdata_published` is always 0 (since no refunds are computed). + pubdata_published: u32, +} + +impl VmRunResult { + fn should_ignore_vm_logs(&self) -> bool { + match &self.execution_result { + ExecutionResult::Success { .. } => false, + ExecutionResult::Halt { .. } => true, + // Logs generated during reverts should only be ignored if the revert has reached the root (bootloader) call frame, + // which is only possible with `TxExecutionMode::EthCall`. + ExecutionResult::Revert { .. } => self.execution_ended, + } + } +} + /// Fast VM wrapper. /// /// The wrapper is parametric by the storage and tracer types. Besides the [`Tracer`] trait, a tracer must have `'static` lifetime @@ -140,32 +167,35 @@ impl Vm { execution_mode: VmExecutionMode, tracer: &mut (Tr, CircuitsTracer), track_refunds: bool, - ) -> (ExecutionResult, Refunds) { + ) -> VmRunResult { let mut refunds = Refunds { gas_refunded: 0, operator_suggested_refund: 0, }; let mut last_tx_result = None; let mut pubdata_before = self.inner.pubdata() as u32; + let mut pubdata_published = 0; - let result = loop { + let (execution_result, execution_ended) = loop { let hook = match self.inner.run(&mut self.world, tracer) { ExecutionEnd::SuspendedOnHook(hook) => hook, - ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output }, + ExecutionEnd::ProgramFinished(output) => { + break (ExecutionResult::Success { output }, true); + } ExecutionEnd::Reverted(output) => { - break match TxRevertReason::parse_error(&output) { + let result = match TxRevertReason::parse_error(&output) { TxRevertReason::TxReverted(output) => ExecutionResult::Revert { output }, TxRevertReason::Halt(reason) => ExecutionResult::Halt { reason }, - } + }; + break (result, true); } ExecutionEnd::Panicked => { - break ExecutionResult::Halt { - reason: if self.gas_remaining() == 0 { - Halt::BootloaderOutOfGas - } else { - Halt::VMPanic - }, - } + let reason = if self.gas_remaining() == 0 { + Halt::BootloaderOutOfGas + } else { + Halt::VMPanic + }; + break (ExecutionResult::Halt { reason }, true); } }; @@ -175,7 +205,7 @@ impl Vm { } Hook::TxHasEnded => { if let VmExecutionMode::OneTx = execution_mode { - break last_tx_result.take().unwrap(); + break (last_tx_result.take().unwrap(), false); } } Hook::AskOperatorForRefund => { @@ -192,7 +222,8 @@ impl Vm { ) .as_u64(); - let pubdata_published = self.inner.pubdata() as u32; + let pubdata_after = self.inner.pubdata() as u32; + pubdata_published = pubdata_after.saturating_sub(pubdata_before); refunds.operator_suggested_refund = compute_refund( &self.batch_env, @@ -200,7 +231,7 @@ impl Vm { gas_spent_on_pubdata.as_u64(), tx_gas_limit, gas_per_pubdata_byte.low_u32(), - pubdata_published.saturating_sub(pubdata_before), + pubdata_published, self.bootloader_state .last_l2_block() .txs @@ -209,7 
+240,7 @@ impl Vm { .hash, ); - pubdata_before = pubdata_published; + pubdata_before = pubdata_after; let refund_value = refunds.operator_suggested_refund; self.write_to_bootloader_heap([( OPERATOR_REFUNDS_OFFSET + current_tx_index, @@ -305,7 +336,12 @@ impl Vm { } }; - (result, refunds) + VmRunResult { + execution_result, + execution_ended, + refunds, + pubdata_published, + } } fn get_hook_params(&self) -> [U256; 3] { @@ -430,24 +466,24 @@ impl Vm { } let storage = &mut self.world.storage; - let diffs = self.inner.world_diff().get_storage_changes().map( - move |((address, key), (initial_value, final_value))| { - let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); - StateDiffRecord { - address, - key, - derived_key: - zk_evm_1_5_0::aux_structures::LogQuery::derive_final_address_for_params( - &address, &key, - ), - enumeration_index: storage - .get_enumeration_index(&storage_key) - .unwrap_or_default(), - initial_value: initial_value.unwrap_or_default(), - final_value, - } - }, - ); + let diffs = + self.inner + .world_diff() + .get_storage_changes() + .map(move |((address, key), change)| { + let storage_key = + StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); + StateDiffRecord { + address, + key, + derived_key: LogQuery::derive_final_address_for_params(&address, &key), + enumeration_index: storage + .get_enumeration_index(&storage_key) + .unwrap_or_default(), + initial_value: change.before, + final_value: change.after, + } + }); diffs .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) .collect() @@ -477,9 +513,9 @@ impl Vm { events, deduplicated_storage_logs: world_diff .get_storage_changes() - .map(|((address, key), (_, value))| StorageLog { + .map(|((address, key), change)| StorageLog { key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)), - value: u256_to_h256(value), + value: u256_to_h256(change.after), kind: StorageLogKind::RepeatedWrite, // Initialness doesn't matter here }) .collect(), @@ -527,18 +563,19 @@ impl VmInterface for Vm { } let start = self.inner.world_diff().snapshot(); - let pubdata_before = self.inner.pubdata(); let gas_before = self.gas_remaining(); let mut full_tracer = (mem::take(tracer), CircuitsTracer::default()); - let (result, refunds) = self.run(execution_mode, &mut full_tracer, track_refunds); + let result = self.run(execution_mode, &mut full_tracer, track_refunds); *tracer = full_tracer.0; // place the tracer back - let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx) - && matches!(result, ExecutionResult::Halt { .. }); + let ignore_world_diff = + matches!(execution_mode, VmExecutionMode::OneTx) && result.should_ignore_vm_logs(); // If the execution is halted, the VM changes are expected to be rolled back by the caller. // Earlier VMs return empty execution logs in this case, so we follow this behavior. + // Likewise, if a revert has reached the bootloader frame (possible with `TxExecutionMode::EthCall`; otherwise, the bootloader catches reverts), + // old VMs revert all logs; the new VM doesn't do that automatically, so we recreate this behavior here. 
let logs = if ignore_world_diff { VmExecutionLogs::default() } else { @@ -556,7 +593,7 @@ impl VmInterface for Vm { StorageLogKind::RepeatedWrite }, }, - previous_value: u256_to_h256(change.before.unwrap_or_default()), + previous_value: u256_to_h256(change.before), }) .collect(); let events = merge_events( @@ -584,23 +621,24 @@ impl VmInterface for Vm { } }; - let pubdata_after = self.inner.pubdata(); let gas_remaining = self.gas_remaining(); + let gas_used = gas_before - gas_remaining; + VmExecutionResultAndLogs { - result, + result: result.execution_result, logs, // TODO (PLA-936): Fill statistics; investigate whether they should be zeroed on `Halt` statistics: VmExecutionStatistics { + gas_used: gas_used.into(), + gas_remaining, + computational_gas_used: gas_used, // since 1.5.0, this always has the same value as `gas_used` + pubdata_published: result.pubdata_published, + circuit_statistic: full_tracer.1.circuit_statistic(), contracts_used: 0, cycles_used: 0, - gas_used: (gas_before - gas_remaining).into(), - gas_remaining, - computational_gas_used: 0, total_log_queries: 0, - pubdata_published: (pubdata_after - pubdata_before).max(0) as u32, - circuit_statistic: full_tracer.1.circuit_statistic(), }, - refunds, + refunds: result.refunds, } } @@ -628,10 +666,6 @@ impl VmInterface for Vm { self.bootloader_state.start_new_l2_block(l2_block_env) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - todo!("Unused during batch execution") - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut Tr::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); @@ -744,20 +778,27 @@ impl World { } impl zksync_vm2::StorageInterface for World { - fn read_storage(&mut self, contract: H160, key: U256) -> Option { + fn read_storage(&mut self, contract: H160, key: U256) -> StorageSlot { let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); - if self.storage.is_write_initial(key) { - None - } else { - Some(self.storage.read_value(key).as_bytes().into()) + let value = U256::from_big_endian(self.storage.read_value(key).as_bytes()); + // `is_write_initial` value can be true even if the slot has previously been written to / has non-zero value! + // This can happen during oneshot execution (i.e., executing a single transaction) since it emulates + // execution starting in the middle of a batch in the general case. Hence, a slot that was first written to in the batch + // must still be considered an initial write by the refund logic. + let is_write_initial = self.storage.is_write_initial(key); + StorageSlot { + value, + is_write_initial, } } - fn cost_of_writing_storage(&mut self, initial_value: Option, new_value: U256) -> u32 { - let is_initial = initial_value.is_none(); - let initial_value = initial_value.unwrap_or_default(); + fn read_storage_value(&mut self, contract: H160, key: U256) -> U256 { + let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); + U256::from_big_endian(self.storage.read_value(key).as_bytes()) + } - if initial_value == new_value { + fn cost_of_writing_storage(&mut self, slot: StorageSlot, new_value: U256) -> u32 { + if slot.value == new_value { return 0; } @@ -771,10 +812,9 @@ impl zksync_vm2::StorageInterface for World { // For value compression, we use a metadata byte which holds the length of the value and the operation from the // previous state to the new state, and the compressed value. The maximum for this is 33 bytes. 
// Total bytes for initial writes then becomes 65 bytes and repeated writes becomes 38 bytes. - let compressed_value_size = - compress_with_best_strategy(initial_value, new_value).len() as u32; + let compressed_value_size = compress_with_best_strategy(slot.value, new_value).len() as u32; - if is_initial { + if slot.is_write_initial { (BYTES_PER_DERIVED_KEY as u32) + compressed_value_size } else { (BYTES_PER_ENUMERATION_INDEX as u32) + compressed_value_size diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index 4931082d6da..23c079202c1 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -175,8 +175,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index 34c1e1f81da..c1cf1504356 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs @@ -51,7 +51,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 880f189fd89..00a5d6494fe 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -1,12 +1,14 @@ +use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{get_nonce_key, Execute, U256}; +use zksync_types::{get_nonce_key, Address, Execute, U256}; use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, + VmInterfaceHistoryEnabled, }, tracers::dynamic::vm_1_5_0::DynTracer, vm_latest::{ @@ -258,3 +260,37 @@ fn test_layered_rollback() { let result = vm.vm.execute(VmExecutionMode::OneTx); assert!(!result.result.is_failed(), "transaction must not fail"); } + +#[test] +fn rollback_in_call_mode() { + let counter_bytecode = read_test_contract(); + let counter_address = Address::repeat_byte(1); + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::EthCall) + .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) + .with_random_rich_accounts(1) + .build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); + 
+ let (compression_result, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + compression_result.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } + if output.to_string().contains("This method always reverts") + ); + + let storage_logs = vm + .vm + .get_current_execution_state() + .deduplicated_storage_logs; + assert!( + storage_logs.iter().all(|log| !log.is_write()), + "{storage_logs:?}" + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 506b6666ecd..8ccd600a79e 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -13,7 +13,7 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, VmTrackingContracts, + VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_latest::{ @@ -161,10 +161,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 40f66659f29..5a26506f346 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -50,6 +50,10 @@ impl Vm { _phantom: Default::default(), } } + + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics::default() + } } impl VmInterface for Vm { @@ -106,19 +110,6 @@ impl VmInterface for Vm { ) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: 0, - event_sink_history: 0, - memory_inner: 0, - memory_history: 0, - decommittment_processor_inner: 0, - decommittment_processor_history: 0, - storage_inner: 0, - storage_history: 0, - } - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 627687a5524..1fdc8ae64f8 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -50,6 +50,23 @@ impl Vm { system_env, } } + + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics { + event_sink_inner: self.vm.state.event_sink.get_size(), + event_sink_history: self.vm.state.event_sink.get_history_size(), + memory_inner: self.vm.state.memory.get_size(), + memory_history: self.vm.state.memory.get_history_size(), + decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), + decommittment_processor_history: self + .vm + .state + .decommittment_processor + .get_history_size(), + storage_inner: self.vm.state.storage.get_size(), + storage_history: self.vm.state.storage.get_history_size(), + } + } } impl VmInterface for Vm { @@ -186,23 +203,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: self.vm.state.event_sink.get_size(), - event_sink_history: self.vm.state.event_sink.get_history_size(), - memory_inner: self.vm.state.memory.get_size(), - 
memory_history: self.vm.state.memory.get_history_size(), - decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), - decommittment_processor_history: self - .vm - .state - .decommittment_processor - .get_history_size(), - storage_inner: self.vm.state.storage.get_size(), - storage_history: self.vm.state.storage.get_history_size(), - } - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs index 7bd488f90a9..14c895d7a0b 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs @@ -133,8 +133,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs index dcda1457b76..a73c212db29 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs @@ -56,7 +56,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 735bd29c3b0..d87fd4d104d 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -8,7 +8,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_refunds_enhancement::{ @@ -118,10 +117,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs index 2ccedcc6aa9..3e2474835fa 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs @@ -133,8 +133,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - 
TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs index d082085a155..dbd8813035e 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs @@ -56,7 +56,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 2a9d6eed6c7..28c09590f2a 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -8,7 +8,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_virtual_blocks::{ @@ -118,10 +117,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 8dd67e1ac4e..ac5693b6161 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -87,10 +87,6 @@ impl VmInterface for LegacyVmInstance { )) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - dispatch_legacy_vm!(self.record_vm_memory_metrics()) - } - /// Return the results of execution of all batch fn finish_batch(&mut self) -> FinishedL1Batch { dispatch_legacy_vm!(self.finish_batch()) @@ -213,6 +209,11 @@ impl LegacyVmInstance { } } } + + /// Returns memory-related oracle metrics. + pub fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + dispatch_legacy_vm!(self.record_vm_memory_metrics()) + } } /// Fast VM shadowed by the latest legacy VM. 
@@ -283,10 +284,6 @@ impl VmInterface for FastVmInsta } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - dispatch_fast_vm!(self.record_vm_memory_metrics()) - } - fn finish_batch(&mut self) -> FinishedL1Batch { dispatch_fast_vm!(self.finish_batch()) } diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 86b2e3f03d5..67661eb14ad 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -5,7 +5,7 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] -use std::{fmt, fmt::Debug}; +use std::fmt; use anyhow::Context as _; use fee::encoding_len; @@ -88,9 +88,16 @@ pub struct Transaction { pub raw_bytes: Option, } -impl std::fmt::Debug for Transaction { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("Transaction").field(&self.hash()).finish() +impl fmt::Debug for Transaction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(hash) = self.hash_for_debugging() { + f.debug_tuple("Transaction").field(&hash).finish() + } else { + f.debug_struct("Transaction") + .field("initiator_account", &self.initiator_account()) + .field("nonce", &self.nonce()) + .finish() + } } } @@ -136,6 +143,15 @@ impl Transaction { } } + fn hash_for_debugging(&self) -> Option { + match &self.common_data { + ExecuteTransactionCommon::L1(data) => Some(data.hash()), + ExecuteTransactionCommon::L2(data) if data.input.is_some() => Some(data.hash()), + ExecuteTransactionCommon::L2(_) => None, + ExecuteTransactionCommon::ProtocolUpgrade(data) => Some(data.hash()), + } + } + /// Returns the account that initiated this transaction. pub fn initiator_account(&self) -> Address { match &self.common_data { diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index 095547076d4..f8e3851c832 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -109,7 +109,8 @@ pub struct VmExecutionStatistics { pub circuit_statistic: CircuitStatistic, } -/// Oracle metrics of the VM. +/// Oracle metrics reported by legacy VMs. 
+#[derive(Debug, Default)] pub struct VmMemoryMetrics { pub event_sink_inner: usize, pub event_sink_history: usize, diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs index 5dc2351dcf7..288c6445494 100644 --- a/core/lib/vm_interface/src/utils/dump.rs +++ b/core/lib/vm_interface/src/utils/dump.rs @@ -7,7 +7,7 @@ use crate::{ storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView}, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - VmMemoryMetrics, VmTrackingContracts, + VmTrackingContracts, }; fn create_storage_snapshot( @@ -177,10 +177,6 @@ impl VmInterface for DumpingVm { .inspect_transaction_with_bytecode_compression(tracer, tx, with_compression) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.inner.record_vm_memory_metrics() - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.inner.finish_batch() } diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs index 2819e54e9a7..92eb65a810f 100644 --- a/core/lib/vm_interface/src/utils/shadow.rs +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -12,7 +12,7 @@ use crate::{ storage::{ReadStorage, StoragePtr, StorageView}, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, VmTrackingContracts, + VmInterfaceHistoryEnabled, VmTrackingContracts, }; /// Handler for VM divergences. @@ -202,7 +202,8 @@ where tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { - let tx_hash = tx.hash(); + let tx_repr = format!("{tx:?}"); // includes little data, so is OK to call proactively + let (main_bytecodes_result, main_tx_result) = self.main.inspect_transaction_with_bytecode_compression( main_tracer, @@ -224,7 +225,7 @@ where errors.check_results_match(&main_tx_result, &shadow_result.1); if let Err(err) = errors.into_result() { let ctx = format!( - "inspecting transaction {tx_hash:?}, with_compression={with_compression:?}" + "inspecting transaction {tx_repr}, with_compression={with_compression:?}" ); self.report(err.context(ctx)); } @@ -232,10 +233,6 @@ where (main_bytecodes_result, main_tx_result) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.main.record_vm_memory_metrics() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let main_batch = self.main.finish_batch(); if let Some(shadow) = self.shadow.get_mut() { @@ -341,10 +338,25 @@ impl DivergenceErrors { &shadow_result.statistics.circuit_statistic, ); self.check_match( - "gas_remaining", + "statistics.pubdata_published", + &main_result.statistics.pubdata_published, + &shadow_result.statistics.pubdata_published, + ); + self.check_match( + "statistics.gas_remaining", &main_result.statistics.gas_remaining, &shadow_result.statistics.gas_remaining, ); + self.check_match( + "statistics.gas_used", + &main_result.statistics.gas_used, + &shadow_result.statistics.gas_used, + ); + self.check_match( + "statistics.computational_gas_used", + &main_result.statistics.computational_gas_used, + &shadow_result.statistics.computational_gas_used, + ); } fn check_match(&mut self, context: &str, main: &T, shadow: &T) { diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index 90ae76be805..37e33a92b50 100644 --- 
a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -15,7 +15,7 @@ use zksync_types::{Transaction, H256}; use crate::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmMemoryMetrics, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, }; pub trait VmInterface { @@ -44,9 +44,6 @@ pub trait VmInterface { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs); - /// Record VM memory metrics. - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics; - /// Execute batch till the end and return the result, with final execution state /// and bootloader memory. fn finish_batch(&mut self) -> FinishedL1Batch; diff --git a/prover/Cargo.lock b/prover/Cargo.lock index bcca59763a8..92366b0912b 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8123,8 +8123,8 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "enum_dispatch", "primitive-types", @@ -8135,8 +8135,8 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "primitive-types", ] From eeb1c2a8dbb21eb04d934566ca026d13a1f5b860 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 7 Oct 2024 20:16:34 +0200 Subject: [PATCH 017/140] feat: Increased the timeout of waiting for db in tests (#3007) I don't know about other people's setup, but for me it is very flaky with just 3s --- zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs index a08b0404605..d173bb95168 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs @@ -26,7 +26,7 @@ pub async fn reset_test_databases( for dal in dals { let mut url = dal.url.clone(); url.set_path(""); - wait_for_db(&url, 3).await?; + wait_for_db(&url, 20).await?; database::reset::reset_database(shell, link_to_code, dal.clone()).await?; } From 3fd2fb14e7283c6858731e162522e70051a8e162 Mon Sep 17 00:00:00 2001 From: Ivan Schasny <31857042+ischasny@users.noreply.github.com> Date: Mon, 7 Oct 2024 19:51:03 +0100 Subject: [PATCH 018/140] feat: add metric to track current cbt ratio (#3020) Add a new metric to track custom base token to ETH ratio --- .../node/base_token_adjuster/src/base_token_ratio_persister.rs | 3 +++ core/node/base_token_adjuster/src/metrics.rs | 1 + 2 files changed, 4 insertions(+) diff --git a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs index 220f100e5dc..785c9c4dfd7 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs @@ -90,6 +90,9 @@ impl 
BaseTokenRatioPersister { result: OperationResult::Success, }] .observe(start_time.elapsed()); + METRICS + .ratio + .set((ratio.numerator.get() as f64) / (ratio.denominator.get() as f64)); return Ok(ratio); } Err(err) => { diff --git a/core/node/base_token_adjuster/src/metrics.rs b/core/node/base_token_adjuster/src/metrics.rs index d84e4da0c0c..17a48c1b5c3 100644 --- a/core/node/base_token_adjuster/src/metrics.rs +++ b/core/node/base_token_adjuster/src/metrics.rs @@ -18,6 +18,7 @@ pub(crate) struct OperationResultLabels { #[metrics(prefix = "base_token_adjuster")] pub(crate) struct BaseTokenAdjusterMetrics { pub l1_gas_used: Gauge, + pub ratio: Gauge, #[metrics(buckets = Buckets::LATENCIES)] pub external_price_api_latency: Family>, #[metrics(buckets = Buckets::LATENCIES)] From deafa460715334a77edf9fe8aa76fa90029342c4 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 8 Oct 2024 21:08:04 +0300 Subject: [PATCH 019/140] =?UTF-8?q?feat(vm):=20EVM=20emulator=20support=20?= =?UTF-8?q?=E2=80=93=20base=20(#2979)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Modifies the Era codebase to support the EVM emulator. Intentionally avoids changing the `contracts` submodule yet; as a consequence, there are no EVM emulation tests. ## Why ❔ Stepping stone for EVM equivalence. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: IAvecilla Co-authored-by: Javier Chatruc --- core/bin/genesis_generator/src/main.rs | 1 + .../system-constants-generator/src/utils.rs | 3 + core/lib/config/src/configs/chain.rs | 3 + core/lib/config/src/configs/genesis.rs | 2 + core/lib/config/src/testonly.rs | 2 + core/lib/constants/src/contracts.rs | 5 + core/lib/contracts/src/lib.rs | 22 +- ...2dba1d37493d4c1db4b957cfec476a791b32.json} | 26 +- ...bef013ad12b66bdca7251be2af21e98fe870.json} | 26 +- ...dfcb3522a0772ac3d2476652df4216d823e04.json | 31 ++ ...7ca0e4b83f50724a0b18256aafab69909a98.json} | 28 +- ...12784694d2f8fe9a67159ad4c7abc2279ca6.json} | 16 +- ...7f64d0c9620506bb41890548181bccca9ee5.json} | 12 +- ...fe9b944b2dd80eb56965a5874ce3168e8c5e.json} | 14 +- ...9749bd5fc78b09578589c26d3017cc6bd192.json} | 30 +- ...70e7a5fe02b60d5d23e4d153190138112c5b.json} | 12 +- ...9fabba23fa52a17a54146931476681edbd24.json} | 26 +- ...76eb8a6a508aea04d93342df50dd9745c361.json} | 26 +- ...e0c8a39a49d1cea78ef771d4c64fbbc16756.json} | 18 +- ...5e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json} | 10 +- ...b2d1aa6b398c6981c8d4f35e499f42b01731.json} | 26 +- ...e82c5aa84c85b9486e81261d17901a786917.json} | 5 +- ...65162bce330edd9b16587e8f9fdab17a8456.json} | 12 +- ...9f1fd7606fdf3e6d4c882cea76eb579c24a93.json | 30 -- ...fc5d8943e65a30508898d90a098432050bc7.json} | 26 +- ...21ca4cc94c38a7d18023ef1e89de484e60d8.json} | 18 +- ...14f15fd7a5fa3d7f7bc56906817c70b04950.json} | 5 +- ...b243bb067514b67daaf084353e5ada15b23a.json} | 10 +- .../20240911161714_evm-simulator.down.sql | 3 + .../20240911161714_evm-simulator.up.sql | 4 + core/lib/dal/src/blocks_dal.rs | 59 +++ core/lib/dal/src/blocks_web3_dal.rs | 4 +- core/lib/dal/src/consensus/mod.rs | 2 +- core/lib/dal/src/factory_deps_dal.rs | 18 + core/lib/dal/src/models/storage_block.rs | 16 + .../src/models/storage_protocol_version.rs | 10 + core/lib/dal/src/models/storage_sync.rs | 7 + 
.../lib/dal/src/models/storage_transaction.rs | 26 +- core/lib/dal/src/protocol_versions_dal.rs | 12 +- .../lib/dal/src/protocol_versions_web3_dal.rs | 1 + core/lib/dal/src/sync_dal.rs | 1 + core/lib/dal/src/tests/mod.rs | 2 + core/lib/dal/src/transactions_web3_dal.rs | 60 ++- core/lib/env_config/src/chain.rs | 1 + core/lib/env_config/src/genesis.rs | 1 + .../src/multicall3/mod.rs | 2 + core/lib/multivm/Cargo.toml | 2 +- .../src/glue/types/vm/vm_block_result.rs | 6 + .../types/vm/vm_partial_execution_result.rs | 3 + .../glue/types/vm/vm_tx_execution_result.rs | 5 + .../vm_1_4_1/implementation/execution.rs | 1 + .../vm_1_4_2/implementation/execution.rs | 1 + .../implementation/execution.rs | 1 + .../vm_fast/tests/get_used_contracts.rs | 20 +- .../versions/vm_fast/tests/require_eip712.rs | 4 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 1 + .../vm_latest/implementation/execution.rs | 8 + .../versions/vm_latest/implementation/tx.rs | 7 +- .../vm_latest/old_vm/oracles/decommitter.rs | 76 ++-- .../versions/vm_latest/tests/evm_emulator.rs | 76 ++++ .../vm_latest/tests/get_used_contracts.rs | 19 +- .../vm_latest/tests/l1_tx_execution.rs | 2 +- .../src/versions/vm_latest/tests/mod.rs | 1 + .../versions/vm_latest/tests/nonce_holder.rs | 24 +- .../src/versions/vm_latest/tests/refunds.rs | 2 +- .../vm_latest/tests/require_eip712.rs | 4 +- .../vm_latest/tracers/default_tracers.rs | 18 +- .../vm_latest/tracers/evm_deploy_tracer.rs | 105 +++++ .../src/versions/vm_latest/tracers/mod.rs | 2 + .../types/internals/transaction_data.rs | 19 +- .../vm_latest/types/internals/vm_state.rs | 20 +- .../src/versions/vm_latest/utils/mod.rs | 53 ++- .../vm_latest/utils/transaction_encoding.rs | 4 +- core/lib/multivm/src/versions/vm_latest/vm.rs | 18 +- .../implementation/execution.rs | 1 + .../implementation/execution.rs | 1 + core/lib/protobuf_config/src/chain.rs | 1 + core/lib/protobuf_config/src/genesis.rs | 7 + core/lib/protobuf_config/src/lib.rs | 11 +- .../src/proto/config/genesis.proto | 1 + core/lib/prover_interface/src/inputs.rs | 2 + core/lib/tee_verifier/src/lib.rs | 1 + core/lib/types/src/api/mod.rs | 16 +- core/lib/types/src/commitment/mod.rs | 11 +- core/lib/types/src/commitment/tests/mod.rs | 5 + .../post_boojum_1_5_0_test_with_evm.json | 359 ++++++++++++++++++ core/lib/types/src/l2/mod.rs | 4 +- core/lib/types/src/lib.rs | 13 +- core/lib/types/src/protocol_upgrade.rs | 32 +- core/lib/types/src/storage/mod.rs | 11 +- core/lib/types/src/system_contracts.rs | 51 ++- core/lib/types/src/transaction_request.rs | 56 ++- core/lib/types/src/tx/execute.rs | 26 +- core/lib/vm_executor/src/oneshot/block.rs | 24 +- core/lib/vm_executor/src/oneshot/contracts.rs | 16 +- core/lib/vm_executor/src/oneshot/mock.rs | 1 + core/lib/vm_executor/src/storage.rs | 6 +- .../lib/vm_interface/src/storage/in_memory.rs | 2 +- .../src/types/outputs/execution_result.rs | 6 + .../src/types/outputs/finished_l1batch.rs | 1 + .../src/execution_sandbox/execute.rs | 8 +- .../api_server/src/execution_sandbox/mod.rs | 19 +- .../api_server/src/execution_sandbox/tests.rs | 49 ++- .../src/tx_sender/gas_estimation.rs | 13 +- core/node/api_server/src/tx_sender/mod.rs | 10 +- .../api_server/src/tx_sender/tests/call.rs | 2 +- .../src/tx_sender/tests/gas_estimation.rs | 18 +- .../api_server/src/tx_sender/tests/mod.rs | 5 + .../api_server/src/tx_sender/tests/send_tx.rs | 21 +- .../api_server/src/web3/namespaces/debug.rs | 6 +- .../node/api_server/src/web3/namespaces/en.rs | 4 + .../api_server/src/web3/namespaces/eth.rs | 22 +- 
.../api_server/src/web3/namespaces/zks.rs | 38 +- core/node/api_server/src/web3/state.rs | 13 +- core/node/api_server/src/web3/tests/vm.rs | 84 +++- core/node/commitment_generator/src/lib.rs | 1 + core/node/consensus/src/batch.rs | 2 +- core/node/consensus/src/storage/testonly.rs | 2 +- core/node/eth_sender/src/eth_tx_aggregator.rs | 56 ++- core/node/eth_sender/src/tester.rs | 10 +- core/node/eth_sender/src/tests.rs | 154 ++++++-- core/node/eth_sender/src/zksync_functions.rs | 4 + core/node/eth_watch/src/tests.rs | 18 +- core/node/genesis/src/lib.rs | 12 +- core/node/genesis/src/utils.rs | 3 +- core/node/node_sync/src/external_io.rs | 15 + core/node/node_sync/src/genesis.rs | 23 +- core/node/node_sync/src/tests.rs | 8 + core/node/proof_data_handler/src/tests.rs | 1 + core/node/state_keeper/src/executor/mod.rs | 2 +- .../state_keeper/src/executor/tests/tester.rs | 2 +- core/node/state_keeper/src/io/persistence.rs | 3 +- core/node/state_keeper/src/io/tests/mod.rs | 6 +- core/node/state_keeper/src/io/tests/tester.rs | 2 +- core/node/state_keeper/src/keeper.rs | 9 +- .../state_keeper/src/seal_criteria/mod.rs | 3 + core/node/state_keeper/src/testonly/mod.rs | 1 + .../src/testonly/test_batch_executor.rs | 2 + core/node/state_keeper/src/tests/mod.rs | 1 + .../src/updates/l1_batch_updates.rs | 3 + .../src/updates/l2_block_updates.rs | 38 +- core/node/state_keeper/src/updates/mod.rs | 8 +- core/node/test_utils/src/lib.rs | 3 + core/node/vm_runner/src/impls/bwip.rs | 18 + core/node/vm_runner/src/tests/mod.rs | 1 + .../vm_runner/src/tests/output_handler.rs | 1 + core/tests/test_account/src/lib.rs | 7 +- .../contracts/mock-evm/mock-evm.sol | 92 +++++ etc/env/file_based/genesis.yaml | 2 + prover/Cargo.lock | 1 + .../src/rounds/basic_circuits/utils.rs | 7 +- .../forge_interface/deploy_ecosystem/input.rs | 2 + 152 files changed, 2189 insertions(+), 487 deletions(-) rename core/lib/dal/.sqlx/{query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json => query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json} (73%) rename core/lib/dal/.sqlx/{query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json => query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json} (74%) create mode 100644 core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json rename core/lib/dal/.sqlx/{query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json => query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json} (83%) rename core/lib/dal/.sqlx/{query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json => query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json} (81%) rename core/lib/dal/.sqlx/{query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json => query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json} (74%) rename core/lib/dal/.sqlx/{query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json => query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json} (82%) rename core/lib/dal/.sqlx/{query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json => query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json} (80%) rename core/lib/dal/.sqlx/{query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json => query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json} (62%) rename 
core/lib/dal/.sqlx/{query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json => query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json} (80%) rename core/lib/dal/.sqlx/{query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json => query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json} (69%) rename core/lib/dal/.sqlx/{query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json => query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json} (81%) rename core/lib/dal/.sqlx/{query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json => query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json} (73%) rename core/lib/dal/.sqlx/{query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json => query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json} (80%) rename core/lib/dal/.sqlx/{query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json => query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json} (54%) rename core/lib/dal/.sqlx/{query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json => query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json} (52%) delete mode 100644 core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json rename core/lib/dal/.sqlx/{query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json => query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json} (78%) rename core/lib/dal/.sqlx/{query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json => query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json} (83%) rename core/lib/dal/.sqlx/{query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json => query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json} (52%) rename core/lib/dal/.sqlx/{query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json => query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json} (71%) create mode 100644 core/lib/dal/migrations/20240911161714_evm-simulator.down.sql create mode 100644 core/lib/dal/migrations/20240911161714_evm-simulator.up.sql create mode 100644 core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs create mode 100644 core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs create mode 100644 core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json create mode 100644 etc/contracts-test-data/contracts/mock-evm/mock-evm.sol diff --git a/core/bin/genesis_generator/src/main.rs b/core/bin/genesis_generator/src/main.rs index 4f8200b3af7..2a96cdc6c6c 100644 --- a/core/bin/genesis_generator/src/main.rs +++ b/core/bin/genesis_generator/src/main.rs @@ -87,6 +87,7 @@ async fn generate_new_config( genesis_commitment: None, bootloader_hash: Some(base_system_contracts.bootloader), default_aa_hash: Some(base_system_contracts.default_aa), + evm_emulator_hash: base_system_contracts.evm_emulator, ..genesis_config }; diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 43ac9841c40..8d36f734467 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -71,12 +71,14 @@ pub static GAS_TEST_SYSTEM_CONTRACTS: Lazy = Lazy::new(|| { let bytecode = read_sys_contract_bytecode("", 
"DefaultAccount", ContractLanguage::Sol); let hash = hash_bytecode(&bytecode); + BaseSystemContracts { default_aa: SystemContractCode { code: bytes_to_be_words(bytecode), hash, }, bootloader, + evm_emulator: None, } }); @@ -221,6 +223,7 @@ pub(super) fn execute_internal_transfer_test() -> u32 { let base_system_smart_contracts = BaseSystemContracts { bootloader, default_aa, + evm_emulator: None, }; let system_env = SystemEnv { diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 7e33f6964bb..c117064dbc4 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -138,6 +138,8 @@ pub struct StateKeeperConfig { pub bootloader_hash: Option, #[deprecated(note = "Use GenesisConfig::default_aa_hash instead")] pub default_aa_hash: Option, + #[deprecated(note = "Use GenesisConfig::evm_emulator_hash instead")] + pub evm_emulator_hash: Option, #[deprecated(note = "Use GenesisConfig::l1_batch_commit_data_generator_mode instead")] #[serde(default)] pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, @@ -178,6 +180,7 @@ impl StateKeeperConfig { protective_reads_persistence_enabled: true, bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup, } } diff --git a/core/lib/config/src/configs/genesis.rs b/core/lib/config/src/configs/genesis.rs index 6c4bacc3a6e..9e1ffbd87cb 100644 --- a/core/lib/config/src/configs/genesis.rs +++ b/core/lib/config/src/configs/genesis.rs @@ -17,6 +17,7 @@ pub struct GenesisConfig { pub genesis_commitment: Option, pub bootloader_hash: Option, pub default_aa_hash: Option, + pub evm_emulator_hash: Option, pub l1_chain_id: L1ChainId, pub sl_chain_id: Option, pub l2_chain_id: L2ChainId, @@ -49,6 +50,7 @@ impl GenesisConfig { genesis_commitment: Some(H256::repeat_byte(0x17)), bootloader_hash: Default::default(), default_aa_hash: Default::default(), + evm_emulator_hash: Default::default(), l1_chain_id: L1ChainId(9), sl_chain_id: None, protocol_version: Some(ProtocolSemanticVersion { diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 1d90034410b..a6ff30e04a9 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -192,6 +192,7 @@ impl Distribution for EncodeDist { fee_account_addr: None, bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, l1_batch_commit_data_generator_mode: Default::default(), } } @@ -732,6 +733,7 @@ impl Distribution for EncodeDist { genesis_commitment: Some(rng.gen()), bootloader_hash: Some(rng.gen()), default_aa_hash: Some(rng.gen()), + evm_emulator_hash: Some(rng.gen()), fee_account: rng.gen(), l1_chain_id: L1ChainId(self.sample(rng)), sl_chain_id: None, diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index 73b4a0ffaaa..fe37ef6c69f 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -130,6 +130,11 @@ pub const CODE_ORACLE_ADDRESS: Address = H160([ 0x00, 0x00, 0x80, 0x12, ]); +pub const EVM_GAS_MANAGER_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x13, +]); + /// Note, that the `Create2Factory` is explicitly deployed on a non-system-contract address. 
pub const CREATE2_FACTORY_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a72b5c95d1b..fb28693887a 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -293,6 +293,7 @@ fn read_zbin_bytecode_from_path(bytecode_path: PathBuf) -> Vec { fs::read(&bytecode_path) .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err)) } + /// Hash of code and code which consists of 32 bytes words #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SystemContractCode { @@ -304,18 +305,23 @@ pub struct SystemContractCode { pub struct BaseSystemContracts { pub bootloader: SystemContractCode, pub default_aa: SystemContractCode, + /// Never filled in constructors for now. The only way to get the EVM emulator enabled is to call [`Self::with_evm_emulator()`]. + pub evm_emulator: Option, } #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq)] pub struct BaseSystemContractsHashes { pub bootloader: H256, pub default_aa: H256, + pub evm_emulator: Option, } impl PartialEq for BaseSystemContracts { fn eq(&self, other: &Self) -> bool { self.bootloader.hash == other.bootloader.hash && self.default_aa.hash == other.default_aa.hash + && self.evm_emulator.as_ref().map(|contract| contract.hash) + == other.evm_emulator.as_ref().map(|contract| contract.hash) } } @@ -339,14 +345,27 @@ impl BaseSystemContracts { BaseSystemContracts { bootloader, default_aa, + evm_emulator: None, } } - // BaseSystemContracts with proved bootloader - for handling transactions. + + /// BaseSystemContracts with proved bootloader - for handling transactions. pub fn load_from_disk() -> Self { let bootloader_bytecode = read_proved_batch_bootloader_bytecode(); BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + /// Loads the latest EVM emulator for these base system contracts. Logically, it only makes sense to do for the latest protocol version. + pub fn with_latest_evm_emulator(mut self) -> Self { + let bytecode = read_sys_contract_bytecode("", "EvmInterpreter", ContractLanguage::Yul); + let hash = hash_bytecode(&bytecode); + self.evm_emulator = Some(SystemContractCode { + code: bytes_to_be_words(bytecode), + hash, + }); + self + } + /// BaseSystemContracts with playground bootloader - used for handling eth_calls. 
pub fn playground() -> Self { let bootloader_bytecode = read_playground_batch_bootloader_bytecode(); @@ -475,6 +494,7 @@ impl BaseSystemContracts { BaseSystemContractsHashes { bootloader: self.bootloader.hash, default_aa: self.default_aa.hash, + evm_emulator: self.evm_emulator.as_ref().map(|contract| contract.hash), } } } diff --git a/core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json b/core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json similarity index 73% rename from core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json rename to core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json index 9e212249490..c93e6aef3e7 100644 --- a/core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json +++ b/core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", 
"type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -161,6 +166,7 @@ true, true, true, + true, false, true, true, @@ -169,5 +175,5 @@ true ] }, - "hash": "ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa" + "hash": "05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32" } diff --git a/core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json b/core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json similarity index 74% rename from core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json rename to core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json index 8bf22e1b6fb..a3d356f4bea 100644 --- a/core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json +++ b/core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n 
ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -165,11 +170,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada" + "hash": "16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870" } diff --git a/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json b/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json new file mode 100644 index 00000000000..35c606bf22b --- /dev/null +++ b/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json @@ -0,0 +1,31 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n NOW(),\n NOW()\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Bytea", + "Numeric", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Int4", + "Int8", + "Int8", + "Int8", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04" +} diff --git a/core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json b/core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json similarity index 83% rename from core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json rename to core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json index 93d522f5fb7..752e171f58c 100644 --- a/core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json +++ b/core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n 
compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -90,28 +90,28 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, - "name": "protocol_version", - "type_info": "Int4" + "name": "meta_parameters_hash", + "type_info": "Bytea" }, { "ordinal": 21, - "name": "compressed_state_diffs", - "type_info": "Bytea" + "name": "protocol_version", + "type_info": "Int4" }, { "ordinal": 22, @@ -120,16 +120,21 @@ }, { "ordinal": 23, - "name": "events_queue_commitment", + "name": "compressed_state_diffs", "type_info": "Bytea" }, { "ordinal": 24, - "name": "bootloader_initial_content_commitment", + "name": "events_queue_commitment", "type_info": "Bytea" }, { "ordinal": 25, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -165,8 +170,9 @@ false, true, true, + true, true ] }, - "hash": "5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb" + "hash": "4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98" } diff --git a/core/lib/dal/.sqlx/query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json b/core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json similarity index 81% rename from core/lib/dal/.sqlx/query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json rename to core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json index aa7d4c65a39..6f77a656072 100644 --- a/core/lib/dal/.sqlx/query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json +++ b/core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n 
(miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", "describe": { "columns": [ { @@ -50,21 +50,26 @@ }, { "ordinal": 9, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 10, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 11, "name": "hash", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 12, "name": "protocol_version!", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 13, "name": "fee_account_address!", "type_info": "Bytea" } @@ -85,11 +90,12 @@ true, true, true, + true, false, false, true, false ] }, - "hash": "778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d" + "hash": "51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6" } diff --git a/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json b/core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json similarity index 74% rename from core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json rename to core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json index f440a265593..b2f195c4e5c 100644 --- a/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json +++ b/core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT 
JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -90,11 +90,16 @@ }, { "ordinal": 17, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 18, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 18, + "ordinal": 19, "name": "fee_account_address", "type_info": "Bytea" } @@ -123,8 +128,9 @@ true, true, true, + true, false ] }, - "hash": "b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad" + "hash": "7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5" } diff --git a/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json b/core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json similarity index 82% rename from core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json rename to core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json index 4a73fde57e2..28fbea09998 100644 --- a/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json +++ b/core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n 
bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -55,16 +55,21 @@ }, { "ordinal": 10, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 11, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 11, + "ordinal": 12, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 12, + "ordinal": 13, "name": "pubdata_input", "type_info": "Bytea" } @@ -86,9 +91,10 @@ true, true, true, + true, false, true ] }, - "hash": "454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd" + "hash": "7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e" } diff --git a/core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json b/core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json similarity index 80% rename from core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json rename to core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json index a9eae0bd01d..8f41bf3b491 100644 --- a/core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json +++ b/core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -90,28 +90,28 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, - "name": "protocol_version", - "type_info": "Int4" + "name": "meta_parameters_hash", + "type_info": "Bytea" }, { "ordinal": 21, - "name": "system_logs", - "type_info": "ByteaArray" + 
"name": "protocol_version", + "type_info": "Int4" }, { "ordinal": 22, @@ -120,16 +120,21 @@ }, { "ordinal": 23, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -161,12 +166,13 @@ true, true, true, - false, true, true, + false, + true, true, true ] }, - "hash": "60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7" + "hash": "860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192" } diff --git a/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json b/core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json similarity index 62% rename from core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json rename to core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json index 162c722add9..d944b6abf9e 100644 --- a/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json +++ b/core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_versions.evm_emulator_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -25,11 +25,16 @@ }, { "ordinal": 4, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 5, "name": "patch", "type_info": "Int4" }, { - "ordinal": 5, + "ordinal": 6, "name": "snark_wrapper_vk_hash", "type_info": "Bytea" } @@ -44,9 +49,10 @@ false, false, false, + true, false, false ] }, - "hash": "c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7" + "hash": "89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b" } diff --git a/core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json b/core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json similarity index 80% rename from core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json rename to core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json index a96d94a5c55..9eb67bb8299 100644 --- a/core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json +++ b/core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n 
number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -160,11 +165,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e" + "hash": "9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24" } diff --git a/core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json b/core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json similarity index 69% rename from core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json rename to core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json index 671b56760d6..55d56cc4ab0 100644 --- a/core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json +++ b/core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n 
SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + 
"ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -166,11 +171,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3" + "hash": "9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361" } diff --git a/core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json b/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json similarity index 81% rename from core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json rename to core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json index 26a3458bff9..c8c438295e4 100644 --- a/core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json +++ b/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -65,26 +65,31 @@ }, { "ordinal": 12, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 13, + "ordinal": 14, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 14, + "ordinal": 15, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 16, "name": "gas_limit", "type_info": "Int8" }, { - "ordinal": 16, + "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" } @@ -106,11 +111,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded" + "hash": "a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756" } diff --git a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json b/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json similarity index 73% rename from core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json rename to core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json index 81ae6c590f9..28ffcc5ae46 100644 --- a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json +++ b/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n 
fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", + "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", "describe": { "columns": [ { @@ -82,6 +82,11 @@ "ordinal": 15, "name": "default_aa_code_hash", "type_info": "Bytea" + }, + { + "ordinal": 16, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" } ], "parameters": { @@ -105,8 +110,9 @@ false, true, true, + true, true ] }, - "hash": "2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1" + "hash": "a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6" } diff --git a/core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json b/core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json similarity index 80% rename from core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json rename to core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json index 0b1daaa10e5..6588ee2f11e 100644 --- a/core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json +++ b/core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json @@ -1,6 +1,6 @@ 
{ "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -164,11 +169,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d" + "hash": "b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731" } diff --git a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json b/core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json similarity index 54% rename from core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json rename to core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json index 8c41c0ab976..9d9fa72595d 100644 --- a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json +++ b/core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n 
bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW())\n ON CONFLICT DO NOTHING\n ", + "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, NOW())\n ON CONFLICT DO NOTHING\n ", "describe": { "columns": [], "parameters": { @@ -9,10 +9,11 @@ "Int8", "Bytea", "Bytea", + "Bytea", "Bytea" ] }, "nullable": [] }, - "hash": "048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016" + "hash": "b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917" } diff --git a/core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json b/core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json similarity index 52% rename from core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json rename to core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json index eba36994fb3..2689716c38a 100644 --- a/core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json +++ b/core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n bootloader_code_hash,\n default_account_code_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "query": "\n SELECT\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", "describe": { "columns": [ { @@ -12,6 +12,11 @@ "ordinal": 1, "name": "default_account_code_hash", "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" } ], "parameters": { @@ -21,8 +26,9 @@ }, "nullable": [ false, - false + false, + true ] }, - "hash": "5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6" + "hash": "bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456" } diff --git a/core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json b/core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json deleted file mode 100644 index 09e34a7e33a..00000000000 --- a/core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Bytea", - "Int4", - "Int4", - "Bytea", - "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea", - "Int4", - "Int8", - "Int8", - "Int8", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93" -} diff --git a/core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json 
b/core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json similarity index 78% rename from core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json rename to core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json index 10e2a76618f..032cf987fc0 100644 --- a/core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json +++ b/core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -162,11 +167,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862" + "hash": "da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7" } diff --git a/core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json 
b/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json similarity index 83% rename from core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json rename to core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json index 74a6187e644..700352c1a8b 100644 --- a/core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json +++ b/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -65,26 +65,31 @@ }, { "ordinal": 12, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 13, + "ordinal": 14, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 14, + "ordinal": 15, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 16, "name": "gas_limit", "type_info": "Int8" }, { - "ordinal": 16, + "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" } @@ -108,11 +113,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d" + "hash": "f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8" } diff --git a/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json b/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json similarity index 52% rename from core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json rename to core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json index d2c999a70d4..4fe32531a3f 100644 --- a/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json +++ b/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n 
$18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", + "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n $21,\n NOW(),\n NOW()\n )\n ", "describe": { "columns": [], "parameters": { @@ -19,6 +19,7 @@ "Jsonb", "Bytea", "Bytea", + "Bytea", "Int4", "ByteaArray", "Int8Array", @@ -29,5 +30,5 @@ }, "nullable": [] }, - "hash": "9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e" + "hash": "f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950" } diff --git a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json b/core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json similarity index 71% rename from core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json rename to core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json index 5e9051587bb..1b50a750dac 100644 --- a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json +++ b/core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", "describe": { "columns": [ { @@ -25,6 +25,11 @@ }, { "ordinal": 4, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 5, "name": "upgrade_tx_hash", "type_info": "Bytea" } @@ -39,8 +44,9 @@ false, false, false, + true, true ] }, - "hash": "5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355" + "hash": "f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a" } diff --git a/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql b/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql new file mode 100644 index 00000000000..74ac4e60383 --- /dev/null +++ b/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE protocol_versions DROP COLUMN IF EXISTS evm_emulator_code_hash; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS evm_emulator_code_hash; +ALTER TABLE miniblocks DROP COLUMN IF EXISTS evm_emulator_code_hash; diff --git a/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql b/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql new file mode 100644 index 00000000000..43ae361e7ee --- /dev/null +++ b/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql @@ -0,0 +1,4 @@ +ALTER TABLE protocol_versions ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; +-- We need 
this column in `miniblocks` as well in order to store data for the pending L1 batch +ALTER TABLE miniblocks ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 5b351511a06..59cc557f36e 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -325,6 +325,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -366,6 +367,7 @@ impl BlocksDal<'_, '_> { used_contract_hashes, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, system_logs, pubdata_input @@ -610,6 +612,7 @@ impl BlocksDal<'_, '_> { used_contract_hashes, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, system_logs, storage_refunds, @@ -641,6 +644,7 @@ impl BlocksDal<'_, '_> { $18, $19, $20, + $21, NOW(), NOW() ) @@ -659,6 +663,11 @@ impl BlocksDal<'_, '_> { used_contract_hashes, header.base_system_contracts_hashes.bootloader.as_bytes(), header.base_system_contracts_hashes.default_aa.as_bytes(), + header + .base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), header.protocol_version.map(|v| v as i32), &system_logs, &storage_refunds, @@ -703,6 +712,7 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, @@ -730,6 +740,7 @@ impl BlocksDal<'_, '_> { $15, $16, $17, + $18, NOW(), NOW() ) @@ -752,6 +763,11 @@ impl BlocksDal<'_, '_> { .base_system_contracts_hashes .default_aa .as_bytes(), + l2_block_header + .base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), l2_block_header.protocol_version.map(|v| v as i32), i64::from(l2_block_header.virtual_blocks), l2_block_header.batch_fee_input.fair_pubdata_price() as i64, @@ -780,6 +796,7 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, @@ -820,6 +837,7 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, @@ -1038,6 +1056,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1224,6 +1243,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1304,6 +1324,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1377,6 +1398,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1504,6 +1526,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1568,6 +1591,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, 
aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1646,6 +1670,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -2695,6 +2720,40 @@ mod tests { .is_err()); } + #[tokio::test] + async fn persisting_evm_emulator_hash() { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let mut l2_block_header = create_l2_block_header(1); + l2_block_header.base_system_contracts_hashes.evm_emulator = Some(H256::repeat_byte(0x23)); + conn.blocks_dal() + .insert_l2_block(&l2_block_header) + .await + .unwrap(); + + let mut fetched_block_header = conn + .blocks_dal() + .get_last_sealed_l2_block_header() + .await + .unwrap() + .expect("no block"); + // Batch fee input isn't restored exactly + fetched_block_header.batch_fee_input = l2_block_header.batch_fee_input; + + assert_eq!(fetched_block_header, l2_block_header); + // ...and a sanity check just to be sure + assert!(fetched_block_header + .base_system_contracts_hashes + .evm_emulator + .is_some()); + } + #[tokio::test] async fn loading_l1_batch_header() { let pool = ConnectionPool::::test_pool().await; diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 904e167d1a6..c1a1e6765b6 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -673,6 +673,7 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, miniblocks.protocol_version, miniblocks.fee_account_address FROM @@ -744,7 +745,8 @@ impl BlocksWeb3Dal<'_, '_> { mb.l2_fair_gas_price, mb.fair_pubdata_price, l1_batches.bootloader_code_hash, - l1_batches.default_aa_code_hash + l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash FROM l1_batches INNER JOIN mb ON TRUE diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index f01655d56a9..876dfe14bed 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -244,7 +244,7 @@ impl ProtoRepr for proto::TransactionV25 { }, T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), }; - tx.try_into() + Transaction::from_abi(tx, true) } fn build(tx: &Self::Type) -> Self { diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs index 36dfaa1a466..857e2973ae3 100644 --- a/core/lib/dal/src/factory_deps_dal.rs +++ b/core/lib/dal/src/factory_deps_dal.rs @@ -94,6 +94,7 @@ impl FactoryDepsDal<'_, '_> { &mut self, bootloader_hash: H256, default_aa_hash: H256, + evm_emulator_hash: Option, ) -> anyhow::Result { let bootloader_bytecode = self .get_sealed_factory_dep(bootloader_hash) @@ -115,9 +116,26 @@ impl FactoryDepsDal<'_, '_> { code: bytes_to_be_words(default_aa_bytecode), hash: default_aa_hash, }; + + let evm_emulator_code = if let Some(evm_emulator_hash) = evm_emulator_hash { + let evm_emulator_bytecode = self + .get_sealed_factory_dep(evm_emulator_hash) + .await + .context("failed loading EVM emulator code")? 
+ .with_context(|| format!("EVM emulator code with hash {evm_emulator_hash:?} should be present in the database"))?; + + Some(SystemContractCode { + code: bytes_to_be_words(evm_emulator_bytecode), + hash: evm_emulator_hash, + }) + } else { + None + }; + Ok(BaseSystemContracts { bootloader: bootloader_code, default_aa: default_aa_code, + evm_emulator: evm_emulator_code, }) } diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 34e14387ca6..7e9a9eca9d4 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -44,6 +44,7 @@ pub(crate) struct StorageL1BatchHeader { pub used_contract_hashes: serde_json::Value, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub protocol_version: Option, // `system_logs` are introduced as part of boojum and will be absent in all batches generated prior to boojum. @@ -82,6 +83,7 @@ impl StorageL1BatchHeader { base_system_contracts_hashes: convert_base_system_contracts_hashes( self.bootloader_code_hash, self.default_aa_code_hash, + self.evm_emulator_code_hash, ), system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: self @@ -103,6 +105,7 @@ fn convert_l2_to_l1_logs(raw_logs: Vec>) -> Vec { fn convert_base_system_contracts_hashes( bootloader_code_hash: Option>, default_aa_code_hash: Option>, + evm_emulator_code_hash: Option>, ) -> BaseSystemContractsHashes { BaseSystemContractsHashes { bootloader: bootloader_code_hash @@ -111,6 +114,7 @@ fn convert_base_system_contracts_hashes( default_aa: default_aa_code_hash .map(|hash| H256::from_slice(&hash)) .expect("should not be none"), + evm_emulator: evm_emulator_code_hash.map(|hash| H256::from_slice(&hash)), } } @@ -134,6 +138,7 @@ pub(crate) struct StorageL1Batch { pub zkporter_is_available: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub l2_to_l1_messages: Vec>, pub l2_l1_merkle_root: Option>, @@ -177,6 +182,7 @@ impl StorageL1Batch { base_system_contracts_hashes: convert_base_system_contracts_hashes( self.bootloader_code_hash, self.default_aa_code_hash, + self.evm_emulator_code_hash, ), system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: self @@ -240,6 +246,10 @@ impl TryFrom for L1BatchMetadata { .default_aa_code_hash .ok_or(L1BatchMetadataError::Incomplete("default_aa_code_hash"))?, ), + evm_emulator_code_hash: batch + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), protocol_version: batch .protocol_version .map(|v| (v as u16).try_into().unwrap()), @@ -275,6 +285,7 @@ pub(crate) struct StorageBlockDetails { pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub fee_account_address: Vec, pub protocol_version: Option, } @@ -320,6 +331,7 @@ impl From for api::BlockDetails { base_system_contracts_hashes: convert_base_system_contracts_hashes( details.bootloader_code_hash, details.default_aa_code_hash, + details.evm_emulator_code_hash, ), }; api::BlockDetails { @@ -352,6 +364,7 @@ pub(crate) struct StorageL1BatchDetails { pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, } impl From for api::L1BatchDetails { @@ -395,6 +408,7 @@ impl From for api::L1BatchDetails { base_system_contracts_hashes: convert_base_system_contracts_hashes( 
details.bootloader_code_hash, details.default_aa_code_hash, + details.evm_emulator_code_hash, ), }; api::L1BatchDetails { @@ -418,6 +432,7 @@ pub(crate) struct StorageL2BlockHeader { // L2 gas price assumed in the corresponding batch pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub protocol_version: Option, pub fair_pubdata_price: Option, @@ -471,6 +486,7 @@ impl From for L2BlockHeader { base_system_contracts_hashes: convert_base_system_contracts_hashes( row.bootloader_code_hash, row.default_aa_code_hash, + row.evm_emulator_code_hash, ), gas_per_pubdata_limit: row.gas_per_pubdata_limit as u64, protocol_version, diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs index e53bf7b9d0a..a833236a7b6 100644 --- a/core/lib/dal/src/models/storage_protocol_version.rs +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -16,6 +16,7 @@ pub struct StorageProtocolVersion { pub snark_wrapper_vk_hash: Vec, pub bootloader_code_hash: Vec, pub default_account_code_hash: Vec, + pub evm_emulator_code_hash: Option>, } pub(crate) fn protocol_version_from_storage( @@ -34,6 +35,10 @@ pub(crate) fn protocol_version_from_storage( base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: H256::from_slice(&storage_version.bootloader_code_hash), default_aa: H256::from_slice(&storage_version.default_account_code_hash), + evm_emulator: storage_version + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), }, tx, } @@ -45,6 +50,7 @@ pub struct StorageApiProtocolVersion { pub timestamp: i64, pub bootloader_code_hash: Vec, pub default_account_code_hash: Vec, + pub evm_emulator_code_hash: Option>, pub upgrade_tx_hash: Option>, } @@ -60,6 +66,10 @@ impl From for api::ProtocolVersion { storage_protocol_version.timestamp as u64, H256::from_slice(&storage_protocol_version.bootloader_code_hash), H256::from_slice(&storage_protocol_version.default_account_code_hash), + storage_protocol_version + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), l2_system_upgrade_tx_hash, ) } diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 688a6f99790..cf7b76d8163 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -22,6 +22,7 @@ pub(crate) struct StorageSyncBlock { pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub fee_account_address: Vec, pub protocol_version: i32, pub virtual_blocks: i64, @@ -75,6 +76,12 @@ impl TryFrom for SyncBlock { .decode_column("bootloader_code_hash")?, default_aa: parse_h256_opt(block.default_aa_code_hash.as_deref()) .decode_column("default_aa_code_hash")?, + evm_emulator: block + .evm_emulator_code_hash + .as_deref() + .map(parse_h256) + .transpose() + .decode_column("evm_emulator_code_hash")?, }, fee_account_address: parse_h160(&block.fee_account_address) .decode_column("fee_account_address")?, diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index bb219ee1d61..78daaebb335 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -352,6 +352,16 @@ impl From for TransactionReceipt { .index_in_block .map_or_else(Default::default, U64::from); + // For better compatibility with various clients, we never return `None` recipient address. 
+ let to = storage_receipt + .transfer_to + .or(storage_receipt.execute_contract_address) + .and_then(|addr| { + serde_json::from_value::>(addr) + .expect("invalid address value in the database") + }) + .unwrap_or_else(Address::zero); + let block_hash = H256::from_slice(&storage_receipt.block_hash); TransactionReceipt { transaction_hash: H256::from_slice(&storage_receipt.tx_hash), @@ -361,15 +371,7 @@ impl From for TransactionReceipt { l1_batch_tx_index: storage_receipt.l1_batch_tx_index.map(U64::from), l1_batch_number: storage_receipt.l1_batch_number.map(U64::from), from: H160::from_slice(&storage_receipt.initiator_address), - to: storage_receipt - .transfer_to - .or(storage_receipt.execute_contract_address) - .map(|addr| { - serde_json::from_value::
(addr) - .expect("invalid address value in the database") - }) - // For better compatibility with various clients, we never return null. - .or_else(|| Some(Address::default())), + to: Some(to), cumulative_gas_used: Default::default(), // TODO: Should be actually calculated (SMA-1183). gas_used: { let refunded_gas: U256 = storage_receipt.refunded_gas.into(); @@ -508,6 +510,10 @@ impl StorageApiTransaction { .signature .and_then(|signature| PackedEthSignature::deserialize_packed(&signature).ok()); + let to = serde_json::from_value(self.execute_contract_address) + .ok() + .unwrap_or_default(); + // For legacy and EIP-2930 transactions it is gas price willing to be paid by the sender in wei. // For other transactions it should be the effective gas price if transaction is included in block, // otherwise this value should be set equal to the max fee per gas. @@ -528,7 +534,7 @@ impl StorageApiTransaction { block_number: self.block_number.map(|number| U64::from(number as u64)), transaction_index: self.index_in_block.map(|idx| U64::from(idx as u64)), from: Some(Address::from_slice(&self.initiator_address)), - to: Some(serde_json::from_value(self.execute_contract_address).unwrap()), + to, value: bigdecimal_to_u256(self.value), gas_price: Some(bigdecimal_to_u256(gas_price)), gas: bigdecimal_to_u256(self.gas_limit.unwrap_or_else(BigDecimal::zero)), diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 72ae811ce76..3b500e07a08 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -45,17 +45,22 @@ impl ProtocolVersionsDal<'_, '_> { timestamp, bootloader_code_hash, default_account_code_hash, + evm_emulator_code_hash, upgrade_tx_hash, created_at ) VALUES - ($1, $2, $3, $4, $5, NOW()) + ($1, $2, $3, $4, $5, $6, NOW()) ON CONFLICT DO NOTHING "#, version.minor as i32, timestamp as i64, base_system_contracts_hashes.bootloader.as_bytes(), base_system_contracts_hashes.default_aa.as_bytes(), + base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), tx_hash.as_ref().map(H256::as_bytes), ) .instrument("save_protocol_version#minor") @@ -193,7 +198,8 @@ impl ProtocolVersionsDal<'_, '_> { r#" SELECT bootloader_code_hash, - default_account_code_hash + default_account_code_hash, + evm_emulator_code_hash FROM protocol_versions WHERE @@ -212,6 +218,7 @@ impl ProtocolVersionsDal<'_, '_> { .get_base_system_contracts( H256::from_slice(&row.bootloader_code_hash), H256::from_slice(&row.default_account_code_hash), + row.evm_emulator_code_hash.as_deref().map(H256::from_slice), ) .await?; Some(contracts) @@ -232,6 +239,7 @@ impl ProtocolVersionsDal<'_, '_> { protocol_versions.timestamp, protocol_versions.bootloader_code_hash, protocol_versions.default_account_code_hash, + protocol_versions.evm_emulator_code_hash, protocol_patches.patch, protocol_patches.snark_wrapper_vk_hash FROM diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs index a3a7a162c3d..adc3957f872 100644 --- a/core/lib/dal/src/protocol_versions_web3_dal.rs +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ -21,6 +21,7 @@ impl ProtocolVersionsWeb3Dal<'_, '_> { timestamp, bootloader_code_hash, default_account_code_hash, + evm_emulator_code_hash, upgrade_tx_hash FROM protocol_versions diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index ec6ee0f9281..ab5684007d0 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -50,6 +50,7 @@ 
impl SyncDal<'_, '_> { miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + miniblocks.evm_emulator_code_hash, miniblocks.virtual_blocks, miniblocks.hash, miniblocks.protocol_version AS "protocol_version!", diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index dc672fa1f80..bf85008f7b5 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -54,6 +54,7 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { logs_bloom: Default::default(), } } + pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { L1BatchHeader::new( L1BatchNumber(number), @@ -61,6 +62,7 @@ pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), + evm_emulator: Some(H256::repeat_byte(43)), }, ProtocolVersionId::latest(), ) diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index dcf5f25f104..c2209bb9c93 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -607,6 +607,39 @@ mod tests { ); } + #[tokio::test] + async fn getting_evm_deployment_tx() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + let mut tx = mock_l2_transaction(); + tx.execute.contract_address = None; + let tx_hash = tx.hash(); + prepare_transactions(&mut conn, vec![tx.clone()]).await; + + let fetched_tx = conn + .transactions_dal() + .get_tx_by_hash(tx_hash) + .await + .unwrap() + .expect("no transaction"); + let mut fetched_tx = L2Tx::try_from(fetched_tx).unwrap(); + assert_eq!(fetched_tx.execute.contract_address, None); + fetched_tx.raw_bytes = tx.raw_bytes.clone(); + assert_eq!(fetched_tx, tx); + + let web3_tx = conn + .transactions_web3_dal() + .get_transaction_by_position(L2BlockNumber(1), 0, L2ChainId::from(270)) + .await; + let web3_tx = web3_tx.unwrap().expect("no transaction"); + assert_eq!(web3_tx.hash, tx_hash); + assert_eq!(web3_tx.to, None); + } + #[tokio::test] async fn getting_receipts() { let connection_pool = ConnectionPool::::test_pool().await; @@ -621,7 +654,7 @@ mod tests { let tx2 = mock_l2_transaction(); let tx2_hash = tx2.hash(); - prepare_transactions(&mut conn, vec![tx1.clone(), tx2.clone()]).await; + prepare_transactions(&mut conn, vec![tx1, tx2]).await; let mut receipts = conn .transactions_web3_dal() @@ -636,6 +669,31 @@ mod tests { assert_eq!(receipts[1].transaction_hash, tx2_hash); } + #[tokio::test] + async fn getting_receipt_for_evm_deployment_tx() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let mut tx = mock_l2_transaction(); + let tx_hash = tx.hash(); + tx.execute.contract_address = None; + prepare_transactions(&mut conn, vec![tx]).await; + + let receipts = conn + .transactions_web3_dal() + .get_transaction_receipts(&[tx_hash]) + .await + .unwrap(); + assert_eq!(receipts.len(), 1); + let receipt = receipts.into_iter().next().unwrap(); + assert_eq!(receipt.transaction_hash, tx_hash); + assert_eq!(receipt.to, Some(Address::zero())); + } + #[tokio::test] async fn getting_l2_block_transactions() { let 
connection_pool = ConnectionPool::::test_pool().await; diff --git a/core/lib/env_config/src/chain.rs b/core/lib/env_config/src/chain.rs index a25c593bd88..a125f331496 100644 --- a/core/lib/env_config/src/chain.rs +++ b/core/lib/env_config/src/chain.rs @@ -102,6 +102,7 @@ mod tests { default_aa_hash: Some(hash( "0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066", )), + evm_emulator_hash: None, l1_batch_commit_data_generator_mode, max_circuits_per_batch: 24100, protective_reads_persistence_enabled: true, diff --git a/core/lib/env_config/src/genesis.rs b/core/lib/env_config/src/genesis.rs index bf30fd4cc33..55c79eceb50 100644 --- a/core/lib/env_config/src/genesis.rs +++ b/core/lib/env_config/src/genesis.rs @@ -68,6 +68,7 @@ impl FromEnv for GenesisConfig { genesis_commitment: contracts_config.genesis_batch_commitment, bootloader_hash: state_keeper.bootloader_hash, default_aa_hash: state_keeper.default_aa_hash, + evm_emulator_hash: state_keeper.evm_emulator_hash, // TODO(EVM-676): for now, the settlement layer is always the same as the L1 network l1_chain_id: L1ChainId(network_config.network.chain_id().0), sl_chain_id: Some(network_config.network.chain_id()), diff --git a/core/lib/l1_contract_interface/src/multicall3/mod.rs b/core/lib/l1_contract_interface/src/multicall3/mod.rs index 7d922668f94..52df37e0430 100644 --- a/core/lib/l1_contract_interface/src/multicall3/mod.rs +++ b/core/lib/l1_contract_interface/src/multicall3/mod.rs @@ -7,6 +7,7 @@ use zksync_types::{ }; /// Multicall3 contract aggregate method input vector struct. +#[derive(Debug)] pub struct Multicall3Call { pub target: Address, pub allow_failure: bool, @@ -21,6 +22,7 @@ impl Tokenizable for Multicall3Call { self.calldata.into_token(), ]) } + fn from_token(token: Token) -> Result { let Token::Tuple(mut result_token) = token else { return Err(error(&[token], "Multicall3Call")); diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 7d604157d1a..ab418d24cd1 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -37,10 +37,10 @@ once_cell.workspace = true thiserror.workspace = true tracing.workspace = true vise.workspace = true +ethabi.workspace = true [dev-dependencies] assert_matches.workspace = true pretty_assertions.workspace = true zksync_test_account.workspace = true -ethabi.workspace = true zksync_eth_signer.workspace = true diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index ce928e652d7..50bb19938fe 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -47,6 +47,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -103,6 +104,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -158,6 +160,7 @@ impl GlueFrom for crate::interface: circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -227,6 +230,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } @@ 
-259,6 +263,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } @@ -307,6 +312,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs index 3cb61b461a4..4c4cffcc687 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs @@ -22,6 +22,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } @@ -48,6 +49,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } @@ -74,6 +76,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs index 2dc680ba77d..8978d4348ed 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -66,12 +66,14 @@ impl GlueFrom VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, } } @@ -100,12 +102,14 @@ impl logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, TxRevertReason::Halt(halt) => VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, } } @@ -129,6 +133,7 @@ impl GlueFrom { unreachable!("Halt is the only revert reason for VM 5") diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs index 2160c4b56a0..cc199fef941 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs @@ -99,6 +99,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs index d42d1880933..f6e49cd8b14 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs @@ -96,6 +96,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs index 79669eddd56..b8b939f8673 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs @@ -93,6 +93,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 62fa82f52f2..0447304f69f 
100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -31,7 +31,7 @@ fn test_get_used_contracts() { .with_execution_mode(TxExecutionMode::VerifyExecute) .build(); - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); // create and push and execute some not-empty factory deps transaction with success status // to check that `get_decommitted_hashes()` updates @@ -50,7 +50,7 @@ fn test_get_used_contracts() { // Note: `Default_AA` will be in the list of used contracts if L2 tx is used assert_eq!( vm.vm.decommitted_hashes().collect::>(), - known_bytecodes_without_aa_code(&vm.vm) + known_bytecodes_without_base_system_contracts(&vm.vm) ); // create push and execute some non-empty factory deps transaction that fails @@ -83,20 +83,26 @@ fn test_get_used_contracts() { for factory_dep in tx2.execute.factory_deps { let hash = hash_bytecode(&factory_dep); let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm).contains(&hash_to_u256)); + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).contains(&hash_to_u256)); assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); } } -fn known_bytecodes_without_aa_code(vm: &Vm) -> HashSet { - let mut known_bytecodes_without_aa_code = vm +fn known_bytecodes_without_base_system_contracts(vm: &Vm) -> HashSet { + let mut known_bytecodes_without_base_system_contracts = vm .world .bytecode_cache .keys() .cloned() .collect::>(); - known_bytecodes_without_aa_code.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); - known_bytecodes_without_aa_code + known_bytecodes_without_base_system_contracts + .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); + if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { + let was_removed = + known_bytecodes_without_base_system_contracts.remove(&h256_to_u256(evm_emulator.hash)); + assert!(was_removed); + } + known_bytecodes_without_base_system_contracts } /// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index 88fe2dab5c9..b4448683cf7 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -107,7 +107,7 @@ fn test_require_eip712() { let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); l2_tx.set_input(aa_tx, hash); // Pretend that operator is malicious and sets the initiator to the AA account. 
l2_tx.common_data.initiator_address = account_abstraction.address; @@ -157,7 +157,7 @@ fn test_require_eip712() { let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); l2_tx.set_input(encoded_tx, aa_hash); let transaction: Transaction = l2_tx.into(); diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 10be6d88b04..0c20af57e03 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -639,6 +639,7 @@ impl VmInterface for Vm { total_log_queries: 0, }, refunds: result.refunds, + new_known_factory_deps: None, } } diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index b8242fa7ca8..e70f05f85ef 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -14,6 +14,7 @@ use crate::{ circuits_capacity::circuit_statistic_from_cycles, dispatcher::TracerDispatcher, DefaultExecutionTracer, PubdataTracer, RefundsTracer, }, + utils::extract_bytecodes_marked_as_known, vm::Vm, }, HistoryMode, @@ -55,6 +56,10 @@ impl Vm { .then_some(RefundsTracer::new(self.batch_env.clone(), self.subversion)); let mut tx_tracer: DefaultExecutionTracer = DefaultExecutionTracer::new( self.system_env.default_validation_computational_gas_limit, + self.system_env + .base_system_smart_contracts + .evm_emulator + .is_some(), execution_mode, mem::take(dispatcher), self.storage.clone(), @@ -95,6 +100,8 @@ impl Vm { circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics), ); let result = tx_tracer.result_tracer.into_result(); + let factory_deps_marked_as_known = extract_bytecodes_marked_as_known(&logs.events); + let new_known_factory_deps = self.decommit_bytecodes(&factory_deps_marked_as_known); *dispatcher = tx_tracer.dispatcher; let result = VmExecutionResultAndLogs { @@ -102,6 +109,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: Some(new_known_factory_deps), }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs index 98d71efa00f..6dd73866adf 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs @@ -59,7 +59,12 @@ impl Vm { tx: Transaction, with_compression: bool, ) { - let tx: TransactionData = tx.into(); + let use_evm_emulator = self + .system_env + .base_system_smart_contracts + .evm_emulator + .is_some(); + let tx = TransactionData::new(tx, use_evm_emulator); let overhead = tx.overhead_gas(); self.push_raw_transaction(tx, overhead, 0, with_compression); } diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs index 0315aa38327..d91fbfdb24d 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs @@ -5,9 +5,7 @@ use zk_evm_1_5_0::{ aux_structures::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, - zkevm_opcode_defs::{ - ContractCodeSha256, VersionedHashDef, 
VersionedHashHeader, VersionedHashNormalizedPreimage, - }, + zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, }; use zksync_types::{H256, U256}; use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; @@ -166,8 +164,8 @@ impl DecommittmentProcess _monotonic_cycle_counter: u32, mut partial_query: DecommittmentQuery, ) -> anyhow::Result { - let (stored_hash, length) = stored_hash_from_query(&partial_query); - partial_query.decommitted_length = length; + let versioned_hash = VersionedCodeHash::from_query(&partial_query); + let stored_hash = versioned_hash.to_stored_hash(); if let Some(memory_page) = self .decommitted_code_hashes @@ -178,10 +176,10 @@ impl DecommittmentProcess { partial_query.is_fresh = false; partial_query.memory_page = MemoryPage(memory_page); + partial_query.decommitted_length = versioned_hash.get_preimage_length() as u16; Ok(partial_query) } else { - partial_query.is_fresh = true; if self .decommitted_code_hashes .inner() @@ -190,7 +188,9 @@ impl DecommittmentProcess { self.decommitted_code_hashes .insert(stored_hash, None, partial_query.timestamp); - } + }; + partial_query.is_fresh = true; + partial_query.decommitted_length = versioned_hash.get_preimage_length() as u16; Ok(partial_query) } @@ -204,11 +204,10 @@ impl DecommittmentProcess memory: &mut M, ) -> anyhow::Result>> { assert!(partial_query.is_fresh); - self.decommitment_requests.push((), partial_query.timestamp); - let stored_hash = stored_hash_from_query(&partial_query).0; - + let versioned_hash = VersionedCodeHash::from_query(&partial_query); + let stored_hash = versioned_hash.to_stored_hash(); // We are fetching a fresh bytecode that we didn't read before. let values = self.get_bytecode(stored_hash, partial_query.timestamp); let page_to_use = partial_query.memory_page; @@ -251,28 +250,49 @@ impl DecommittmentProcess } } -fn concat_header_and_preimage( - header: VersionedHashHeader, - normalized_preimage: VersionedHashNormalizedPreimage, -) -> [u8; 32] { - let mut buffer = [0u8; 32]; +#[derive(Debug)] +// TODO: consider moving this to the zk-evm crate +enum VersionedCodeHash { + ZkEVM(VersionedHashHeader, VersionedHashNormalizedPreimage), + Evm(VersionedHashHeader, VersionedHashNormalizedPreimage), +} - buffer[0..4].copy_from_slice(&header.0); - buffer[4..32].copy_from_slice(&normalized_preimage.0); +impl VersionedCodeHash { + fn from_query(query: &DecommittmentQuery) -> Self { + match query.header.0[0] { + 1 => Self::ZkEVM(query.header, query.normalized_preimage), + 2 => Self::Evm(query.header, query.normalized_preimage), + _ => panic!("Unsupported hash version"), + } + } - buffer -} + /// Returns the hash in the format it is stored in the DB. + fn to_stored_hash(&self) -> U256 { + let (header, preimage) = match self { + Self::ZkEVM(header, preimage) => (header, preimage), + Self::Evm(header, preimage) => (header, preimage), + }; -/// For a given decommitment query, returns a pair of the stored hash as U256 and the length of the preimage in 32-byte words. -fn stored_hash_from_query(partial_query: &DecommittmentQuery) -> (U256, u16) { - let full_hash = - concat_header_and_preimage(partial_query.header, partial_query.normalized_preimage); + let mut hash = [0u8; 32]; + hash[0..4].copy_from_slice(&header.0); + hash[4..32].copy_from_slice(&preimage.0); - let versioned_hash = - ContractCodeSha256::try_deserialize(full_hash).expect("Invalid ContractCodeSha256 hash"); + // Hash[1] is used in both of the versions to denote whether the bytecode is being constructed. 
+ // We ignore this param. + hash[1] = 0; - let stored_hash = H256(ContractCodeSha256::serialize_to_stored(versioned_hash).unwrap()); - let length = versioned_hash.code_length_in_words; + h256_to_u256(H256(hash)) + } - (h256_to_u256(stored_hash), length) + fn get_preimage_length(&self) -> u32 { + // In zkEVM the hash[2..3] denotes the length of the preimage in words, while + // in EVM the hash[2..3] denotes the length of the preimage in bytes. + match self { + Self::ZkEVM(header, _) => { + let length_in_words = header.0[2] as u32 * 256 + header.0[3] as u32; + length_in_words * 32 + } + Self::Evm(header, _) => header.0[2] as u32 * 256 + header.0[3] as u32, + } + } } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs new file mode 100644 index 00000000000..ca8157b170d --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -0,0 +1,76 @@ +use ethabi::Token; +use zksync_contracts::read_bytecode; +use zksync_system_constants::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_types::{get_code_key, get_known_code_key, Execute, H256}; +use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256}; +use zksync_vm_interface::VmInterfaceExt; + +use crate::{ + interface::{storage::InMemoryStorage, TxExecutionMode}, + versions::testonly::default_system_env, + vm_latest::{tests::tester::VmTesterBuilder, utils::hash_evm_bytecode, HistoryEnabled}, +}; + +const MOCK_DEPLOYER_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockContractDeployer.json"; +const MOCK_KNOWN_CODE_STORAGE_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockKnownCodeStorage.json"; + +#[test] +fn tracing_evm_contract_deployment() { + let mock_deployer = read_bytecode(MOCK_DEPLOYER_PATH); + let mock_deployer_hash = hash_bytecode(&mock_deployer); + let mock_known_code_storage = read_bytecode(MOCK_KNOWN_CODE_STORAGE_PATH); + let mock_known_code_storage_hash = hash_bytecode(&mock_known_code_storage); + + // Override + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + storage.set_value(get_code_key(&CONTRACT_DEPLOYER_ADDRESS), mock_deployer_hash); + storage.set_value( + get_known_code_key(&mock_deployer_hash), + H256::from_low_u64_be(1), + ); + storage.set_value( + get_code_key(&KNOWN_CODES_STORAGE_ADDRESS), + mock_known_code_storage_hash, + ); + storage.set_value( + get_known_code_key(&mock_known_code_storage_hash), + H256::from_low_u64_be(1), + ); + storage.store_factory_dep(mock_deployer_hash, mock_deployer); + storage.store_factory_dep(mock_known_code_storage_hash, mock_known_code_storage); + + let mut system_env = default_system_env(); + // The EVM emulator will not be accessed, so we set it to a dummy value. 
+ system_env.base_system_smart_contracts.evm_emulator = + Some(system_env.base_system_smart_contracts.default_aa.clone()); + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let account = &mut vm.rich_accounts[0]; + + let args = [Token::Bytes((0..=u8::MAX).collect())]; + let evm_bytecode = ethabi::encode(&args); + let expected_bytecode_hash = hash_evm_bytecode(&evm_bytecode); + let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); + let deploy_tx = account.get_l2_tx_for_execute(execute, None); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Check that the surrogate EVM bytecode was added to the decommitter. + let known_bytecodes = vm.vm.state.decommittment_processor.known_bytecodes.inner(); + let known_evm_bytecode = + be_words_to_bytes(&known_bytecodes[&h256_to_u256(expected_bytecode_hash)]); + assert_eq!(known_evm_bytecode, evm_bytecode); + + let new_known_factory_deps = vm_result.new_known_factory_deps.unwrap(); + assert_eq!(new_known_factory_deps.len(), 2); // the deployed EraVM contract + EVM contract + assert_eq!( + new_known_factory_deps[&expected_bytecode_hash], + evm_bytecode + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index ef19717a627..d7cadc54b44 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -41,7 +41,7 @@ fn test_get_used_contracts() { .with_execution_mode(TxExecutionMode::VerifyExecute) .build(); - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); // create and push and execute some not-empty factory deps transaction with success status // to check that `get_used_contracts()` updates @@ -63,7 +63,7 @@ fn test_get_used_contracts() { .get_used_contracts() .into_iter() .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) + known_bytecodes_without_base_system_contracts(&vm.vm) .keys() .cloned() .collect::>() @@ -99,7 +99,7 @@ fn test_get_used_contracts() { for factory_dep in tx2.execute.factory_deps { let hash = hash_bytecode(&factory_dep); let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) + assert!(known_bytecodes_without_base_system_contracts(&vm.vm) .keys() .contains(&hash_to_u256)); assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); @@ -147,19 +147,24 @@ fn test_contract_is_used_right_after_prepare_to_decommit() { assert_eq!(vm.vm.get_used_contracts(), vec![bytecode_hash]); } -fn known_bytecodes_without_aa_code( +fn known_bytecodes_without_base_system_contracts( vm: &Vm, ) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm + let mut known_bytecodes_without_base_system_contracts = vm .state .decommittment_processor .known_bytecodes .inner() .clone(); - known_bytecodes_without_aa_code + known_bytecodes_without_base_system_contracts .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) .unwrap(); - known_bytecodes_without_aa_code + if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { + known_bytecodes_without_base_system_contracts + .remove(&h256_to_u256(evm_emulator.hash)) + 
.unwrap(); + } + known_bytecodes_without_base_system_contracts } /// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 0fc12848227..4bb32cdf7ae 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -52,7 +52,7 @@ fn test_l1_tx_execution() { let contract_code = read_test_contract(); let account = &mut vm.rich_accounts[0]; let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); + let tx_data = TransactionData::new(deploy_tx.tx.clone(), false); let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { shard_id: 0, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index 1203d61b80b..112be637fe0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -8,6 +8,7 @@ mod call_tracer; mod circuits; mod code_oracle; mod constants; +mod evm_emulator; mod gas_limit; mod get_used_contracts; mod is_write_initial; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 91d78c69a93..6be49367d39 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -61,19 +61,17 @@ fn test_nonce_holder() { // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: Some(account.address), - calldata: vec![12], - value: Default::default(), - factory_deps: vec![], - }, - None, - Nonce(nonce), - ) - .into(); - + let tx = account.get_l2_tx_for_execute_with_nonce( + Execute { + contract_address: Some(account.address), + calldata: vec![12], + value: Default::default(), + factory_deps: vec![], + }, + None, + Nonce(nonce), + ); + let mut transaction_data = TransactionData::new(tx, false); transaction_data.signature = vec![test_mode.into()]; vm.vm.push_raw_transaction(transaction_data, 0, 0, true); let result = vm.vm.execute(VmExecutionMode::OneTx); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index cc0085f2025..c00192aa8f1 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -62,7 +62,7 @@ fn test_predetermined_refunded_gas() { .with_rich_accounts(vec![account.clone()]) .build(); - let tx: TransactionData = tx.into(); + let tx = TransactionData::new(tx, false); // Overhead let overhead = tx.overhead_gas(); vm.vm diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index a6dc7118005..1f38c6f947e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -98,7 +98,7 @@ fn test_require_eip712() { let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); l2_tx.set_input(aa_tx, hash); // Pretend that operator is malicious and sets the initiator to the AA account. l2_tx.common_data.initiator_address = account_abstraction.address; @@ -148,7 +148,7 @@ fn test_require_eip712() { let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); l2_tx.set_input(encoded_tx, aa_hash); let transaction: Transaction = l2_tx.into(); diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs index 6a908c2a73e..2ae5e81a328 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs @@ -13,7 +13,7 @@ use zk_evm_1_5_0::{ zkevm_opcode_defs::{decoding::EncodingModeProduction, Opcode, RetOpcode}, }; -use super::PubdataTracer; +use super::{EvmDeployTracer, PubdataTracer}; use crate::{ glue::GlueInto, interface::{ @@ -38,7 +38,7 @@ use crate::{ }; /// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. -pub(crate) struct DefaultExecutionTracer { +pub struct DefaultExecutionTracer { tx_has_been_processed: bool, execution_mode: VmExecutionMode, @@ -63,14 +63,18 @@ pub(crate) struct DefaultExecutionTracer { // It only takes into account circuits that are generated for actual execution. 
It doesn't // take into account e.g circuits produced by the initial bootloader memory commitment. pub(crate) circuits_tracer: CircuitsTracer, + // This tracer is responsible for handling EVM deployments and providing the data to the code decommitter. + pub(crate) evm_deploy_tracer: Option>, subversion: MultiVMSubversion, storage: StoragePtr, _phantom: PhantomData, } impl DefaultExecutionTracer { + #[allow(clippy::too_many_arguments)] pub(crate) fn new( computational_gas_limit: u32, + use_evm_emulator: bool, execution_mode: VmExecutionMode, dispatcher: TracerDispatcher, storage: StoragePtr, @@ -92,6 +96,7 @@ impl DefaultExecutionTracer { pubdata_tracer, ret_from_the_bootloader: None, circuits_tracer: CircuitsTracer::new(), + evm_deploy_tracer: use_evm_emulator.then(EvmDeployTracer::new), storage, _phantom: PhantomData, } @@ -172,6 +177,9 @@ macro_rules! dispatch_tracers { tracer.$function($( $params ),*); } $self.circuits_tracer.$function($( $params ),*); + if let Some(tracer) = &mut $self.evm_deploy_tracer { + tracer.$function($( $params ),*); + } }; } @@ -289,6 +297,12 @@ impl DefaultExecutionTracer { .finish_cycle(state, bootloader_state) .stricter(&result); + if let Some(evm_deploy_tracer) = &mut self.evm_deploy_tracer { + result = evm_deploy_tracer + .finish_cycle(state, bootloader_state) + .stricter(&result); + } + result.stricter(&self.should_stop_execution()) } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs new file mode 100644 index 00000000000..d91ee13a920 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs @@ -0,0 +1,105 @@ +use std::{marker::PhantomData, mem}; + +use zk_evm_1_5_0::{ + aux_structures::Timestamp, + tracing::{AfterExecutionData, VmLocalStateData}, + zkevm_opcode_defs::{ + FarCallOpcode, FatPointer, Opcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, + }, +}; +use zksync_types::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_vm_interface::storage::StoragePtr; + +use super::{traits::VmTracer, utils::read_pointer}; +use crate::{ + interface::{storage::WriteStorage, tracer::TracerExecutionStatus}, + tracers::dynamic::vm_1_5_0::DynTracer, + vm_latest::{ + utils::hash_evm_bytecode, BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState, + }, +}; + +/// Tracer responsible for collecting information about EVM deploys and providing those +/// to the code decommitter. 
+#[derive(Debug)] +pub(crate) struct EvmDeployTracer { + tracked_signature: [u8; 4], + pending_bytecodes: Vec>, + _phantom: PhantomData, +} + +impl EvmDeployTracer { + pub(crate) fn new() -> Self { + let tracked_signature = + ethabi::short_signature("publishEVMBytecode", &[ethabi::ParamType::Bytes]); + + Self { + tracked_signature, + pending_bytecodes: vec![], + _phantom: PhantomData, + } + } +} + +impl DynTracer> for EvmDeployTracer { + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &SimpleMemory, + _storage: StoragePtr, + ) { + if !matches!( + data.opcode.variant.opcode, + Opcode::FarCall(FarCallOpcode::Normal) + ) { + return; + }; + + let current = state.vm_local_state.callstack.current; + let from = current.msg_sender; + let to = current.this_address; + if from != CONTRACT_DEPLOYER_ADDRESS || to != KNOWN_CODES_STORAGE_ADDRESS { + return; + } + + let calldata_ptr = + state.vm_local_state.registers[usize::from(CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER)]; + let data = read_pointer(memory, FatPointer::from_u256(calldata_ptr.value)); + if data.len() < 4 { + return; + } + let (signature, data) = data.split_at(4); + if signature != self.tracked_signature { + return; + } + + match ethabi::decode(&[ethabi::ParamType::Bytes], data) { + Ok(decoded) => { + let published_bytecode = decoded.into_iter().next().unwrap().into_bytes().unwrap(); + self.pending_bytecodes.push(published_bytecode); + } + Err(err) => tracing::error!("Unable to decode `publishEVMBytecode` call: {err}"), + } + } +} + +impl VmTracer for EvmDeployTracer { + fn finish_cycle( + &mut self, + state: &mut ZkSyncVmState, + _bootloader_state: &mut BootloaderState, + ) -> TracerExecutionStatus { + for published_bytecode in mem::take(&mut self.pending_bytecodes) { + let hash = hash_evm_bytecode(&published_bytecode); + let as_words = bytes_to_be_words(published_bytecode); + + state.decommittment_processor.populate( + vec![(h256_to_u256(hash), as_words)], + Timestamp(state.local_state.timestamp), + ); + } + TracerExecutionStatus::Continue + } +} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs index fe916e19e8c..82721a32264 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs @@ -1,11 +1,13 @@ pub(crate) use circuits_tracer::CircuitsTracer; pub(crate) use default_tracers::DefaultExecutionTracer; +pub(crate) use evm_deploy_tracer::EvmDeployTracer; pub(crate) use pubdata_tracer::PubdataTracer; pub(crate) use refunds::RefundsTracer; pub(crate) use result_tracer::ResultTracer; pub(crate) mod circuits_tracer; pub(crate) mod default_tracers; +pub(crate) mod evm_deploy_tracer; pub(crate) mod pubdata_tracer; pub(crate) mod refunds; pub(crate) mod result_tracer; diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 2ec86eb3cea..90948f2f89f 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -46,8 +46,8 @@ pub(crate) struct TransactionData { pub(crate) raw_bytes: Option>, } -impl From for TransactionData { - fn from(execute_tx: Transaction) -> Self { +impl TransactionData { + pub(crate) fn new(execute_tx: Transaction, use_evm_emulator: bool) -> Self { match execute_tx.common_data { 
ExecuteTransactionCommon::L2(common_data) => { let nonce = U256::from_big_endian(&common_data.nonce.to_be_bytes()); @@ -62,6 +62,19 @@ impl From for TransactionData { U256::zero() }; + let should_deploy_contract = if execute_tx.execute.contract_address.is_none() { + // Transactions with no `contract_address` should be filtered out by the API server, + // so this is more of a sanity check. + assert!( + use_evm_emulator, + "`execute.contract_address` not set for transaction {:?} with EVM emulation disabled", + common_data.hash() + ); + U256([1, 0, 0, 0]) + } else { + U256::zero() + }; + // Ethereum transactions do not sign gas per pubdata limit, and so for them we need to use // some default value. We use the maximum possible value that is allowed by the bootloader // (i.e. we can not use u64::MAX, because the bootloader requires gas per pubdata for such @@ -85,7 +98,7 @@ impl From for TransactionData { value: execute_tx.execute.value, reserved: [ should_check_chain_id, - U256::zero(), + should_deploy_contract, U256::zero(), U256::zero(), ], diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs index 6f9522572ad..cb4b13eecdf 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs @@ -98,6 +98,13 @@ pub(crate) fn new_vm_state( Timestamp(0), ); + if let Some(evm_emulator) = &system_env.base_system_smart_contracts.evm_emulator { + decommittment_processor.populate( + vec![(h256_to_u256(evm_emulator.hash), evm_emulator.code.clone())], + Timestamp(0), + ); + } + memory.populate( vec![( BOOTLOADER_CODE_PAGE, @@ -117,6 +124,13 @@ pub(crate) fn new_vm_state( Timestamp(0), ); + // By convention, default AA is used as a fallback if the EVM emulator is not available. + let evm_emulator_code_hash = system_env + .base_system_smart_contracts + .evm_emulator + .as_ref() + .unwrap_or(&system_env.base_system_smart_contracts.default_aa) + .hash; let mut vm = VmState::empty_state( storage_oracle, memory, @@ -128,11 +142,7 @@ pub(crate) fn new_vm_state( default_aa_code_hash: h256_to_u256( system_env.base_system_smart_contracts.default_aa.hash, ), - // For now, the default account hash is used as the code hash for the EVM simulator. - // In the 1.5.0 version, it is not possible to instantiate EVM bytecode. - evm_simulator_code_hash: h256_to_u256( - system_env.base_system_smart_contracts.default_aa.hash, - ), + evm_simulator_code_hash: h256_to_u256(evm_emulator_code_hash), zkporter_is_available: system_env.zk_porter_available, }, ); diff --git a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs index 0fb803de5d4..e07d3eda7c4 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs @@ -1,6 +1,57 @@ -/// Utility functions for the VM. +//! Utility functions for the VM. 
+ +use once_cell::sync::Lazy; +use zk_evm_1_5_0::{ + aux_structures::MemoryPage, + sha2, + zkevm_opcode_defs::{BlobSha256Format, VersionedHashLen32}, +}; +use zksync_types::{H256, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_vm_interface::VmEvent; + pub mod fee; pub mod l2_blocks; pub(crate) mod logs; pub mod overhead; pub mod transaction_encoding; + +pub(crate) fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { + use sha2::{Digest, Sha256}; + let mut hasher = Sha256::new(); + let len = bytecode.len() as u16; + hasher.update(bytecode); + let result = hasher.finalize(); + + let mut output = [0u8; 32]; + output[..].copy_from_slice(result.as_slice()); + output[0] = BlobSha256Format::VERSION_BYTE; + output[1] = 0; + output[2..4].copy_from_slice(&len.to_be_bytes()); + + H256(output) +} + +pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 2) +} + +/// Extracts all bytecodes marked as known on the system contracts. +pub fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { + static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { + ethabi::long_signature( + "MarkedAsKnown", + &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], + ) + }); + + all_generated_events + .iter() + .filter(|event| { + // Filter events from the deployer contract that match the expected signature. + event.address == KNOWN_CODES_STORAGE_ADDRESS + && event.indexed_topics.len() == 3 + && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE + }) + .map(|event| event.indexed_topics[1]) + .collect() +} diff --git a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs index 86c49a3eb15..ed532f89dbc 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs @@ -10,7 +10,9 @@ pub trait TransactionVmExt { impl TransactionVmExt for Transaction { fn bootloader_encoding_size(&self) -> usize { - let transaction_data: TransactionData = self.clone().into(); + // Since we want to just measure the encoding size, `use_evm_emulator` arg doesn't matter here, + // so we use a more lenient option. 
+ let transaction_data = TransactionData::new(self.clone(), true); transaction_data.into_tokens().len() } } diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 8ccd600a79e..f4cc1580e93 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -1,10 +1,12 @@ +use std::collections::HashMap; + use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, vm::VmVersion, Transaction, H256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{be_words_to_bytes, h256_to_u256, u256_to_h256}; use crate::{ glue::GlueInto, @@ -79,6 +81,20 @@ impl Vm { self.state.local_state.callstack.current.ergs_remaining } + pub(crate) fn decommit_bytecodes(&self, hashes: &[H256]) -> HashMap> { + let bytecodes = hashes.iter().map(|&hash| { + let bytecode_words = self + .state + .decommittment_processor + .known_bytecodes + .inner() + .get(&h256_to_u256(hash)) + .unwrap_or_else(|| panic!("Bytecode with hash {hash:?} not found")); + (hash, be_words_to_bytes(bytecode_words)) + }); + bytecodes.collect() + } + // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs index 8196760a621..9462a89be2a 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs @@ -90,6 +90,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs index c48d48edd3b..b1ad4d257b7 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs @@ -88,6 +88,7 @@ impl Vm { .refund_tracer .map(|r| r.get_refunds()) .unwrap_or_default(), + new_known_factory_deps: None, }; tx_tracer.dispatcher.save_results(&mut result); diff --git a/core/lib/protobuf_config/src/chain.rs b/core/lib/protobuf_config/src/chain.rs index f91bf07e43f..2f8ac8df07e 100644 --- a/core/lib/protobuf_config/src/chain.rs +++ b/core/lib/protobuf_config/src/chain.rs @@ -86,6 +86,7 @@ impl ProtoRepr for proto::StateKeeper { // needed during the initialization from files bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, fee_account_addr: None, l1_batch_commit_data_generator_mode: Default::default(), }) diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs index 59896aa244d..7ecc768100f 100644 --- a/core/lib/protobuf_config/src/genesis.rs +++ b/core/lib/protobuf_config/src/genesis.rs @@ -75,6 +75,12 @@ impl ProtoRepr for proto::Genesis { .and_then(|x| parse_h256(x)) .context("default_aa_hash")?, ), + evm_emulator_hash: self + .evm_emulator_hash + .as_deref() + .map(parse_h256) + .transpose() + .context("evm_emulator_hash")?, l1_chain_id: required(&self.l1_chain_id) .map(|x| L1ChainId(*x)) .context("l1_chain_id")?, @@ -105,6 +111,7 @@ impl ProtoRepr for proto::Genesis { genesis_protocol_semantic_version: 
this.protocol_version.map(|x| x.to_string()), default_aa_hash: this.default_aa_hash.map(|x| format!("{:?}", x)), bootloader_hash: this.bootloader_hash.map(|x| format!("{:?}", x)), + evm_emulator_hash: this.evm_emulator_hash.map(|x| format!("{:?}", x)), fee_account: Some(format!("{:?}", this.fee_account)), l1_chain_id: Some(this.l1_chain_id.0), l2_chain_id: Some(this.l2_chain_id.as_u64()), diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index a0f4d45214f..c89199359aa 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -12,11 +12,14 @@ mod commitment_generator; mod consensus; mod contract_verifier; mod contracts; +mod da_client; mod da_dispatcher; mod database; mod en; mod eth; mod experimental; +mod external_price_api_client; +mod external_proof_integration_api; mod general; mod genesis; mod house_keeper; @@ -25,15 +28,11 @@ mod observability; mod proof_data_handler; pub mod proto; mod prover; +mod prover_job_monitor; mod pruning; mod secrets; -mod snapshots_creator; - -mod da_client; -mod external_price_api_client; -mod external_proof_integration_api; -mod prover_job_monitor; mod snapshot_recovery; +mod snapshots_creator; #[cfg(test)] mod tests; mod utils; diff --git a/core/lib/protobuf_config/src/proto/config/genesis.proto b/core/lib/protobuf_config/src/proto/config/genesis.proto index 08cbb954fcb..e3a9a45366f 100644 --- a/core/lib/protobuf_config/src/proto/config/genesis.proto +++ b/core/lib/protobuf_config/src/proto/config/genesis.proto @@ -28,5 +28,6 @@ message Genesis { optional Prover prover = 10; optional L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 29; // optional, default to rollup optional string genesis_protocol_semantic_version = 12; // optional; + optional string evm_emulator_hash = 13; // optional; h256 reserved 11; reserved "shared_bridge"; } diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 22a20223c8b..8fe192a5f51 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -144,6 +144,8 @@ pub struct VMRunWitnessInputData { pub protocol_version: ProtocolVersionId, pub bootloader_code: Vec<[u8; 32]>, pub default_account_code_hash: U256, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub evm_emulator_code_hash: Option, pub storage_refunds: Vec, pub pubdata_costs: Vec, pub witness_block_state: WitnessStorageState, diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 68b25416d66..86b563f823e 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -306,6 +306,7 @@ mod tests { code: vec![U256([1; 4])], hash: H256([1; 32]), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 1e5a1b3fe65..103b6de1fb3 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -643,7 +643,7 @@ pub struct ProtocolVersion { /// Verifier configuration #[deprecated] pub verification_keys_hashes: Option, - /// Hashes of base system contracts (bootloader and default account) + /// Hashes of base system contracts (bootloader, default account and evm simulator) #[deprecated] pub base_system_contracts: Option, /// Bootloader code hash @@ -652,6 +652,9 @@ pub struct ProtocolVersion { /// Default account code hash #[serde(rename = "defaultAccountCodeHash")] pub 
default_account_code_hash: Option, + /// EVM emulator code hash + #[serde(rename = "evmSimulatorCodeHash")] + pub evm_emulator_code_hash: Option, /// L2 Upgrade transaction hash #[deprecated] pub l2_system_upgrade_tx_hash: Option, @@ -667,6 +670,7 @@ impl ProtocolVersion { timestamp: u64, bootloader_code_hash: H256, default_account_code_hash: H256, + evm_emulator_code_hash: Option, l2_system_upgrade_tx_hash: Option, ) -> Self { Self { @@ -677,9 +681,11 @@ impl ProtocolVersion { base_system_contracts: Some(BaseSystemContractsHashes { bootloader: bootloader_code_hash, default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, }), bootloader_code_hash: Some(bootloader_code_hash), default_account_code_hash: Some(default_account_code_hash), + evm_emulator_code_hash, l2_system_upgrade_tx_hash, l2_system_upgrade_tx_hash_new: l2_system_upgrade_tx_hash, } @@ -695,6 +701,13 @@ impl ProtocolVersion { .or_else(|| self.base_system_contracts.map(|hashes| hashes.default_aa)) } + pub fn evm_emulator_code_hash(&self) -> Option { + self.evm_emulator_code_hash.or_else(|| { + self.base_system_contracts + .and_then(|hashes| hashes.evm_emulator) + }) + } + pub fn minor_version(&self) -> Option { self.minor_version.or(self.version_id) } @@ -917,6 +930,7 @@ mod tests { base_system_contracts: Some(Default::default()), bootloader_code_hash: Some(Default::default()), default_account_code_hash: Some(Default::default()), + evm_emulator_code_hash: Some(Default::default()), l2_system_upgrade_tx_hash: Default::default(), l2_system_upgrade_tx_hash_new: Default::default(), }; diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 63d1bad486f..759ee8947ba 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -467,6 +467,7 @@ pub struct L1BatchMetaParameters { pub zkporter_is_available: bool, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, + pub evm_emulator_code_hash: Option, pub protocol_version: Option, } @@ -482,10 +483,11 @@ impl L1BatchMetaParameters { .protocol_version .map_or(false, |ver| ver.is_post_1_5_0()) { - // EVM simulator hash for now is the same as the default AA hash. 
- result.extend(self.default_aa_code_hash.as_bytes()); + let evm_emulator_code_hash = self + .evm_emulator_code_hash + .unwrap_or(self.default_aa_code_hash); + result.extend(evm_emulator_code_hash.as_bytes()); } - result } @@ -551,6 +553,7 @@ impl L1BatchCommitment { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash: input.common().bootloader_code_hash, default_aa_code_hash: input.common().default_aa_code_hash, + evm_emulator_code_hash: input.common().evm_emulator_code_hash, protocol_version: Some(input.common().protocol_version), }; @@ -653,6 +656,7 @@ pub struct CommitmentCommonInput { pub rollup_root_hash: H256, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, + pub evm_emulator_code_hash: Option, pub protocol_version: ProtocolVersionId, } @@ -693,6 +697,7 @@ impl CommitmentInput { rollup_root_hash, bootloader_code_hash: base_system_contracts_hashes.bootloader, default_aa_code_hash: base_system_contracts_hashes.default_aa, + evm_emulator_code_hash: base_system_contracts_hashes.evm_emulator, protocol_version, }; if protocol_version.is_pre_boojum() { diff --git a/core/lib/types/src/commitment/tests/mod.rs b/core/lib/types/src/commitment/tests/mod.rs index 34e308cfd0a..33fb0142b04 100644 --- a/core/lib/types/src/commitment/tests/mod.rs +++ b/core/lib/types/src/commitment/tests/mod.rs @@ -50,3 +50,8 @@ fn post_boojum_1_4_2() { fn post_boojum_1_5_0() { run_test("post_boojum_1_5_0_test"); } + +#[test] +fn post_boojum_1_5_0_with_evm() { + run_test("post_boojum_1_5_0_test_with_evm"); +} diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json new file mode 100644 index 00000000000..4e8c0e0814a --- /dev/null +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json @@ -0,0 +1,359 @@ +{ + "input": { + "PostBoojum": { + "common": { + "l2_to_l1_logs": [ + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x7814f203b8e02f6a676b8f7faefcf732d8b4368bab25239ea4525010aa85d5ee", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + ], + "rollup_last_leaf_index": 89, + "rollup_root_hash": "0xe47f013d1ecd4ce53b6872f6b762670b393815e7ddacdf2b0886af9c7f3a555b", + "bootloader_code_hash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "default_aa_code_hash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + "evm_emulator_code_hash": "0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91", + "protocol_version": "Version23" + }, + "system_logs": [ + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 0, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000004", + "value": "0x55618db5ff24aee4d236921b6f4272101161137115a3b4c4a65f8677b124c01c" + }, + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 1, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000003", + "value": "0x00000000000000000000000065c22f8000000000000000000000000065c22f81" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000005", + "value": 
"0x155c82febe94e07df0065c153e8ed403b5351fd64d657c8dffbfbee8ec3d2ba3" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000006", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x85a7fb853512ba6575c99ee121dd560559523a4587a2cd7e83cd359cd9ea2aed" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000002", + "value": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000007", + "value": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000008" + } + ], + "state_diffs": [ + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1", + "derived_key": [ + 113, 233, 23, 33, 249, 145, 133, 118, 215, 96, 240, 47, 3, 202, 196, + 124, 111, 64, 3, 49, 96, 49, 132, 142, 60, 29, 153, 230, 232, 58, + 71, 67 + ], + "enumeration_index": 49, + "initial_value": "0x18776f28c303800", + "final_value": "0x708da482cab20760" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x294a00337abeee2b3cd948ffeed92231e2a3acc2eb11210400e0aa9557f23e26", + "derived_key": [ + 45, 90, 105, 
98, 204, 206, 229, 212, 173, 180, 138, 54, 187, 191, + 68, 58, 83, 23, 33, 72, 67, 129, 18, 89, 55, 243, 0, 26, 197, 255, + 135, 91 + ], + "enumeration_index": 50, + "initial_value": "0xf5559e28fd66c0", + "final_value": "0xf5a19b324caf80" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, 166, 75, 35, 133, + 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, 64, 49, 220, + 193, 72, 27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x6f05e193353286a0" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x7", + "derived_key": [ + 18, 59, 175, 197, 134, 247, 119, 100, 72, 140, 210, 76, 106, 119, + 84, 110, 90, 15, 232, 189, 251, 79, 162, 3, 207, 175, 252, 54, 204, + 228, 221, 91 + ], + "enumeration_index": 53, + "initial_value": "0x100000000000000000000000065c22e3e", + "final_value": "0x200000000000000000000000065c22f80" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x9", + "derived_key": [ + 142, 125, 208, 106, 197, 183, 59, 71, 59, 230, 188, 90, 81, 3, 15, + 76, 116, 55, 101, 124, 183, 178, 155, 243, 118, 197, 100, 184, 209, + 103, 90, 94 + ], + "enumeration_index": 54, + "initial_value": "0x200000000000000000000000065c22e3f", + "final_value": "0x400000000000000000000000065c22f81" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xd", + "derived_key": [ + 235, 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, + 40, 14, 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, + 247, 152, 97 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xebbe609cd3ccd11f273eb94374d6d3a2f7856c5f1039dc4877c6a334188ac7c1" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xe", + "derived_key": [ + 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, 58, + 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x708e7fcf68ebab6c87322686cac4bcdb5f2bd4c71f337b18d147fd9a6c44ad13" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10c", + "derived_key": [ + 121, 9, 53, 136, 208, 232, 71, 239, 167, 58, 16, 206, 32, 228, 121, + 159, 177, 228, 102, 66, 214, 86, 23, 199, 229, 33, 63, 160, 73, 137, + 217, 45 + ], + "enumeration_index": 57, + "initial_value": "0x200000000000000000000000065c22e3f", + "final_value": "0x400000000000000000000000065c22f81" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xad67d757c34507f157cacfa2e3153e9f260a2244f30428821be7be64587ac55f", + "derived_key": [ + 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, 44, 164, 124, 169, + 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, 57, 36, 22, + 48, 203, 70 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x55618db5ff24aee4d236921b6f4272101161137115a3b4c4a65f8677b124c01c" + } + ], + "aux_commitments": { + "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", + "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" + }, + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + 
"0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + }, + "pass_through_data": { + "shared_states": [ + { + "last_leaf_index": 89, + "root_hash": "0xe47f013d1ecd4ce53b6872f6b762670b393815e7ddacdf2b0886af9c7f3a555b" + }, + { + "last_leaf_index": 0, + "root_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "meta_parameters": { + "zkporter_is_available": false, + "bootloader_code_hash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "default_aa_code_hash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + "evm_emulator_code_hash": "0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91", + "protocol_version": "Version23" + }, + "auxiliary_output": { + "PostBoojum": { + "common": { + "l2_l1_logs_merkle_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff", + "protocol_version": "Version23" + }, + "system_logs_linear_hash": "0x602dacc0a26e3347f0679924c4ae151ff5200e7dd80902fe0fc11c806c4d3ffb", + "state_diffs_compressed": [ + 1, 0, 1, 72, 4, 0, 4, 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, + 166, 75, 35, 133, 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, + 64, 49, 220, 193, 72, 27, 65, 111, 5, 225, 147, 53, 50, 134, 160, 235, + 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, 40, 14, + 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, 247, 152, 97, + 0, 235, 190, 96, 156, 211, 204, 209, 31, 39, 62, 185, 67, 116, 214, 211, + 162, 247, 133, 108, 95, 16, 57, 220, 72, 119, 198, 163, 52, 24, 138, + 199, 193, 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, + 58, 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216, 0, 112, 142, 127, 207, 104, 235, 171, 108, 135, 50, 38, 134, + 202, 196, 188, 219, 95, 43, 212, 199, 31, 51, 123, 24, 209, 71, 253, + 154, 108, 68, 173, 19, 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, + 44, 164, 124, 169, 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, + 57, 36, 22, 48, 203, 70, 0, 85, 97, 141, 181, 255, 36, 174, 228, 210, + 54, 146, 27, 111, 66, 114, 16, 17, 97, 19, 113, 21, 163, 180, 196, 166, + 95, 134, 119, 177, 36, 192, 28, 0, 0, 0, 49, 65, 111, 6, 45, 144, 62, + 129, 207, 96, 0, 0, 0, 50, 49, 75, 253, 9, 79, 72, 192, 0, 0, 0, 53, + 137, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 54, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 57, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66 + ], + "state_diffs_hash": 
"0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0", + "aux_commitments": { + "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", + "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" + }, + "blob_linear_hashes": [ + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000007", + "0x0000000000000000000000000000000000000000000000000000000000000008", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + }, + "hashes": { + "pass_through_data": "0x6a3ffc0f55d4abce9498b8bcb01a3018bc2b83d96acb27e23772fe9347954725", + "aux_output": "0xadc63d9c45f85598f3e3c232970315d1f6ac96222e379e16ced7a204524a4061", + "meta_parameters": "0x02531e5cc22688523a4ac9317e5097743771f6914015cf1152491cf22084bd58", + "commitment": "0x4fdd8c5b231dfc9fc81aba744a90fbec78627f529ac29f9fc758a7b9e62fa321" + } +} diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 036d2a7a036..48e813e571d 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -216,7 +216,9 @@ impl L2Tx { let raw = req.get_signed_bytes(&sig).context("get_signed_bytes")?; let (req, hash) = TransactionRequest::from_bytes_unverified(&raw).context("from_bytes_unverified()")?; - let mut tx = L2Tx::from_request_unverified(req).context("from_request_unverified()")?; + // Since we allow users 
to specify `None` recipient, EVM emulation is implicitly enabled. + let mut tx = + L2Tx::from_request_unverified(req, true).context("from_request_unverified()")?; tx.set_input(raw, hash); Ok(tx) } diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 67661eb14ad..a50fc8a655b 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -331,9 +331,14 @@ impl TryFrom for abi::Transaction { } } -impl TryFrom for Transaction { - type Error = anyhow::Error; - fn try_from(tx: abi::Transaction) -> anyhow::Result { +impl Transaction { + /// Converts a transaction from its ABI representation. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables L2 transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. + pub fn from_abi(tx: abi::Transaction, allow_no_target: bool) -> anyhow::Result { Ok(match tx { abi::Transaction::L1 { tx, @@ -405,7 +410,7 @@ impl TryFrom for Transaction { abi::Transaction::L2(raw) => { let (req, hash) = transaction_request::TransactionRequest::from_bytes_unverified(&raw)?; - let mut tx = L2Tx::from_request_unverified(req)?; + let mut tx = L2Tx::from_request_unverified(req, allow_no_target)?; tx.set_input(raw, hash); tx.into() } diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs index 1afb108a053..48f26dfd5c7 100644 --- a/core/lib/types/src/protocol_upgrade.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -62,6 +62,8 @@ pub struct ProtocolUpgrade { pub bootloader_code_hash: Option, /// New default account code hash. pub default_account_code_hash: Option, + /// New EVM emulator code hash + pub evm_emulator_code_hash: Option, /// New verifier params. pub verifier_params: Option, /// New verifier address. @@ -118,17 +120,21 @@ impl ProtocolUpgrade { bootloader_code_hash: (bootloader_hash != H256::zero()).then_some(bootloader_hash), default_account_code_hash: (default_account_hash != H256::zero()) .then_some(default_account_hash), + evm_emulator_code_hash: None, // EVM emulator upgrades are not supported yet verifier_params: (upgrade.verifier_params != abi::VerifierParams::default()) .then_some(upgrade.verifier_params.into()), verifier_address: (upgrade.verifier != Address::zero()).then_some(upgrade.verifier), timestamp: upgrade.upgrade_timestamp.try_into().unwrap(), tx: (upgrade.l2_protocol_upgrade_tx.tx_type != U256::zero()) .then(|| { - Transaction::try_from(abi::Transaction::L1 { - tx: upgrade.l2_protocol_upgrade_tx, - factory_deps: upgrade.factory_deps, - eth_block: 0, - }) + Transaction::from_abi( + abi::Transaction::L1 { + tx: upgrade.l2_protocol_upgrade_tx, + factory_deps: upgrade.factory_deps, + eth_block: 0, + }, + true, + ) .context("Transaction::try_from()")? 
.try_into() .map_err(|err| anyhow::format_err!("try_into::(): {err}")) @@ -149,11 +155,14 @@ pub fn decode_set_chain_id_event( .unwrap_or_else(|_| panic!("Version is not supported, packed version: {full_version_id}")); Ok(( protocol_version, - Transaction::try_from(abi::Transaction::L1 { - tx: tx.into(), - eth_block: 0, - factory_deps: vec![], - }) + Transaction::from_abi( + abi::Transaction::L1 { + tx: tx.into(), + eth_block: 0, + factory_deps: vec![], + }, + true, + ) .unwrap() .try_into() .unwrap(), @@ -298,6 +307,9 @@ impl ProtocolVersion { default_aa: upgrade .default_account_code_hash .unwrap_or(self.base_system_contracts_hashes.default_aa), + evm_emulator: upgrade + .evm_emulator_code_hash + .or(self.base_system_contracts_hashes.evm_emulator), }, tx: upgrade.tx, } diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs index a30a57bffa5..9ef037dc29b 100644 --- a/core/lib/types/src/storage/mod.rs +++ b/core/lib/types/src/storage/mod.rs @@ -5,7 +5,7 @@ pub use log::*; use serde::{Deserialize, Serialize}; use zksync_basic_types::{web3::keccak256, L2ChainId}; pub use zksync_system_constants::*; -use zksync_utils::address_to_h256; +use zksync_utils::{address_to_h256, u256_to_h256}; use crate::{AccountTreeId, Address, H160, H256, U256}; @@ -78,6 +78,10 @@ pub fn get_code_key(account: &Address) -> StorageKey { StorageKey::new(account_code_storage, address_to_h256(account)) } +pub fn get_evm_code_hash_key(account: &Address) -> StorageKey { + get_deployer_key(get_address_mapping_key(account, u256_to_h256(1.into()))) +} + pub fn get_known_code_key(hash: &H256) -> StorageKey { let known_codes_storage = AccountTreeId::new(KNOWN_CODES_STORAGE_ADDRESS); StorageKey::new(known_codes_storage, *hash) @@ -88,6 +92,11 @@ pub fn get_system_context_key(key: H256) -> StorageKey { StorageKey::new(system_context, key) } +pub fn get_deployer_key(key: H256) -> StorageKey { + let deployer_contract = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); + StorageKey::new(deployer_contract, key) +} + pub fn get_is_account_key(account: &Address) -> StorageKey { let deployer = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index a28c45b8fea..4329680991c 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -1,11 +1,11 @@ use std::path::PathBuf; -use once_cell::sync::Lazy; use zksync_basic_types::{AccountTreeId, Address, U256}; use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage, SystemContractsRepo}; use zksync_system_constants::{ BOOTLOADER_UTILITIES_ADDRESS, CODE_ORACLE_ADDRESS, COMPRESSOR_ADDRESS, CREATE2_FACTORY_ADDRESS, - EVENT_WRITER_ADDRESS, P256VERIFY_PRECOMPILE_ADDRESS, PUBDATA_CHUNK_PUBLISHER_ADDRESS, + EVENT_WRITER_ADDRESS, EVM_GAS_MANAGER_ADDRESS, P256VERIFY_PRECOMPILE_ADDRESS, + PUBDATA_CHUNK_PUBLISHER_ADDRESS, }; use crate::{ @@ -25,7 +25,7 @@ use crate::{ pub const TX_NONCE_INCREMENT: U256 = U256([1, 0, 0, 0]); // 1 pub const DEPLOYMENT_NONCE_INCREMENT: U256 = U256([0, 0, 1, 0]); // 2^128 -static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 25] = [ +static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 26] = [ ( "", "AccountCodeStorage", @@ -147,6 +147,12 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 25] = [ COMPLEX_UPGRADER_ADDRESS, ContractLanguage::Sol, ), + ( + "", + "EvmGasManager", + EVM_GAS_MANAGER_ADDRESS, + ContractLanguage::Sol, + ), // For now, only zero 
address and the bootloader address have empty bytecode at the init // In the future, we might want to set all of the system contracts this way. ("", "EmptyContract", Address::zero(), ContractLanguage::Sol), @@ -170,29 +176,40 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 25] = [ ), ]; -static SYSTEM_CONTRACTS: Lazy> = Lazy::new(|| { +/// Gets default set of system contracts, based on Cargo workspace location. +pub fn get_system_smart_contracts(use_evm_emulator: bool) -> Vec { SYSTEM_CONTRACT_LIST .iter() - .map(|(path, name, address, contract_lang)| DeployedContract { - account_id: AccountTreeId::new(*address), - bytecode: read_sys_contract_bytecode(path, name, contract_lang.clone()), + .filter_map(|(path, name, address, contract_lang)| { + if *name == "EvmGasManager" && !use_evm_emulator { + None + } else { + Some(DeployedContract { + account_id: AccountTreeId::new(*address), + bytecode: read_sys_contract_bytecode(path, name, contract_lang.clone()), + }) + } }) - .collect::>() -}); - -/// Gets default set of system contracts, based on Cargo workspace location. -pub fn get_system_smart_contracts() -> Vec { - SYSTEM_CONTRACTS.clone() + .collect() } /// Loads system contracts from a given directory. -pub fn get_system_smart_contracts_from_dir(path: PathBuf) -> Vec { +pub fn get_system_smart_contracts_from_dir( + path: PathBuf, + use_evm_emulator: bool, +) -> Vec { let repo = SystemContractsRepo { root: path }; SYSTEM_CONTRACT_LIST .iter() - .map(|(path, name, address, contract_lang)| DeployedContract { - account_id: AccountTreeId::new(*address), - bytecode: repo.read_sys_contract_bytecode(path, name, contract_lang.clone()), + .filter_map(|(path, name, address, contract_lang)| { + if *name == "EvmGasManager" && !use_evm_emulator { + None + } else { + Some(DeployedContract { + account_id: AccountTreeId::new(*address), + bytecode: repo.read_sys_contract_bytecode(path, name, contract_lang.clone()), + }) + } }) .collect::>() } diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 5f26b1d6a6a..a8713f301ba 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -809,6 +809,7 @@ impl TransactionRequest { impl L2Tx { pub(crate) fn from_request_unverified( mut value: TransactionRequest, + allow_no_target: bool, ) -> Result { let fee = value.get_fee_data_checked()?; let nonce = value.get_nonce_checked()?; @@ -817,8 +818,7 @@ impl L2Tx { let meta = value.eip712_meta.take().unwrap_or_default(); validate_factory_deps(&meta.factory_deps)?; - // TODO: Remove this check when evm equivalence gets enabled - if value.to.is_none() { + if value.to.is_none() && !allow_no_target { return Err(SerializationTransactionError::ToAddressIsNull); } @@ -848,11 +848,18 @@ impl L2Tx { Ok(tx) } + /// Converts a request into a transaction. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. 
pub fn from_request( - value: TransactionRequest, + request: TransactionRequest, max_tx_size: usize, + allow_no_target: bool, ) -> Result { - let tx = Self::from_request_unverified(value)?; + let tx = Self::from_request_unverified(request, allow_no_target)?; tx.check_encoded_size(max_tx_size)?; Ok(tx) } @@ -916,11 +923,19 @@ impl From for TransactionRequest { } } -impl TryFrom for L1Tx { - type Error = SerializationTransactionError; - fn try_from(tx: CallRequest) -> Result { +impl L1Tx { + /// Converts a request into a transaction. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. + pub fn from_request( + request: CallRequest, + allow_no_target: bool, + ) -> Result { // L1 transactions have no limitations on the transaction size. - let tx: L2Tx = L2Tx::from_request(tx.into(), MAX_ENCODED_TX_SIZE)?; + let tx: L2Tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE, allow_no_target)?; // Note, that while the user has theoretically provided the fee for ETH on L1, // the payment to the operator as well as refunds happen on L2 and so all the ETH @@ -1316,7 +1331,7 @@ mod tests { ..Default::default() }; let execute_tx1: Result = - L2Tx::from_request(tx1, usize::MAX); + L2Tx::from_request(tx1, usize::MAX, true); assert!(execute_tx1.is_ok()); let tx2 = TransactionRequest { @@ -1327,7 +1342,7 @@ mod tests { ..Default::default() }; let execute_tx2: Result = - L2Tx::from_request(tx2, usize::MAX); + L2Tx::from_request(tx2, usize::MAX, true); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::TooBigNonce @@ -1344,7 +1359,7 @@ mod tests { ..Default::default() }; let execute_tx1: Result = - L2Tx::from_request(tx1, usize::MAX); + L2Tx::from_request(tx1, usize::MAX, true); assert_eq!( execute_tx1.unwrap_err(), SerializationTransactionError::MaxFeePerGasNotU64 @@ -1358,7 +1373,7 @@ mod tests { ..Default::default() }; let execute_tx2: Result = - L2Tx::from_request(tx2, usize::MAX); + L2Tx::from_request(tx2, usize::MAX, true); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::MaxPriorityFeePerGasNotU64 @@ -1376,7 +1391,7 @@ mod tests { }; let execute_tx3: Result = - L2Tx::from_request(tx3, usize::MAX); + L2Tx::from_request(tx3, usize::MAX, true); assert_eq!( execute_tx3.unwrap_err(), SerializationTransactionError::MaxFeePerPubdataByteNotU64 @@ -1432,7 +1447,7 @@ mod tests { let request = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)).unwrap(); assert_matches!( - L2Tx::from_request(request.0, random_tx_max_size), + L2Tx::from_request(request.0, random_tx_max_size, true), Err(SerializationTransactionError::OversizedData(_, _)) ) } @@ -1458,7 +1473,7 @@ mod tests { }; let try_to_l2_tx: Result = - L2Tx::from_request(call_request.into(), random_tx_max_size); + L2Tx::from_request(call_request.into(), random_tx_max_size, true); assert_matches!( try_to_l2_tx, @@ -1483,15 +1498,20 @@ mod tests { access_list: None, eip712_meta: None, }; - let l2_tx = L2Tx::from_request(call_request_with_nonce.clone().into(), MAX_ENCODED_TX_SIZE) - .unwrap(); + let l2_tx = L2Tx::from_request( + call_request_with_nonce.clone().into(), + MAX_ENCODED_TX_SIZE, + true, + ) + .unwrap(); assert_eq!(l2_tx.nonce(), Nonce(123u32)); let mut call_request_without_nonce = call_request_with_nonce; call_request_without_nonce.nonce = None; let l2_tx = - L2Tx::from_request(call_request_without_nonce.into(), MAX_ENCODED_TX_SIZE).unwrap(); + 
L2Tx::from_request(call_request_without_nonce.into(), MAX_ENCODED_TX_SIZE, true) + .unwrap(); assert_eq!(l2_tx.nonce(), Nonce(0u32)); } diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index c133261bc23..0edece9e46b 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -1,6 +1,7 @@ use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -use zksync_utils::ZeroPrefixHexSerde; +use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_utils::{bytecode::hash_bytecode, ZeroPrefixHexSerde}; use crate::{ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; @@ -89,8 +90,7 @@ impl Execute { &self.calldata } - /// Prepares calldata to invoke deployer contract. - /// This method encodes parameters for the `create` method. + /// Prepares calldata to invoke deployer contract. This method encodes parameters for the `create` method. pub fn encode_deploy_params_create( salt: H256, contract_hash: H256, @@ -116,4 +116,24 @@ impl Execute { FUNCTION_SIGNATURE.iter().copied().chain(params).collect() } + + /// Creates an instance for deploying the specified bytecode without additional dependencies. If necessary, + /// additional deps can be added to `Self.factory_deps` after this call. + pub fn for_deploy( + salt: H256, + contract_bytecode: Vec, + constructor_input: &[ethabi::Token], + ) -> Self { + let bytecode_hash = hash_bytecode(&contract_bytecode); + Self { + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), + calldata: Self::encode_deploy_params_create( + salt, + bytecode_hash, + ethabi::encode(constructor_input), + ), + value: 0.into(), + factory_deps: vec![contract_bytecode], + } + } } diff --git a/core/lib/vm_executor/src/oneshot/block.rs b/core/lib/vm_executor/src/oneshot/block.rs index 8ba77305ad7..c820ea794fe 100644 --- a/core/lib/vm_executor/src/oneshot/block.rs +++ b/core/lib/vm_executor/src/oneshot/block.rs @@ -133,26 +133,33 @@ impl BlockInfo { let protocol_version = l2_block_header .protocol_version .unwrap_or(ProtocolVersionId::last_potentially_undefined()); - + // We cannot use the EVM emulator mentioned in the block as is because of batch vs playground settings etc. + // Instead, we just check whether EVM emulation in general is enabled for a block, and store this binary flag for further use. + let use_evm_emulator = l2_block_header + .base_system_contracts_hashes + .evm_emulator + .is_some(); Ok(ResolvedBlockInfo { state_l2_block_number, state_l2_block_hash: l2_block_header.hash, vm_l1_batch_number, l1_batch_timestamp, protocol_version, + use_evm_emulator, is_pending: self.is_pending_l2_block(), }) } } /// Resolved [`BlockInfo`] containing additional data from VM state. 
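// Illustrative sketch, not part of the patch: with `ResolvedBlockInfo` made `Clone` and given
// accessors below, block info can be resolved once per request and then shared between
// validation, environment setup, and the API layer without further DB queries. Names follow
// the surrounding file; the helper function itself is hypothetical.
use zksync_dal::{Connection, Core};

async fn resolve_once_sketch(connection: &mut Connection<'_, Core>) -> anyhow::Result<bool> {
    let info = BlockInfo::pending(connection).await?;
    let resolved = info.resolve(connection).await?;
    // The EVM-emulator flag is derived from the block's base system contract hashes in
    // `BlockInfo::resolve`; the resolved value can be cloned and handed to multiple consumers.
    let _shared = resolved.clone();
    Ok(resolved.use_evm_emulator())
}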
-#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ResolvedBlockInfo { state_l2_block_number: L2BlockNumber, state_l2_block_hash: H256, vm_l1_batch_number: L1BatchNumber, l1_batch_timestamp: u64, protocol_version: ProtocolVersionId, + use_evm_emulator: bool, is_pending: bool, } @@ -161,6 +168,14 @@ impl ResolvedBlockInfo { pub fn state_l2_block_number(&self) -> L2BlockNumber { self.state_l2_block_number } + + pub fn protocol_version(&self) -> ProtocolVersionId { + self.protocol_version + } + + pub fn use_evm_emulator(&self) -> bool { + self.use_evm_emulator + } } impl OneshotEnvParameters { @@ -213,7 +228,10 @@ impl OneshotEnvParameters { version: resolved_block_info.protocol_version, base_system_smart_contracts: self .base_system_contracts - .get_by_protocol_version(resolved_block_info.protocol_version) + .get_by_protocol_version( + resolved_block_info.protocol_version, + resolved_block_info.use_evm_emulator, + ) .clone(), bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, execution_mode, diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index 3b3a65fe30b..0e1fb9b2762 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -26,8 +26,12 @@ pub(super) struct MultiVMBaseSystemContracts { impl MultiVMBaseSystemContracts { /// Gets contracts for a certain version. - pub fn get_by_protocol_version(&self, version: ProtocolVersionId) -> &BaseSystemContracts { - match version { + pub fn get_by_protocol_version( + &self, + version: ProtocolVersionId, + use_evm_emulator: bool, + ) -> BaseSystemContracts { + let base = match version { ProtocolVersionId::Version0 | ProtocolVersionId::Version1 | ProtocolVersionId::Version2 @@ -54,6 +58,14 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version24 | ProtocolVersionId::Version25 => { &self.vm_1_5_0_increased_memory } + }; + let base = base.clone(); + + if version.is_post_1_5_0() && use_evm_emulator { + // EVM emulator is not versioned now; the latest version is always checked out + base.with_latest_evm_emulator() + } else { + base } } diff --git a/core/lib/vm_executor/src/oneshot/mock.rs b/core/lib/vm_executor/src/oneshot/mock.rs index 8f3a12603c1..a7363c633c6 100644 --- a/core/lib/vm_executor/src/oneshot/mock.rs +++ b/core/lib/vm_executor/src/oneshot/mock.rs @@ -68,6 +68,7 @@ impl MockOneshotExecutor { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, } }, ) diff --git a/core/lib/vm_executor/src/storage.rs b/core/lib/vm_executor/src/storage.rs index a2369820a5b..fa0e530c190 100644 --- a/core/lib/vm_executor/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -309,7 +309,11 @@ impl L1BatchParamsProvider { let contract_hashes = first_l2_block_in_batch.header.base_system_contracts_hashes; let base_system_contracts = storage .factory_deps_dal() - .get_base_system_contracts(contract_hashes.bootloader, contract_hashes.default_aa) + .get_base_system_contracts( + contract_hashes.bootloader, + contract_hashes.default_aa, + contract_hashes.evm_emulator, + ) .await .context("failed getting base system contracts")?; diff --git a/core/lib/vm_interface/src/storage/in_memory.rs b/core/lib/vm_interface/src/storage/in_memory.rs index 6a8b5643345..d83f675cd54 100644 --- a/core/lib/vm_interface/src/storage/in_memory.rs +++ b/core/lib/vm_interface/src/storage/in_memory.rs @@ -36,7 +36,7 @@ impl InMemoryStorage { Self::with_custom_system_contracts_and_chain_id( chain_id, 
bytecode_hasher, - get_system_smart_contracts(), + get_system_smart_contracts(false), ) } diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 3e53aad85f1..018ea075db5 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; use zksync_system_constants::{ BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, @@ -118,6 +120,10 @@ pub struct VmExecutionResultAndLogs { pub logs: VmExecutionLogs, pub statistics: VmExecutionStatistics, pub refunds: Refunds, + /// Bytecodes decommitted during VM execution. `None` if not computed by the VM. + // FIXME: currently, this is only filled up by `vm_latest`; probably makes sense to narrow down + // to *dynamic* factory deps, so that `HashMap::new()` is a valid value for VMs not supporting EVM emulation. + pub new_known_factory_deps: Option>>, } #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs index 27241c2c0fa..8f7c1d4fb0d 100644 --- a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs +++ b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs @@ -26,6 +26,7 @@ impl FinishedL1Batch { logs: VmExecutionLogs::default(), statistics: VmExecutionStatistics::default(), refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: vec![], diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index d974f2e9aa1..14ac37e5936 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -175,7 +175,7 @@ impl SandboxExecutor { let initialization_stage = SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].start(); let resolve_started_at = Instant::now(); let resolve_time = resolve_started_at.elapsed(); - let resolved_block_info = block_args.inner.resolve(&mut connection).await?; + let resolved_block_info = &block_args.resolved; // We don't want to emit too many logs. if resolve_time > Duration::from_millis(10) { tracing::debug!("Resolved block numbers (took {resolve_time:?})"); @@ -185,7 +185,7 @@ impl SandboxExecutor { SandboxAction::Execution { fee_input, tx } => { self.options .eth_call - .to_execute_env(&mut connection, &resolved_block_info, *fee_input, tx) + .to_execute_env(&mut connection, resolved_block_info, *fee_input, tx) .await? } &SandboxAction::Call { @@ -197,7 +197,7 @@ impl SandboxExecutor { .eth_call .to_call_env( &mut connection, - &resolved_block_info, + resolved_block_info, fee_input, enforced_base_fee, ) @@ -210,7 +210,7 @@ impl SandboxExecutor { } => { self.options .estimate_gas - .to_env(&mut connection, &resolved_block_info, fee_input, base_fee) + .to_env(&mut connection, resolved_block_info, fee_input, base_fee) .await? 
} }; diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index 36f10b8e9b0..b560d161ab5 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -10,7 +10,7 @@ use zksync_multivm::utils::get_eth_call_gas_limit; use zksync_types::{ api, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, ProtocolVersionId, U256, }; -use zksync_vm_executor::oneshot::BlockInfo; +use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; use self::vm_metrics::SandboxStage; pub(super) use self::{ @@ -285,21 +285,32 @@ pub enum BlockArgsError { } /// Information about a block provided to VM. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub(crate) struct BlockArgs { inner: BlockInfo, + resolved: ResolvedBlockInfo, block_id: api::BlockId, } impl BlockArgs { pub async fn pending(connection: &mut Connection<'_, Core>) -> anyhow::Result { let inner = BlockInfo::pending(connection).await?; + let resolved = inner.resolve(connection).await?; Ok(Self { inner, + resolved, block_id: api::BlockId::Number(api::BlockNumber::Pending), }) } + pub fn protocol_version(&self) -> ProtocolVersionId { + self.resolved.protocol_version() + } + + pub fn use_evm_emulator(&self) -> bool { + self.resolved.use_evm_emulator() + } + /// Loads block information from DB. pub async fn new( connection: &mut Connection<'_, Core>, @@ -326,8 +337,10 @@ impl BlockArgs { return Err(BlockArgsError::Missing); }; + let inner = BlockInfo::for_existing_block(connection, block_number).await?; Ok(Self { - inner: BlockInfo::for_existing_block(connection, block_number).await?, + inner, + resolved: inner.resolve(connection).await?, block_id, }) } diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 306018e1543..75788d48058 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -7,7 +7,7 @@ use test_casing::test_casing; use zksync_dal::ConnectionPool; use zksync_multivm::{interface::ExecutionResult, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_node_test_utils::{create_l1_batch, create_l2_block, prepare_recovery_snapshot}; use zksync_state::PostgresStorageCaches; use zksync_types::{ api::state_override::{OverrideAccount, StateOverride}, @@ -93,17 +93,6 @@ async fn creating_block_args_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); - assert_eq!( - pending_block_args.block_id, - api::BlockId::Number(api::BlockNumber::Pending) - ); - assert_eq!( - pending_block_args.resolved_block_number(), - snapshot_recovery.l2_block_number + 1 - ); - assert!(pending_block_args.is_pending()); - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) .await .unwrap(); @@ -122,6 +111,35 @@ async fn creating_block_args_after_snapshot_recovery() { .unwrap_err(); assert_matches!(err, BlockArgsError::Missing); + // Ensure there is a batch in the storage. 
+ let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); + storage + .blocks_dal() + .insert_l2_block(&l2_block) + .await + .unwrap(); + storage + .blocks_dal() + .insert_mock_l1_batch(&create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1)) + .await + .unwrap(); + storage + .blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(snapshot_recovery.l1_batch_number + 1) + .await + .unwrap(); + + let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); + assert_eq!( + pending_block_args.block_id, + api::BlockId::Number(api::BlockNumber::Pending) + ); + assert_eq!( + pending_block_args.resolved_block_number(), + snapshot_recovery.l2_block_number + 2 + ); + assert!(pending_block_args.is_pending()); + let pruned_blocks = [ api::BlockNumber::Earliest, 0.into(), @@ -147,13 +165,6 @@ async fn creating_block_args_after_snapshot_recovery() { assert_matches!(err, BlockArgsError::Missing); } - let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); - storage - .blocks_dal() - .insert_l2_block(&l2_block) - .await - .unwrap(); - let latest_block_args = BlockArgs::new(&mut storage, latest_block, &start_info) .await .unwrap(); diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs index f5e42875a3d..44e568ce418 100644 --- a/core/node/api_server/src/tx_sender/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/gas_estimation.rs @@ -44,13 +44,14 @@ impl TxSender { pub async fn get_txs_fee_in_wei( &self, tx: Transaction, + block_args: BlockArgs, estimated_fee_scale_factor: f64, acceptable_overestimation: u64, state_override: Option, kind: BinarySearchKind, ) -> Result { let estimation_started_at = Instant::now(); - let mut estimator = GasEstimator::new(self, tx, state_override).await?; + let mut estimator = GasEstimator::new(self, tx, block_args, state_override).await?; estimator.adjust_transaction_fee(); let initial_estimate = estimator.initialize().await?; @@ -309,16 +310,10 @@ impl<'a> GasEstimator<'a> { pub(super) async fn new( sender: &'a TxSender, mut transaction: Transaction, + block_args: BlockArgs, state_override: Option, ) -> Result { - let mut connection = sender.acquire_replica_connection().await?; - let block_args = BlockArgs::pending(&mut connection).await?; - let protocol_version = connection - .blocks_dal() - .pending_protocol_version() - .await - .context("failed getting pending protocol version")?; - drop(connection); + let protocol_version = block_args.protocol_version(); let max_gas_limit = get_max_batch_gas_limit(protocol_version.into()); let fee_input = adjust_pubdata_price_for_tx( diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index ad8e38ef3cc..2dbc0d5a0dd 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -280,13 +280,11 @@ impl TxSender { pub async fn submit_tx( &self, tx: L2Tx, + block_args: BlockArgs, ) -> Result<(L2TxSubmissionResult, VmExecutionResultAndLogs), SubmitTxError> { let tx_hash = tx.hash(); let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::Validate); - let mut connection = self.acquire_replica_connection().await?; - let protocol_version = connection.blocks_dal().pending_protocol_version().await?; - drop(connection); - self.validate_tx(&tx, protocol_version).await?; + self.validate_tx(&tx, block_args.protocol_version()).await?; stage_latency.observe(); let stage_latency = 
SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DryRun); @@ -305,9 +303,7 @@ impl TxSender { tx: tx.clone(), }; let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; - let mut connection = self.acquire_replica_connection().await?; - let block_args = BlockArgs::pending(&mut connection).await?; - + let connection = self.acquire_replica_connection().await?; let execution_output = self .0 .executor diff --git a/core/node/api_server/src/tx_sender/tests/call.rs b/core/node/api_server/src/tx_sender/tests/call.rs index bdddb8e3895..e43f55b2b9a 100644 --- a/core/node/api_server/src/tx_sender/tests/call.rs +++ b/core/node/api_server/src/tx_sender/tests/call.rs @@ -56,7 +56,7 @@ async fn test_call( mut call: CallRequest, ) -> Result, SubmitTxError> { call.gas = call.gas.max(Some(10_000_000.into())); - let call = L2Tx::from_request(call.into(), usize::MAX).unwrap(); + let call = L2Tx::from_request(call.into(), usize::MAX, true).unwrap(); let mut storage = tx_sender .0 diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs index 086313a8562..3fd5fcb5188 100644 --- a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -25,6 +25,7 @@ const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0; async fn initial_gas_estimation_is_somewhat_accurate() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let transfer_value = U256::from(1_000_000_000); @@ -35,7 +36,7 @@ async fn initial_gas_estimation_is_somewhat_accurate() { let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); let tx = alice.create_transfer(transfer_value); - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) .await .unwrap(); estimator.adjust_transaction_fee(); @@ -131,7 +132,8 @@ async fn test_initial_estimate( ) -> VmExecutionResultAndLogs { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) .await .unwrap(); estimator.adjust_transaction_fee(); @@ -153,7 +155,8 @@ async fn test_initial_estimate( async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) .await .unwrap(); estimator.adjust_transaction_fee(); @@ -292,6 +295,7 @@ async fn out_of_gas_during_initial_estimate() { async fn insufficient_funds_error_for_transfer() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let tx = alice.create_transfer(1_000_000_000.into()); 
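// Illustrative sketch, not part of the patch: after this change a caller resolves `BlockArgs`
// once (here via the `pending_block_args` test helper added in `tests/mod.rs` below) and passes
// it into fee estimation, instead of the estimator loading the pending block itself. The scale
// factor 1.0 and overestimation 1_000 mirror the calls in this diff; `BinarySearchKind::Full`
// is assumed here purely for illustration.
async fn estimate_fee_sketch(tx_sender: &TxSender, tx: L2Tx) {
    // Resolve the pending block once; the same value also exposes protocol version and the
    // EVM-emulator flag for transaction parsing.
    let block_args = pending_block_args(tx_sender).await;
    let fee = tx_sender
        .get_txs_fee_in_wei(tx.into(), block_args, 1.0, 1_000, None, BinarySearchKind::Full)
        .await
        .expect("fee estimation failed");
    assert!(!fee.gas_limit.is_zero());
}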
@@ -300,6 +304,7 @@ async fn insufficient_funds_error_for_transfer() { let err = tx_sender .get_txs_fee_in_wei( tx.clone().into(), + block_args, fee_scale_factor, 1_000, None, @@ -317,11 +322,13 @@ async fn test_estimating_gas( ) { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let fee_scale_factor = 1.0; let fee = tx_sender .get_txs_fee_in_wei( tx.clone().into(), + block_args.clone(), fee_scale_factor, acceptable_overestimation, Some(state_override.clone()), @@ -339,6 +346,7 @@ async fn test_estimating_gas( let fee = tx_sender .get_txs_fee_in_wei( tx.into(), + block_args, fee_scale_factor, acceptable_overestimation, Some(state_override.clone()), @@ -419,6 +427,7 @@ async fn estimating_gas_for_reverting_tx() { let tx = alice.create_counter_tx(1.into(), true); let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let fee_scale_factor = 1.0; let acceptable_overestimation = 0; @@ -426,6 +435,7 @@ async fn estimating_gas_for_reverting_tx() { let err = tx_sender .get_txs_fee_in_wei( tx.clone().into(), + block_args.clone(), fee_scale_factor, acceptable_overestimation, Some(state_override.clone()), @@ -447,6 +457,7 @@ async fn estimating_gas_for_infinite_loop_tx() { let tx = alice.create_infinite_loop_tx(); let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let fee_scale_factor = 1.0; let acceptable_overestimation = 0; @@ -454,6 +465,7 @@ async fn estimating_gas_for_infinite_loop_tx() { let err = tx_sender .get_txs_fee_in_wei( tx.clone().into(), + block_args.clone(), fee_scale_factor, acceptable_overestimation, Some(state_override.clone()), diff --git a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs index 3d48e320abc..cacd616202d 100644 --- a/core/node/api_server/src/tx_sender/tests/mod.rs +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -159,3 +159,8 @@ async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { .await .0 } + +async fn pending_block_args(tx_sender: &TxSender) -> BlockArgs { + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + BlockArgs::pending(&mut storage).await.unwrap() +} diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs index 678b88dab94..fdd63254cf0 100644 --- a/core/node/api_server/src/tx_sender/tests/send_tx.rs +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -42,8 +42,9 @@ async fn submitting_tx_requires_one_connection() { }); let tx_executor = SandboxExecutor::mock(tx_executor).await; let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let block_args = pending_block_args(&tx_sender).await; - let submission_result = tx_sender.submit_tx(tx).await.unwrap(); + let submission_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!(submission_result.0, L2TxSubmissionResult::Added); let mut storage = pool.connection().await.unwrap(); @@ -178,6 +179,7 @@ async fn fee_validation_errors() { async fn sending_transfer() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = 
K256PrivateKey::random(); // Manually set sufficient balance for the tx initiator. @@ -189,7 +191,7 @@ async fn sending_transfer() { drop(storage); let transfer = alice.create_transfer(1_000_000_000.into()); - let (sub_result, vm_result) = tx_sender.submit_tx(transfer).await.unwrap(); + let (sub_result, vm_result) = tx_sender.submit_tx(transfer, block_args).await.unwrap(); assert_matches!(sub_result, L2TxSubmissionResult::Added); assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); } @@ -198,11 +200,12 @@ async fn sending_transfer() { async fn sending_transfer_with_insufficient_balance() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let transfer_value = 1_000_000_000.into(); let transfer = alice.create_transfer(transfer_value); - let err = tx_sender.submit_tx(transfer).await.unwrap_err(); + let err = tx_sender.submit_tx(transfer, block_args).await.unwrap_err(); assert_matches!( err, SubmitTxError::NotEnoughBalanceForFeeValue(balance, _, value) if balance.is_zero() @@ -214,6 +217,7 @@ async fn sending_transfer_with_insufficient_balance() { async fn sending_transfer_with_incorrect_signature() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let transfer_value = 1_000_000_000.into(); @@ -226,7 +230,7 @@ async fn sending_transfer_with_incorrect_signature() { let mut transfer = alice.create_transfer(transfer_value); transfer.execute.value = transfer_value / 2; // This should invalidate tx signature - let err = tx_sender.submit_tx(transfer).await.unwrap_err(); + let err = tx_sender.submit_tx(transfer, block_args).await.unwrap_err(); assert_matches!(err, SubmitTxError::ValidationFailed(_)); } @@ -235,6 +239,7 @@ async fn sending_transfer_with_incorrect_signature() { async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParams) { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); @@ -246,7 +251,7 @@ async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParam drop(storage); let tx = alice.create_load_test_tx(tx_params); - let (sub_result, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + let (sub_result, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!(sub_result, L2TxSubmissionResult::Added); assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); } @@ -255,6 +260,7 @@ async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParam async fn sending_reverting_transaction() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); @@ -266,7 +272,7 @@ async fn sending_reverting_transaction() { drop(storage); let tx = alice.create_counter_tx(1.into(), true); - let (_, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!( 
vm_result.result, ExecutionResult::Revert { output } if output.to_string().contains("This method always reverts") @@ -277,6 +283,7 @@ async fn sending_reverting_transaction() { async fn sending_transaction_out_of_gas() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); @@ -288,6 +295,6 @@ async fn sending_transaction_out_of_gas() { drop(storage); let tx = alice.create_infinite_loop_tx(); - let (_, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!(vm_result.result, ExecutionResult::Revert { .. }); } diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 71560e4ddb8..7e99808dbc7 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -259,7 +259,11 @@ impl DebugNamespace { }; let call_overrides = request.get_call_overrides()?; - let call = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; + let call = L2Tx::from_request( + request.into(), + MAX_ENCODED_TX_SIZE, + false, // Even with EVM emulation enabled, calls must specify `to` field + )?; let vm_permit = self .state diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index a412c064fac..721ca985ceb 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -177,6 +177,10 @@ impl EnNamespace { genesis_commitment: Some(genesis_batch.metadata.commitment), bootloader_hash: Some(genesis_batch.header.base_system_contracts_hashes.bootloader), default_aa_hash: Some(genesis_batch.header.base_system_contracts_hashes.default_aa), + evm_emulator_hash: genesis_batch + .header + .base_system_contracts_hashes + .evm_emulator, l1_chain_id: self.state.api_config.l1_chain_id, sl_chain_id: Some(self.state.api_config.l1_chain_id.into()), l2_chain_id: self.state.api_config.l2_chain_id, diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 1d60d839e4e..44362dd098e 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -19,6 +19,7 @@ use zksync_web3_decl::{ }; use crate::{ + execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, state::RpcState, TypedFilter}, @@ -77,7 +78,11 @@ impl EthNamespace { drop(connection); let call_overrides = request.get_call_overrides()?; - let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?; + let tx = L2Tx::from_request( + request.into(), + self.state.api_config.max_tx_size, + false, // Even with EVM emulation enabled, calls must specify `to` field + )?; // It is assumed that the previous checks has already enforced that the `max_fee_per_gas` is at most u64. 
let call_result: Vec = self @@ -108,10 +113,13 @@ impl EthNamespace { let is_eip712 = request_with_gas_per_pubdata_overridden .eip712_meta .is_some(); - + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); let mut tx: L2Tx = L2Tx::from_request( request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, + block_args.use_evm_emulator(), )?; // The user may not include the proper transaction type during the estimation of @@ -137,6 +145,7 @@ impl EthNamespace { .tx_sender .get_txs_fee_in_wei( tx.into(), + block_args, scale_factor, acceptable_overestimation as u64, state_override, @@ -619,10 +628,15 @@ impl EthNamespace { } pub async fn send_raw_transaction_impl(&self, tx_bytes: Bytes) -> Result { - let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let (mut tx, hash) = self + .state + .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; submit_result.map(|_| hash).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index 2192f11eb14..bcfd7daf346 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, convert::TryInto}; +use std::collections::HashMap; use anyhow::Context as _; use zksync_dal::{Connection, Core, CoreDal, DalError}; @@ -30,6 +30,7 @@ use zksync_web3_decl::{ }; use crate::{ + execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, RpcState}, @@ -63,16 +64,21 @@ impl ZksNamespace { eip712_meta.gas_per_pubdata = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE); } + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); let mut tx = L2Tx::from_request( request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, + block_args.use_evm_emulator(), )?; // When we're estimating fee, we are trying to deduce values related to fee, so we should // not consider provided ones. 
tx.common_data.fee.max_priority_fee_per_gas = 0u64.into(); tx.common_data.fee.gas_per_pubdata_limit = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE); - self.estimate_fee(tx.into(), state_override).await + self.estimate_fee(tx.into(), block_args, state_override) + .await } pub async fn estimate_l1_to_l2_gas_impl( @@ -89,17 +95,25 @@ impl ZksNamespace { } } - let tx: L1Tx = request_with_gas_per_pubdata_overridden - .try_into() - .map_err(Web3Error::SerializationError)?; - - let fee = self.estimate_fee(tx.into(), state_override).await?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let tx = L1Tx::from_request( + request_with_gas_per_pubdata_overridden, + block_args.use_evm_emulator(), + ) + .map_err(Web3Error::SerializationError)?; + + let fee = self + .estimate_fee(tx.into(), block_args, state_override) + .await?; Ok(fee.gas_limit) } async fn estimate_fee( &self, tx: Transaction, + block_args: BlockArgs, state_override: Option, ) -> Result { let scale_factor = self.state.api_config.estimate_gas_scale_factor; @@ -112,6 +126,7 @@ impl ZksNamespace { .tx_sender .get_txs_fee_in_wei( tx, + block_args, scale_factor, acceptable_overestimation as u64, state_override, @@ -583,10 +598,15 @@ impl ZksNamespace { &self, tx_bytes: Bytes, ) -> Result<(H256, VmExecutionResultAndLogs), Web3Error> { - let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let (mut tx, hash) = self + .state + .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; submit_result.map(|result| (hash, result.1)).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index 723661ab908..18c206eaf58 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -245,12 +245,19 @@ pub(crate) struct RpcState { } impl RpcState { - pub fn parse_transaction_bytes(&self, bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> { + pub fn parse_transaction_bytes( + &self, + bytes: &[u8], + block_args: &BlockArgs, + ) -> Result<(L2Tx, H256), Web3Error> { let chain_id = self.api_config.l2_chain_id; let (tx_request, hash) = api::TransactionRequest::from_bytes(bytes, chain_id)?; - Ok(( - L2Tx::from_request(tx_request, self.api_config.max_tx_size)?, + L2Tx::from_request( + tx_request, + self.api_config.max_tx_size, + block_args.use_evm_emulator(), + )?, hash, )) } diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index d8086c6c6ad..e29ea246213 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -257,12 +257,12 @@ struct SendRawTransactionTest { } impl SendRawTransactionTest { - fn transaction_bytes_and_hash() -> (Vec, H256) { + fn transaction_bytes_and_hash(include_to: bool) -> (Vec, H256) { let private_key = Self::private_key(); let tx_request = api::TransactionRequest { chain_id: Some(L2ChainId::default().as_u64()), from: Some(private_key.address()), - to: Some(Address::repeat_byte(2)), + to: include_to.then(|| 
Address::repeat_byte(2)), value: 123_456.into(), gas: (get_intrinsic_constants().l2_tx_intrinsic_gas * 2).into(), gas_price: StateKeeperConfig::for_tests().minimal_l2_gas_price.into(), @@ -313,7 +313,7 @@ impl HttpTest for SendRawTransactionTest { L2BlockNumber(1) }; tx_executor.set_tx_responses(move |tx, env| { - assert_eq!(tx.hash(), Self::transaction_bytes_and_hash().1); + assert_eq!(tx.hash(), Self::transaction_bytes_and_hash(true).1); assert_eq!(env.l1_batch.first_l2_block.number, pending_block.0); ExecutionResult::Success { output: vec![] } }); @@ -334,7 +334,7 @@ impl HttpTest for SendRawTransactionTest { .await?; } - let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash(); + let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash(true); let send_result = client.send_raw_transaction(tx_bytes.into()).await?; assert_eq!(send_result, tx_hash); Ok(()) @@ -357,6 +357,50 @@ async fn send_raw_transaction_after_snapshot_recovery() { .await; } +fn assert_null_to_address_error(error: &ClientError) { + if let ClientError::Call(error) = error { + assert_eq!(error.code(), 3); + assert!(error.message().contains("toAddressIsNull"), "{error:?}"); + assert!(error.data().is_none(), "{error:?}"); + } else { + panic!("Unexpected error: {error:?}"); + } +} + +#[derive(Debug)] +struct SendRawTransactionWithoutToAddressTest; + +#[async_trait] +impl HttpTest for SendRawTransactionWithoutToAddressTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut storage = pool.connection().await?; + storage + .storage_logs_dal() + .append_storage_logs( + L2BlockNumber(0), + &[SendRawTransactionTest::balance_storage_log()], + ) + .await?; + + let (tx_bytes, _) = SendRawTransactionTest::transaction_bytes_and_hash(false); + let err = client + .send_raw_transaction(tx_bytes.into()) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) + } +} + +#[tokio::test] +async fn send_raw_transaction_fails_without_to_address() { + test_http_server(SendRawTransactionWithoutToAddressTest).await; +} + #[derive(Debug)] struct SendTransactionWithDetailedOutputTest; @@ -405,7 +449,7 @@ impl SendTransactionWithDetailedOutputTest { impl HttpTest for SendTransactionWithDetailedOutputTest { fn transaction_executor(&self) -> MockOneshotExecutor { let mut tx_executor = MockOneshotExecutor::default(); - let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(); + let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(true); let vm_execution_logs = VmExecutionLogs { storage_logs: self.storage_logs(), events: self.vm_events(), @@ -423,6 +467,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { logs: vm_execution_logs.clone(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, } }); tx_executor @@ -443,7 +488,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { ) .await?; - let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(); + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(true); let send_result = client .send_raw_transaction_with_detailed_output(tx_bytes.into()) .await?; @@ -835,3 +880,30 @@ async fn estimate_gas_with_state_override() { let inner = EstimateGasTest::new(false); test_http_server(EstimateGasWithStateOverrideTest { inner }).await; } + +#[derive(Debug)] +struct EstimateGasWithoutToAddessTest; + +#[async_trait] +impl HttpTest for EstimateGasWithoutToAddessTest { + async fn test( + &self, + 
client: &DynClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut l2_transaction = create_l2_transaction(10, 100); + l2_transaction.execute.contract_address = None; + l2_transaction.common_data.signature = vec![]; // Remove invalidated signature so that it doesn't trip estimation logic + let err = client + .estimate_gas(l2_transaction.clone().into(), None, None) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) + } +} + +#[tokio::test] +async fn estimate_gas_fails_without_to_address() { + test_http_server(EstimateGasWithoutToAddessTest).await; +} diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs index 6cb14cfda53..cf6971b041c 100644 --- a/core/node/commitment_generator/src/lib.rs +++ b/core/node/commitment_generator/src/lib.rs @@ -176,6 +176,7 @@ impl CommitmentGenerator { rollup_root_hash: tree_data.hash, bootloader_code_hash: header.base_system_contracts_hashes.bootloader, default_aa_code_hash: header.base_system_contracts_hashes.default_aa, + evm_emulator_code_hash: header.base_system_contracts_hashes.evm_emulator, protocol_version, }; let touched_slots = connection diff --git a/core/node/consensus/src/batch.rs b/core/node/consensus/src/batch.rs index 08246c4e5c0..af38f446c1b 100644 --- a/core/node/consensus/src/batch.rs +++ b/core/node/consensus/src/batch.rs @@ -261,7 +261,7 @@ impl L1BatchWithWitness { // TODO: make consensus payload contain `abi::Transaction` instead. // TODO: currently the payload doesn't contain the block number, which is // annoying. Consider adding it to payload. - let t2: Transaction = abi::Transaction::try_from(t.clone())?.try_into()?; + let t2 = Transaction::from_abi(abi::Transaction::try_from(t.clone())?, true)?; anyhow::ensure!(t == &t2); hasher.push_tx_hash(t.hash()); } diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index 65c464d98b9..5817e766c6b 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -57,7 +57,7 @@ pub(crate) fn mock_genesis_params(protocol_version: ProtocolVersionId) -> Genesi GenesisParams::from_genesis_config( cfg, BaseSystemContracts::load_from_disk(), - get_system_smart_contracts(), + get_system_smart_contracts(false), ) .unwrap() } diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 6e9e71d74ea..a08d16f456a 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -144,19 +144,19 @@ impl EthTxAggregator { } pub(super) async fn get_multicall_data(&mut self) -> Result { - let calldata = self.generate_calldata_for_multicall(); + let (calldata, evm_emulator_hash_requested) = self.generate_calldata_for_multicall(); let args = CallFunctionArgs::new(&self.functions.aggregate3.name, calldata).for_contract( self.l1_multicall3_address, &self.functions.multicall_contract, ); let aggregate3_result: Token = args.call((*self.eth_client).as_ref()).await?; - self.parse_multicall_data(aggregate3_result) + self.parse_multicall_data(aggregate3_result, evm_emulator_hash_requested) } // Multicall's aggregate function accepts 1 argument - arrays of different contract calls. // The role of the method below is to tokenize input for multicall, which is actually a vector of tokens. // Each token describes a specific contract call. 
- pub(super) fn generate_calldata_for_multicall(&self) -> Vec { + pub(super) fn generate_calldata_for_multicall(&self) -> (Vec, bool) { const ALLOW_FAILURE: bool = false; // First zksync contract call @@ -215,14 +215,31 @@ impl EthTxAggregator { calldata: get_protocol_version_input, }; - // Convert structs into tokens and return vector with them - vec![ + let mut token_vec = vec![ get_bootloader_hash_call.into_token(), get_default_aa_hash_call.into_token(), get_verifier_params_call.into_token(), get_verifier_call.into_token(), get_protocol_version_call.into_token(), - ] + ]; + + let mut evm_emulator_hash_requested = false; + let get_l2_evm_emulator_hash_input = self + .functions + .get_evm_emulator_bytecode_hash + .as_ref() + .and_then(|f| f.encode_input(&[]).ok()); + if let Some(input) = get_l2_evm_emulator_hash_input { + let call = Multicall3Call { + target: self.state_transition_chain_contract, + allow_failure: ALLOW_FAILURE, + calldata: input, + }; + token_vec.insert(2, call.into_token()); + evm_emulator_hash_requested = true; + } + + (token_vec, evm_emulator_hash_requested) } // The role of the method below is to de-tokenize multicall call's result, which is actually a token. @@ -230,6 +247,7 @@ impl EthTxAggregator { pub(super) fn parse_multicall_data( &self, token: Token, + evm_emulator_hash_requested: bool, ) -> Result { let parse_error = |tokens: &[Token]| { Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( @@ -238,8 +256,9 @@ impl EthTxAggregator { }; if let Token::Array(call_results) = token { - // 5 calls are aggregated in multicall - if call_results.len() != 5 { + let number_of_calls = if evm_emulator_hash_requested { 6 } else { 5 }; + // 5 or 6 calls are aggregated in multicall + if call_results.len() != number_of_calls { return parse_error(&call_results); } let mut call_results_iterator = call_results.into_iter(); @@ -268,12 +287,31 @@ impl EthTxAggregator { ))); } let default_aa = H256::from_slice(&multicall3_default_aa); + + let evm_emulator = if evm_emulator_hash_requested { + let multicall3_evm_emulator = + Multicall3Result::from_token(call_results_iterator.next().unwrap())? + .return_data; + if multicall3_evm_emulator.len() != 32 { + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( + "multicall3 EVM emulator hash data is not of the len of 32: {:?}", + multicall3_evm_emulator + ), + ))); + } + Some(H256::from_slice(&multicall3_evm_emulator)) + } else { + None + }; + let base_system_contracts_hashes = BaseSystemContractsHashes { bootloader, default_aa, + evm_emulator, }; - call_results_iterator.next().unwrap(); + call_results_iterator.next().unwrap(); // FIXME: why is this value requested? let multicall3_verifier_address = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index 9be1384daae..86a8c477f9f 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -23,6 +23,8 @@ use crate::{ Aggregator, EthTxAggregator, EthTxManager, }; +pub(super) const STATE_TRANSITION_CONTRACT_ADDRESS: Address = Address::repeat_byte(0xa0); + // Alias to conveniently call static methods of `ETHSender`. 
type MockEthTxManager = EthTxManager; @@ -172,7 +174,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -192,7 +194,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); l2_gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -212,7 +214,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); gateway_blobs.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -261,7 +263,7 @@ impl EthSenderTester { // ZKsync contract address Address::random(), contracts_config.l1_multicall3_addr, - Address::random(), + STATE_TRANSITION_CONTRACT_ADDRESS, Default::default(), custom_commit_sender_addr, SettlementMode::SettlesToL1, diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index e03532458f1..9e844a8b853 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -1,7 +1,9 @@ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_l1_contract_interface::i_executor::methods::ExecuteBatches; +use zksync_l1_contract_interface::{ + i_executor::methods::ExecuteBatches, multicall3::Multicall3Call, Tokenizable, +}; use zksync_node_test_utils::create_l1_batch; use zksync_types::{ aggregated_operations::AggregatedActionType, @@ -9,16 +11,19 @@ use zksync_types::{ commitment::{ L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, L1BatchWithMetadata, }, + ethabi, ethabi::Token, helpers::unix_timestamp_ms, + web3, web3::contract::Error, - ProtocolVersionId, H256, + Address, ProtocolVersionId, H256, }; use crate::{ abstract_l1_interface::OperatorType, aggregated_operations::AggregatedOperation, - tester::{EthSenderTester, TestL1Batch}, + tester::{EthSenderTester, TestL1Batch, STATE_TRANSITION_CONTRACT_ADDRESS}, + zksync_functions::ZkSyncFunctions, EthSenderError, }; @@ -37,21 +42,59 @@ const COMMITMENT_MODES: [L1BatchCommitmentMode; 2] = [ L1BatchCommitmentMode::Validium, ]; -pub(crate) fn mock_multicall_response() -> Token { - Token::Array(vec![ - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 96])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 32])]), - Token::Tuple(vec![ - Token::Bool(true), - Token::Bytes( +pub(crate) fn mock_multicall_response(call: &web3::CallRequest) -> Token { + let functions = ZkSyncFunctions::default(); + let evm_emulator_getter_signature = functions + .get_evm_emulator_bytecode_hash + .as_ref() + .map(ethabi::Function::short_signature); + let bootloader_signature = functions.get_l2_bootloader_bytecode_hash.short_signature(); + let default_aa_signature = functions + .get_l2_default_account_bytecode_hash + 
.short_signature(); + let evm_emulator_getter_signature = evm_emulator_getter_signature.as_ref().map(|sig| &sig[..]); + + let calldata = &call.data.as_ref().expect("no calldata").0; + assert_eq!(calldata[..4], functions.aggregate3.short_signature()); + let mut tokens = functions + .aggregate3 + .decode_input(&calldata[4..]) + .expect("invalid multicall"); + assert_eq!(tokens.len(), 1); + let Token::Array(tokens) = tokens.pop().unwrap() else { + panic!("Unexpected input: {tokens:?}"); + }; + + let calls = tokens.into_iter().map(Multicall3Call::from_token); + let response = calls.map(|call| { + let call = call.unwrap(); + assert_eq!(call.target, STATE_TRANSITION_CONTRACT_ADDRESS); + let output = match &call.calldata[..4] { + selector if selector == bootloader_signature => { + vec![1u8; 32] + } + selector if selector == default_aa_signature => { + vec![2u8; 32] + } + selector if Some(selector) == evm_emulator_getter_signature => { + vec![3u8; 32] + } + selector if selector == functions.get_verifier_params.short_signature() => { + vec![4u8; 96] + } + selector if selector == functions.get_verifier.short_signature() => { + vec![5u8; 32] + } + selector if selector == functions.get_protocol_version.short_signature() => { H256::from_low_u64_be(ProtocolVersionId::default() as u64) .0 - .to_vec(), - ), - ]), - ]) + .to_vec() + } + _ => panic!("unexpected call: {call:?}"), + }; + Token::Tuple(vec![Token::Bool(true), Token::Bytes(output)]) + }); + Token::Array(response.collect()) } pub(crate) fn l1_batch_with_metadata(header: L1BatchHeader) -> L1BatchWithMetadata { @@ -74,6 +117,7 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { zkporter_is_available: false, bootloader_code_hash: H256::default(), default_aa_code_hash: H256::default(), + evm_emulator_code_hash: None, protocol_version: Some(ProtocolVersionId::default()), }, aux_data_hash: H256::default(), @@ -656,22 +700,71 @@ async fn skipped_l1_batch_in_the_middle( Ok(()) } -#[test_casing(2, COMMITMENT_MODES)] +#[test_casing(2, [false, true])] #[test_log::test(tokio::test)] -async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { +async fn parsing_multicall_data(with_evm_emulator: bool) { let tester = EthSenderTester::new( ConnectionPool::::test_pool().await, vec![100; 100], false, true, - commitment_mode, + L1BatchCommitmentMode::Rollup, ) .await; - assert!(tester + let mut mock_response = vec![ + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 96])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![5u8; 32])]), + Token::Tuple(vec![ + Token::Bool(true), + Token::Bytes( + H256::from_low_u64_be(ProtocolVersionId::latest() as u64) + .0 + .to_vec(), + ), + ]), + ]; + if with_evm_emulator { + mock_response.insert( + 2, + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 32])]), + ); + } + let mock_response = Token::Array(mock_response); + + let parsed = tester .aggregator - .parse_multicall_data(mock_multicall_response()) - .is_ok()); + .parse_multicall_data(mock_response, with_evm_emulator) + .unwrap(); + assert_eq!( + parsed.base_system_contracts_hashes.bootloader, + H256::repeat_byte(1) + ); + assert_eq!( + parsed.base_system_contracts_hashes.default_aa, + H256::repeat_byte(2) + ); + let expected_evm_emulator_hash = with_evm_emulator.then(|| H256::repeat_byte(3)); + assert_eq!( + parsed.base_system_contracts_hashes.evm_emulator, + 
expected_evm_emulator_hash + ); + assert_eq!(parsed.verifier_address, Address::repeat_byte(5)); + assert_eq!(parsed.protocol_version_id, ProtocolVersionId::latest()); +} + +#[test_log::test(tokio::test)] +async fn parsing_multicall_data_errors() { + let tester = EthSenderTester::new( + ConnectionPool::::test_pool().await, + vec![100; 100], + false, + true, + L1BatchCommitmentMode::Rollup, + ) + .await; let original_wrong_form_data = vec![ // should contain 5 tuples @@ -722,7 +815,7 @@ async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { assert_matches!( tester .aggregator - .parse_multicall_data(wrong_data_instance.clone()), + .parse_multicall_data(wrong_data_instance.clone(), true), Err(EthSenderError::Parse(Error::InvalidOutputType(_))) ); } @@ -739,6 +832,17 @@ async fn get_multicall_data(commitment_mode: L1BatchCommitmentMode) { commitment_mode, ) .await; - let multicall_data = tester.aggregator.get_multicall_data().await; - assert!(multicall_data.is_ok()); + + let data = tester.aggregator.get_multicall_data().await.unwrap(); + assert_eq!( + data.base_system_contracts_hashes.bootloader, + H256::repeat_byte(1) + ); + assert_eq!( + data.base_system_contracts_hashes.default_aa, + H256::repeat_byte(2) + ); + assert_eq!(data.base_system_contracts_hashes.evm_emulator, None); + assert_eq!(data.verifier_address, Address::repeat_byte(5)); + assert_eq!(data.protocol_version_id, ProtocolVersionId::latest()); } diff --git a/core/node/eth_sender/src/zksync_functions.rs b/core/node/eth_sender/src/zksync_functions.rs index 8f13f0e63ae..85508c71c03 100644 --- a/core/node/eth_sender/src/zksync_functions.rs +++ b/core/node/eth_sender/src/zksync_functions.rs @@ -12,6 +12,7 @@ pub(super) struct ZkSyncFunctions { pub(super) get_l2_bootloader_bytecode_hash: Function, pub(super) get_l2_default_account_bytecode_hash: Function, pub(super) get_verifier: Function, + pub(super) get_evm_emulator_bytecode_hash: Option, pub(super) get_verifier_params: Function, pub(super) get_protocol_version: Function, @@ -59,6 +60,8 @@ impl Default for ZkSyncFunctions { get_function(&zksync_contract, "getL2BootloaderBytecodeHash"); let get_l2_default_account_bytecode_hash = get_function(&zksync_contract, "getL2DefaultAccountBytecodeHash"); + let get_evm_emulator_bytecode_hash = + get_optional_function(&zksync_contract, "getL2EvmSimulatorBytecodeHash"); let get_verifier = get_function(&zksync_contract, "getVerifier"); let get_verifier_params = get_function(&zksync_contract, "getVerifierParams"); let get_protocol_version = get_function(&zksync_contract, "getProtocolVersion"); @@ -74,6 +77,7 @@ impl Default for ZkSyncFunctions { post_shared_bridge_execute, get_l2_bootloader_bytecode_hash, get_l2_default_account_bytecode_hash, + get_evm_emulator_bytecode_hash, get_verifier, get_verifier_params, get_protocol_version, diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index feb9eff35b5..d9faf7b664e 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -245,8 +245,11 @@ fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { received_timestamp_ms: 0, }; // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. 
- let tx = - Transaction::try_from(abi::Transaction::try_from(Transaction::from(tx)).unwrap()).unwrap(); + let tx = Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap(); tx.try_into().unwrap() } @@ -272,10 +275,13 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx received_timestamp_ms: 0, }; // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. - Transaction::try_from(abi::Transaction::try_from(Transaction::from(tx)).unwrap()) - .unwrap() - .try_into() - .unwrap() + Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap() + .try_into() + .unwrap() } async fn create_test_watcher( diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 1f30d314bb0..5c17add2e98 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -104,6 +104,7 @@ impl GenesisParams { default_aa: config .default_aa_hash .ok_or(GenesisError::MalformedConfig("default_aa_hash"))?, + evm_emulator: config.evm_emulator_hash, }; if base_system_contracts_hashes != base_system_contracts.hashes() { return Err(GenesisError::BaseSystemContractsHashes(Box::new( @@ -124,15 +125,18 @@ impl GenesisParams { } pub fn load_genesis_params(config: GenesisConfig) -> Result { - let base_system_contracts = BaseSystemContracts::load_from_disk(); - let system_contracts = get_system_smart_contracts(); + let mut base_system_contracts = BaseSystemContracts::load_from_disk(); + if config.evm_emulator_hash.is_some() { + base_system_contracts = base_system_contracts.with_latest_evm_emulator(); + } + let system_contracts = get_system_smart_contracts(config.evm_emulator_hash.is_some()); Self::from_genesis_config(config, base_system_contracts, system_contracts) } pub fn mock() -> Self { Self { base_system_contracts: BaseSystemContracts::load_from_disk(), - system_contracts: get_system_smart_contracts(), + system_contracts: get_system_smart_contracts(false), config: mock_genesis_config(), } } @@ -172,6 +176,7 @@ pub fn mock_genesis_config() -> GenesisConfig { genesis_commitment: Some(H256::default()), bootloader_hash: Some(base_system_contracts_hashes.bootloader), default_aa_hash: Some(base_system_contracts_hashes.default_aa), + evm_emulator_hash: base_system_contracts_hashes.evm_emulator, l1_chain_id: L1ChainId(9), sl_chain_id: None, l2_chain_id: L2ChainId::default(), @@ -235,6 +240,7 @@ pub async fn insert_genesis_batch( .config .default_aa_hash .ok_or(GenesisError::MalformedConfig("default_aa_hash"))?, + evm_emulator: genesis_params.config.evm_emulator_hash, }; let commitment_input = CommitmentInput::for_genesis_batch( genesis_root_hash, diff --git a/core/node/genesis/src/utils.rs b/core/node/genesis/src/utils.rs index a6c9513dbde..6042513537c 100644 --- a/core/node/genesis/src/utils.rs +++ b/core/node/genesis/src/utils.rs @@ -130,7 +130,8 @@ pub(super) async fn insert_base_system_contracts_to_factory_deps( contracts: &BaseSystemContracts, ) -> Result<(), GenesisError> { let factory_deps = [&contracts.bootloader, &contracts.default_aa] - .iter() + .into_iter() + .chain(contracts.evm_emulator.as_ref()) .map(|c| (c.hash, be_words_to_bytes(&c.code))) .collect(); diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 6075ff048bf..7687595740a 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -345,6 +345,7 @@ impl StateKeeperIO for ExternalIO { let 
default_account_code_hash = protocol_version .default_account_code_hash() .context("Missing default account code hash")?; + let evm_emulator_code_hash = protocol_version.evm_emulator_code_hash(); let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); self.pool .connection_tagged("sync_layer") @@ -362,6 +363,7 @@ impl StateKeeperIO for ExternalIO { BaseSystemContractsHashes { bootloader: bootloader_code_hash, default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, }, l2_system_upgrade_tx_hash, ) @@ -375,9 +377,22 @@ impl StateKeeperIO for ExternalIO { .get_base_system_contract(default_account_code_hash, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch default AA code for {protocol_version:?}"))?; + let evm_emulator = if let Some(hash) = evm_emulator_code_hash { + Some( + self.get_base_system_contract(hash, cursor.next_l2_block) + .await + .with_context(|| { + format!("cannot fetch EVM emulator code for {protocol_version:?}") + })?, + ) + } else { + None + }; + Ok(BaseSystemContracts { bootloader, default_aa, + evm_emulator, }) } diff --git a/core/node/node_sync/src/genesis.rs b/core/node/node_sync/src/genesis.rs index ccc26b417e9..0ff8d0d448c 100644 --- a/core/node/node_sync/src/genesis.rs +++ b/core/node/node_sync/src/genesis.rs @@ -38,6 +38,7 @@ async fn create_genesis_params( let base_system_contracts_hashes = BaseSystemContractsHashes { bootloader: config.bootloader_hash.context("Genesis is not finished")?, default_aa: config.default_aa_hash.context("Genesis is not finished")?, + evm_emulator: config.evm_emulator_hash, }; if zksync_chain_id != config.l2_chain_id { @@ -47,10 +48,11 @@ async fn create_genesis_params( // Load the list of addresses that are known to contain system contracts at any point in time. // Not every of these addresses is guaranteed to be present in the genesis state, but we'll iterate through // them and try to fetch the contract bytecode for each of them. - let system_contract_addresses: Vec<_> = get_system_smart_contracts() - .into_iter() - .map(|contract| *contract.account_id.address()) - .collect(); + let system_contract_addresses: Vec<_> = + get_system_smart_contracts(config.evm_emulator_hash.is_some()) + .into_iter() + .map(|contract| *contract.account_id.address()) + .collect(); // These have to be *initial* base contract hashes of main node // (those that were used during genesis), not necessarily the current ones. @@ -103,6 +105,18 @@ async fn fetch_base_system_contracts( .fetch_system_contract_by_hash(contract_hashes.default_aa) .await? .context("default AA bytecode is missing on main node")?; + let evm_emulator = if let Some(hash) = contract_hashes.evm_emulator { + let bytes = client + .fetch_system_contract_by_hash(hash) + .await? 
+ .context("EVM Simulator bytecode is missing on main node")?; + Some(SystemContractCode { + code: zksync_utils::bytes_to_be_words(bytes), + hash, + }) + } else { + None + }; Ok(BaseSystemContracts { bootloader: SystemContractCode { code: zksync_utils::bytes_to_be_words(bootloader_bytecode), @@ -112,5 +126,6 @@ async fn fetch_base_system_contracts( code: zksync_utils::bytes_to_be_words(default_aa_bytecode), hash: contract_hashes.default_aa, }, + evm_emulator, }) } diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index d9a98c2bce3..3f5791cdf24 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -304,6 +304,7 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo timestamp: snapshot.l2_block_timestamp + 1, bootloader_code_hash: Some(H256::repeat_byte(1)), default_account_code_hash: Some(H256::repeat_byte(1)), + evm_emulator_code_hash: Some(H256::repeat_byte(1)), ..api::ProtocolVersion::default() }; client.insert_protocol_version(next_protocol_version.clone()); @@ -345,6 +346,13 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo next_protocol_version.default_account_code_hash.unwrap() ); + assert_eq!( + persisted_protocol_version + .base_system_contracts_hashes + .evm_emulator, + next_protocol_version.evm_emulator_code_hash + ); + let l2_block = storage .blocks_dal() .get_l2_block_header(snapshot.l2_block_number + 1) diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 86cc5323448..8220aef5da0 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -62,6 +62,7 @@ async fn request_tee_proof_inputs() { code: vec![U256([1; 4])], hash: H256([1; 32]), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, diff --git a/core/node/state_keeper/src/executor/mod.rs b/core/node/state_keeper/src/executor/mod.rs index 2fa5c3b9c12..903dae2f1ca 100644 --- a/core/node/state_keeper/src/executor/mod.rs +++ b/core/node/state_keeper/src/executor/mod.rs @@ -40,7 +40,7 @@ impl TxExecutionResult { _ => Self::Success { tx_metrics: Box::new(ExecutionMetricsForCriteria::new(Some(tx), &res.tx_result)), gas_remaining: res.tx_result.statistics.gas_remaining, - tx_result: res.tx_result, + tx_result: res.tx_result.clone(), compressed_bytecodes: res.compressed_bytecodes, call_tracer_result: res.call_traces, }, diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index 7a1871dbfea..79072f23aed 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -259,7 +259,7 @@ impl Tester { patch: 0.into(), }, &BASE_SYSTEM_CONTRACTS, - &get_system_smart_contracts(), + &get_system_smart_contracts(false), Default::default(), ) .await diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 24b1ffca631..97340d6496a 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -347,7 +347,7 @@ impl StateKeeperOutputHandler for TreeWritesPersistence { #[cfg(test)] mod tests { - use std::collections::HashSet; + use std::collections::{HashMap, HashSet}; use assert_matches::assert_matches; use futures::FutureExt; @@ -462,6 +462,7 @@ mod tests { tx, tx_result, vec![], + HashMap::new(), BlockGasCount::default(), 
VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 7ea01e6af1e..e2a90f30691 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{collections::HashMap, time::Duration}; use test_casing::test_casing; use zksync_contracts::BaseSystemContractsHashes; @@ -249,6 +249,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); @@ -267,6 +268,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); @@ -356,6 +358,7 @@ async fn processing_events_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); } @@ -457,6 +460,7 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom tx.into(), create_execution_result([]), vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 2dc45a5eaaa..02170283e94 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -156,7 +156,7 @@ impl Tester { patch: 0.into(), }, &self.base_system_contracts, - &get_system_smart_contracts(), + &get_system_smart_contracts(false), L1VerifierConfig::default(), ) .await diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index d36ceec7d70..22f24573070 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -498,8 +498,9 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -624,8 +625,9 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -704,8 +706,9 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, vec![], diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index e3fe849e802..962cc807318 100644 --- a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -277,6 +277,8 @@ impl L2BlockMaxPayloadSizeSealer { #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_utils::time::seconds_since_epoch; use super::*; @@ -287,6 +289,7 @@ mod tests { tx, create_execution_result([]), vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index edcf3ccc4f5..d1e82c44bd6 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -33,6 +33,7 @@ pub(crate) fn 
successful_exec() -> BatchTransactionExecutionResult { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index ffca8dff864..cb282f3b7d6 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -264,6 +264,7 @@ pub(crate) fn successful_exec_with_log() -> BatchTransactionExecutionResult { }, statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], @@ -278,6 +279,7 @@ pub(crate) fn rejected_exec(reason: Halt) -> BatchTransactionExecutionResult { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index 80de0f0beff..9e971541b20 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -138,6 +138,7 @@ pub(super) fn create_execution_result( circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } diff --git a/core/node/state_keeper/src/updates/l1_batch_updates.rs b/core/node/state_keeper/src/updates/l1_batch_updates.rs index aa2e22cac48..2979ebbd8c2 100644 --- a/core/node/state_keeper/src/updates/l1_batch_updates.rs +++ b/core/node/state_keeper/src/updates/l1_batch_updates.rs @@ -49,6 +49,8 @@ impl L1BatchUpdates { #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_multivm::vm_latest::TransactionVmExt; use zksync_types::{L2BlockNumber, ProtocolVersionId, H256}; @@ -76,6 +78,7 @@ mod tests { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); diff --git a/core/node/state_keeper/src/updates/l2_block_updates.rs b/core/node/state_keeper/src/updates/l2_block_updates.rs index d8673088dc3..27995b384ab 100644 --- a/core/node/state_keeper/src/updates/l2_block_updates.rs +++ b/core/node/state_keeper/src/updates/l2_block_updates.rs @@ -1,17 +1,14 @@ use std::collections::HashMap; -use once_cell::sync::Lazy; use zksync_multivm::{ interface::{ Call, CompressedBytecodeInfo, ExecutionResult, L2BlockEnv, TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionMetrics, VmExecutionResultAndLogs, }, - vm_latest::TransactionVmExt, + vm_latest::{utils::extract_bytecodes_marked_as_known, TransactionVmExt}, }; -use zksync_system_constants::KNOWN_CODES_STORAGE_ADDRESS; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, - ethabi, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, L2BlockNumber, ProtocolVersionId, StorageLogWithPreviousValue, Transaction, H256, }; @@ -19,27 +16,6 @@ use zksync_utils::bytecode::hash_bytecode; use crate::metrics::KEEPER_METRICS; -/// Extracts all bytecodes marked as known on the system contracts. 
-fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { - static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { - ethabi::long_signature( - "MarkedAsKnown", - &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], - ) - }); - - all_generated_events - .iter() - .filter(|event| { - // Filter events from the deployer contract that match the expected signature. - event.address == KNOWN_CODES_STORAGE_ADDRESS - && event.indexed_topics.len() == 3 - && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE - }) - .map(|event| event.indexed_topics[1]) - .collect() -} - #[derive(Debug, Clone, PartialEq)] pub struct L2BlockUpdates { pub executed_transactions: Vec, @@ -104,6 +80,7 @@ impl L2BlockUpdates { self.block_execution_metrics += execution_metrics; } + #[allow(clippy::too_many_arguments)] pub(crate) fn extend_from_executed_transaction( &mut self, tx: Transaction, @@ -111,6 +88,7 @@ impl L2BlockUpdates { tx_l1_gas_this_tx: BlockGasCount, execution_metrics: VmExecutionMetrics, compressed_bytecodes: Vec, + new_known_factory_deps: HashMap>, call_traces: Vec, ) { let saved_factory_deps = @@ -145,12 +123,15 @@ impl L2BlockUpdates { // Get transaction factory deps let factory_deps = &tx.execute.factory_deps; - let tx_factory_deps: HashMap<_, _> = factory_deps + let mut tx_factory_deps: HashMap<_, _> = factory_deps .iter() - .map(|bytecode| (hash_bytecode(bytecode), bytecode)) + .map(|bytecode| (hash_bytecode(bytecode), bytecode.clone())) .collect(); + // Ensure that *dynamic* factory deps (ones that may be created when executing EVM contracts) + // are added into the lookup map as well. + tx_factory_deps.extend(new_known_factory_deps); - // Save all bytecodes that were marked as known on the bootloader + // Save all bytecodes that were marked as known in the bootloader let known_bytecodes = saved_factory_deps.into_iter().map(|bytecode_hash| { let bytecode = tx_factory_deps.get(&bytecode_hash).unwrap_or_else(|| { panic!( @@ -230,6 +211,7 @@ mod tests { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 2fad56a9929..0cebc5d8b47 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use zksync_contracts::BaseSystemContractsHashes; use zksync_multivm::{ interface::{ @@ -8,7 +10,7 @@ use zksync_multivm::{ }; use zksync_types::{ block::BlockGasCount, fee_model::BatchFeeInput, Address, L1BatchNumber, L2BlockNumber, - ProtocolVersionId, Transaction, + ProtocolVersionId, Transaction, H256, }; pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates}; @@ -104,11 +106,13 @@ impl UpdatesManager { self.protocol_version } + #[allow(clippy::too_many_arguments)] pub fn extend_from_executed_transaction( &mut self, tx: Transaction, tx_execution_result: VmExecutionResultAndLogs, compressed_bytecodes: Vec, + new_known_factory_deps: HashMap>, tx_l1_gas_this_tx: BlockGasCount, execution_metrics: VmExecutionMetrics, call_traces: Vec, @@ -124,6 +128,7 @@ impl UpdatesManager { tx_l1_gas_this_tx, execution_metrics, compressed_bytecodes, + new_known_factory_deps, call_traces, ); latency.observe(); @@ -233,6 +238,7 @@ mod tests { tx, create_execution_result([]), vec![], + HashMap::new(), new_block_gas_count(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/test_utils/src/lib.rs 
b/core/node/test_utils/src/lib.rs index b9984b78211..9eb53994eee 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -56,6 +56,7 @@ pub fn create_l1_batch(number: u32) -> L1BatchHeader { BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), + evm_emulator: None, }, ProtocolVersionId::latest(), ); @@ -88,6 +89,7 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash: BaseSystemContractsHashes::default().bootloader, default_aa_code_hash: BaseSystemContractsHashes::default().default_aa, + evm_emulator_code_hash: BaseSystemContractsHashes::default().evm_emulator, protocol_version: Some(ProtocolVersionId::latest()), }, aux_data_hash: H256::zero(), @@ -217,6 +219,7 @@ impl Snapshot { l2_block, factory_deps: [&contracts.bootloader, &contracts.default_aa] .into_iter() + .chain(contracts.evm_emulator.as_ref()) .map(|c| (c.hash, zksync_utils::be_words_to_bytes(&c.code))) .collect(), storage_logs, diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 6c2933635b4..dc94752d988 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -209,6 +209,7 @@ async fn get_updates_manager_witness_input_data( ) -> anyhow::Result { let initial_heap_content = output.batch.final_bootloader_memory.clone().unwrap(); // might be just empty let default_aa = system_env.base_system_smart_contracts.hashes().default_aa; + let evm_emulator = system_env.base_system_smart_contracts.hashes().evm_emulator; let bootloader = system_env.base_system_smart_contracts.hashes().bootloader; let bootloader_code_bytes = connection .factory_deps_dal() @@ -240,6 +241,22 @@ async fn get_updates_manager_witness_input_data( used_bytecodes.insert(account_code_hash, account_bytecode); } + let evm_emulator_code_hash = if let Some(evm_emulator) = evm_emulator { + let evm_emulator_code_hash = h256_to_u256(evm_emulator); + if used_contract_hashes.contains(&evm_emulator_code_hash) { + let evm_emulator_bytecode = connection + .factory_deps_dal() + .get_sealed_factory_dep(evm_emulator) + .await? 
+ .ok_or_else(|| anyhow!("EVM Simulator bytecode should exist"))?; + let evm_emulator_bytecode = bytes_to_chunks(&evm_emulator_bytecode); + used_bytecodes.insert(evm_emulator_code_hash, evm_emulator_bytecode); + } + Some(evm_emulator_code_hash) + } else { + None + }; + let storage_refunds = output.batch.final_execution_state.storage_refunds.clone(); let pubdata_costs = output.batch.final_execution_state.pubdata_costs.clone(); let witness_block_state = WitnessStorageState { @@ -254,6 +271,7 @@ async fn get_updates_manager_witness_input_data( protocol_version: system_env.version, bootloader_code, default_account_code_hash: account_code_hash, + evm_emulator_code_hash, storage_refunds, pubdata_costs, witness_block_state, diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 53bef106a8f..575fd59be04 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -322,6 +322,7 @@ async fn store_l1_batches( .iter() .map(|contract| hash_bytecode(&contract.bytecode)) .chain([genesis_params.base_system_contracts().hashes().default_aa]) + .chain(genesis_params.base_system_contracts().hashes().evm_emulator) .map(h256_to_u256) .collect(); diff --git a/core/node/vm_runner/src/tests/output_handler.rs b/core/node/vm_runner/src/tests/output_handler.rs index 1bf30effdbe..f57814ea449 100644 --- a/core/node/vm_runner/src/tests/output_handler.rs +++ b/core/node/vm_runner/src/tests/output_handler.rs @@ -66,6 +66,7 @@ impl OutputHandlerTester { code: vec![], hash: Default::default(), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index 999ea6eb6e0..39a36694526 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -154,7 +154,7 @@ impl Account { let max_fee_per_gas = U256::from(0u32); let gas_limit = U256::from(20_000_000); let factory_deps = execute.factory_deps; - abi::Transaction::L1 { + let tx = abi::Transaction::L1 { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&self.address), @@ -186,9 +186,8 @@ impl Account { .into(), factory_deps, eth_block: 0, - } - .try_into() - .unwrap() + }; + Transaction::from_abi(tx, false).unwrap() } pub fn get_test_contract_transaction( diff --git a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol new file mode 100644 index 00000000000..5f4de59681f --- /dev/null +++ b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +/** + * Mock `KnownCodeStorage` counterpart producing `MarkedAsKnown` events and having `publishEVMBytecode` method + * added for EVM emulation, calls to which should be traced by the host. 
+ */ +contract MockKnownCodeStorage { + event MarkedAsKnown(bytes32 indexed bytecodeHash, bool indexed sendBytecodeToL1); + + function markFactoryDeps(bool _shouldSendToL1, bytes32[] calldata _hashes) external { + unchecked { + uint256 hashesLen = _hashes.length; + for (uint256 i = 0; i < hashesLen; ++i) { + _markBytecodeAsPublished(_hashes[i], _shouldSendToL1); + } + } + } + + function markBytecodeAsPublished(bytes32 _bytecodeHash) external { + _markBytecodeAsPublished(_bytecodeHash, false); + } + + function _markBytecodeAsPublished(bytes32 _bytecodeHash, bool _shouldSendToL1) internal { + if (getMarker(_bytecodeHash) == 0) { + assembly { + sstore(_bytecodeHash, 1) + } + emit MarkedAsKnown(_bytecodeHash, _shouldSendToL1); + } + } + + bytes32 evmBytecodeHash; // For tests, it's OK to potentially collide with the marker slot for hash `bytes32(0)` + + /// Sets the EVM bytecode hash to be used in the next `publishEVMBytecode` call. + function setEVMBytecodeHash(bytes32 _bytecodeHash) external { + evmBytecodeHash = _bytecodeHash; + } + + function publishEVMBytecode(bytes calldata _bytecode) external { + bytes32 hash = evmBytecodeHash; + require(hash != bytes32(0), "EVM bytecode hash not set"); + + if (getMarker(evmBytecodeHash) == 0) { + assembly { + sstore(hash, 1) + } + } + emit MarkedAsKnown(hash, getMarker(hash) == 0); + evmBytecodeHash = bytes32(0); + } + + function getMarker(bytes32 _hash) public view returns (uint256 marker) { + assembly { + marker := sload(_hash) + } + } +} + +/** + * Mock `ContractDeployer` counterpart focusing on EVM bytecode deployment (via `create`; this isn't how real EVM bytecode deployment works, + * but it's good enough for low-level tests). + */ +contract MockContractDeployer { + enum AccountAbstractionVersion { + None, + Version1 + } + + address constant CODE_ORACLE_ADDR = address(0x8012); + MockKnownCodeStorage constant KNOWN_CODE_STORAGE_CONTRACT = MockKnownCodeStorage(address(0x8004)); + + /// The returned value is obviously incorrect in the general case, but works well enough when called by the bootloader. + function extendedAccountVersion(address _address) public view returns (AccountAbstractionVersion) { + return AccountAbstractionVersion.Version1; + } + + /// Replaces real deployment with publishing a surrogate EVM "bytecode". 
+ /// @param _salt bytecode hash + /// @param _bytecodeHash ignored, since it's not possible to set arbitrarily + /// @param _input bytecode to publish + function create( + bytes32 _salt, + bytes32 _bytecodeHash, + bytes calldata _input + ) external payable returns (address) { + KNOWN_CODE_STORAGE_CONTRACT.setEVMBytecodeHash(_salt); + KNOWN_CODE_STORAGE_CONTRACT.publishEVMBytecode(_input); + return address(0); + } +} diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index 220a75944e0..b7d4ffebcf9 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -13,3 +13,5 @@ prover: recursion_scheduler_level_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 dummy_verifier: true l1_batch_commit_data_generator_mode: Rollup +# Uncomment to enable EVM emulation (requires to run genesis) +# evm_emulator_hash: 0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91 diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 92366b0912b..22ec5c53485 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7697,6 +7697,7 @@ dependencies = [ "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", "circuit_sequencer_api 0.150.5", + "ethabi", "hex", "itertools 0.10.5", "once_cell", diff --git a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs index 23ae1b0f2af..a8bc59bd45e 100644 --- a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs +++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs @@ -114,6 +114,10 @@ pub(super) async fn generate_witness( } }; + let evm_emulator_code_hash = input.vm_run_data.evm_emulator_code_hash; + // By convention, default AA is used instead of the EVM emulator if the latter is disabled. 
+ let evm_emulator_code_hash = + evm_emulator_code_hash.unwrap_or(input.vm_run_data.default_account_code_hash); let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run( Address::zero(), BOOTLOADER_ADDRESS, @@ -121,8 +125,7 @@ pub(super) async fn generate_witness( bootloader_contents, false, input.vm_run_data.default_account_code_hash, - // NOTE: this will be evm_simulator_code_hash in future releases - input.vm_run_data.default_account_code_hash, + evm_emulator_code_hash, input.vm_run_data.used_bytecodes, Vec::default(), MAX_CYCLES_FOR_TX as usize, diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs index 30ec0eeb9c4..41ce906f455 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -146,6 +146,7 @@ impl DeployL1Config { .diamond_init_minimal_l2_gas_price, bootloader_hash: genesis_config.bootloader_hash.unwrap(), default_aa_hash: genesis_config.default_aa_hash.unwrap(), + evm_emulator_hash: genesis_config.evm_emulator_hash, diamond_init_priority_tx_max_pubdata: initial_deployment_config .diamond_init_priority_tx_max_pubdata, diamond_init_pubdata_pricing_mode: initial_deployment_config @@ -194,6 +195,7 @@ pub struct ContractsDeployL1Config { pub diamond_init_minimal_l2_gas_price: u64, pub bootloader_hash: H256, pub default_aa_hash: H256, + pub evm_emulator_hash: Option, } #[derive(Debug, Deserialize, Serialize, Clone)] From 40bded59f01e451837378ae82398fcf151bb7988 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Wed, 9 Oct 2024 06:33:41 -0300 Subject: [PATCH 020/140] fix(zkstack_cli): Fix contract verifier init rate limit (#3034) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fix contract verifier init --- .github/workflows/ci-core-reusable.yml | 1 + .../commands/contract_verifier/args/releases.rs | 16 +++++++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index d03e44f8bca..7098b562292 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -168,6 +168,7 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + echo "GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}" >> .env echo RUN_CONTRACT_VERIFICATION_TEST=true >> $GITHUB_ENV - name: Start services diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs index 6f7eae4c168..2b2b4cf97b1 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs @@ -79,13 +79,19 @@ fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result = serde_json::from_str(&response)?; let mut versions = vec![]; From 9450c504cbae23aa3f5612d63e47dbebed320b8a Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Wed, 9 Oct 2024 21:46:28 +1100 Subject: [PATCH 021/140] fix(zk_toolbox): allow hyphens after `zks test rust --options` (#3043) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## 
What ❔ Forces clap to allow hyphens after `zks test rust --options` ## Why ❔ So that you can run sensible commands like `zks test rust --options "-E test(io::tests::continue_unsealed_batch_on_restart) --no-capture"` ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- .../crates/zk_supervisor/src/commands/test/args/integration.rs | 2 +- zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs index 435dddfc360..6cec40a2e33 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs @@ -9,6 +9,6 @@ pub struct IntegrationArgs { pub external_node: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_TEST_PATTERN_HELP)] + #[clap(short, long, help = MSG_TEST_PATTERN_HELP, allow_hyphen_values(true))] pub test_pattern: Option, } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs index ae6b4518e6d..711a4bffae2 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs @@ -40,7 +40,7 @@ pub enum TestCommands { Upgrade(UpgradeArgs), #[clap(about = MSG_BUILD_ABOUT)] Build, - #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit")] + #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit", allow_hyphen_values(true))] Rust(RustArgs), #[clap(about = MSG_L1_CONTRACTS_ABOUT, alias = "l1")] L1Contracts, From 25112df39d052f083bc45964f0298b3af5842cac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Wed, 9 Oct 2024 07:48:07 -0300 Subject: [PATCH 022/140] feat(configs): Add port parameter to ConsensusConfig (#2986) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Update consensus config: * Add `port` parameter ## Why ❔ Make it consistent with other config structs. --------- Co-authored-by: Danil --- core/lib/config/src/configs/consensus.rs | 1 + core/lib/config/src/testonly.rs | 1 + core/lib/protobuf_config/src/consensus.rs | 4 ++ .../src/proto/core/consensus.proto | 3 ++ core/node/consensus/src/testonly.rs | 1 + etc/env/consensus_config.yaml | 1 + etc/env/en_consensus_config.yaml | 1 + etc/env/file_based/general.yaml | 7 ++++ zk_toolbox/crates/config/src/consts.rs | 2 - .../src/commands/chain/init/configs.rs | 25 ++++------- .../commands/external_node/prepare_configs.rs | 38 ++++++++--------- zk_toolbox/crates/zk_inception/src/consts.rs | 23 ---------- .../zk_inception/src/utils/consensus.rs | 42 ++----------------- .../crates/zk_inception/src/utils/ports.rs | 8 +++- 14 files changed, 54 insertions(+), 103 deletions(-) diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 918d8f4adab..d864d5d44da 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -115,6 +115,7 @@ impl RpcConfig { /// Config (shared between main node and external node). 
#[derive(Clone, Debug, PartialEq)] pub struct ConsensusConfig { + pub port: u16, /// Local socket address to listen for the incoming connections. pub server_addr: std::net::SocketAddr, /// Public address of this node (should forward to `server_addr`) diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index a6ff30e04a9..960808aa6a6 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -802,6 +802,7 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::consensus::ConsensusConfig { use configs::consensus::{ConsensusConfig, Host, NodePublicKey}; ConsensusConfig { + port: self.sample(rng), server_addr: self.sample(rng), public_addr: Host(self.sample(rng)), max_payload_size: self.sample(rng), diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index 81cad437fe4..37f0c52b7aa 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -148,6 +148,9 @@ impl ProtoRepr for proto::Config { }; Ok(Self::Type { + port: required(&self.port) + .and_then(|x| Ok((*x).try_into()?)) + .context("port")?, server_addr: required(&self.server_addr) .and_then(|x| Ok(x.parse()?)) .context("server_addr")?, @@ -182,6 +185,7 @@ impl ProtoRepr for proto::Config { fn build(this: &Self::Type) -> Self { Self { + port: Some(this.port.into()), server_addr: Some(this.server_addr.to_string()), public_addr: Some(this.public_addr.0.clone()), max_payload_size: Some(this.max_payload_size.try_into().unwrap()), diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index 92527df739a..98b43f37f48 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -70,6 +70,9 @@ message Config { reserved 3; reserved "validators"; + // Port to listen on, for incoming TCP connections. + optional uint32 port = 12; // required + // IP:port to listen on, for incoming TCP connections. // Use `0.0.0.0:` to listen on all network interfaces (i.e. on all IPs exposed by this VM). 
optional string server_addr = 1; // required; IpAddr diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 04a2dfbc083..2cd315ce063 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -154,6 +154,7 @@ fn make_config( genesis_spec: Option, ) -> config::ConsensusConfig { config::ConsensusConfig { + port: cfg.server_addr.port(), server_addr: *cfg.server_addr, public_addr: config::Host(cfg.public_addr.0.clone()), max_payload_size: usize::MAX, diff --git a/etc/env/consensus_config.yaml b/etc/env/consensus_config.yaml index 304ea31fac9..2564865eeb3 100644 --- a/etc/env/consensus_config.yaml +++ b/etc/env/consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3054 server_addr: "127.0.0.1:3054" public_addr: "127.0.0.1:3054" max_payload_size: 2500000 diff --git a/etc/env/en_consensus_config.yaml b/etc/env/en_consensus_config.yaml index f759e72e891..5c428866cb6 100644 --- a/etc/env/en_consensus_config.yaml +++ b/etc/env/en_consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3055 server_addr: '127.0.0.1:3055' public_addr: '127.0.0.1:3055' max_payload_size: 2500000 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index a4ba8c0201a..017d79dbe73 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -375,3 +375,10 @@ da_dispatcher: external_proof_integration_api: http_port: 3073 + +consensus: + port: 3054 + server_addr: "127.0.0.1:3054" + public_addr: "127.0.0.1:3054" + max_payload_size: 2500000 + gossip_dynamic_inbound_limit: 100 diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs index 80b204cc619..f462ce33b8f 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zk_toolbox/crates/config/src/consts.rs @@ -62,8 +62,6 @@ pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001; pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002; /// Default port for the explorer data fetcher service pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040; -/// Default port for consensus service -pub const DEFAULT_CONSENSUS_PORT: u16 = 3054; pub const EXPLORER_API_DOCKER_IMAGE: &str = "matterlabs/block-explorer-api"; pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher"; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs index d0897473b83..37ee2e076ab 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs @@ -2,7 +2,7 @@ use anyhow::Context; use common::logger; use config::{ copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, - ChainConfig, ContractsConfig, EcosystemConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, ContractsConfig, EcosystemConfig, }; use ethers::types::Address; use xshell::Shell; @@ -15,13 +15,12 @@ use crate::{ }, portal::update_portal_config, }, - defaults::PORT_RANGE_END, messages::{ - MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, + MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, }, utils::{ - consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, + consensus::{generate_consensus_keys, get_consensus_secrets, get_genesis_specs}, ports::EcosystemPortsScanner, }, }; @@ -57,22 +56,14 @@ pub async fn init_configs( )?; } - // Initialize general config let mut 
general_config = chain_config.get_general_config()?; - - // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - let offset = ((chain_config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = - ecosystem_ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut consensus_config = general_config + .consensus_config + .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; let consensus_keys = generate_consensus_keys(); - let consensus_config = get_consensus_config( - chain_config, - consensus_port, - Some(consensus_keys.clone()), - None, - )?; + consensus_config.genesis_spec = Some(get_genesis_specs(chain_config, &consensus_keys)); + general_config.consensus_config = Some(consensus_config); general_config.save_with_base_path(shell, &chain_config.configs)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs index 5ab859d17f0..d714a0f8e84 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs @@ -6,12 +6,12 @@ use config::{ external_node::ENConfig, set_rocks_db_config, traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, }; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; use zksync_config::configs::{ - consensus::{ConsensusSecrets, NodeSecretKey, Secret}, + consensus::{ConsensusConfig, ConsensusSecrets, NodeSecretKey, Secret}, DatabaseSecrets, L1Secrets, }; use zksync_consensus_crypto::TextFmt; @@ -19,14 +19,13 @@ use zksync_consensus_roles as roles; use crate::{ commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal}, - defaults::PORT_RANGE_END, messages::{ msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_CONSENSUS_SECRETS_MISSING_ERR, MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PREPARING_EN_CONFIGS, }, utils::{ - consensus::{get_consensus_config, node_public_key}, + consensus::node_public_key, ports::EcosystemPortsScanner, rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }, @@ -79,19 +78,12 @@ fn prepare_configs( bridge_addresses_refresh_interval_sec: None, }; let mut general_en = general.clone(); + general_en.consensus_config = None; let main_node_consensus_config = general .consensus_config .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; - - // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - ports.add_port_info( - main_node_consensus_config.server_addr.port(), - "Main node consensus".to_string(), - ); - let offset = ((config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut en_consensus_config = main_node_consensus_config.clone(); let mut gossip_static_outbound = BTreeMap::new(); let main_node_public_key = node_public_key( @@ -101,13 +93,8 @@ fn prepare_configs( .context(MSG_CONSENSUS_SECRETS_MISSING_ERR)?, )? 
.context(MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR)?; - gossip_static_outbound.insert(main_node_public_key, main_node_consensus_config.public_addr); - - let en_consensus_config = - get_consensus_config(config, consensus_port, None, Some(gossip_static_outbound))?; - general_en.consensus_config = Some(en_consensus_config.clone()); - en_consensus_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.gossip_static_outbound = gossip_static_outbound; // Set secrets config let node_key = roles::node::SecretKey::generate().encode(); @@ -128,16 +115,25 @@ fn prepare_configs( }), data_availability: None, }; - secrets.save_with_base_path(shell, en_configs_path)?; + let dirs = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::ExternalNode)?; set_rocks_db_config(&mut general_en, dirs)?; + general_en.save_with_base_path(shell, en_configs_path)?; en_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.save_with_base_path(shell, en_configs_path)?; + secrets.save_with_base_path(shell, en_configs_path)?; + let offset = 0; // This is zero because general_en ports already have a chain offset ports.allocate_ports_in_yaml( shell, &GeneralConfig::get_path_with_base_path(en_configs_path), - 0, // This is zero because general_en ports already have a chain offset + offset, + )?; + ports.allocate_ports_in_yaml( + shell, + &ConsensusConfig::get_path_with_base_path(en_configs_path), + offset, )?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index 9f81847e333..df27d2f02d2 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -1,5 +1,3 @@ -use std::net::{IpAddr, Ipv4Addr}; - pub const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; pub const MINIMUM_BALANCE_FOR_WALLET: u128 = 5000000000000000000; @@ -12,27 +10,6 @@ pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; -#[allow(non_upper_case_globals)] -const kB: usize = 1024; - -/// Max payload size for consensus in bytes -pub const MAX_PAYLOAD_SIZE: usize = 2_500_000; -/// Max batch size for consensus in bytes -/// Compute a default batch size, so operators are not caught out by the missing setting -/// while we're still working on batch syncing. The batch interval is ~1 minute, -/// so there will be ~60 blocks, and an Ethereum Merkle proof is ~1kB, but under high -/// traffic there can be thousands of huge transactions that quickly fill up blocks -/// and there could be more blocks in a batch then expected. We chose a generous -/// limit so as not to prevent any legitimate batch from being transmitted. 
-pub const MAX_BATCH_SIZE: usize = MAX_PAYLOAD_SIZE * 5000 + kB; -/// Gossip dynamic inbound limit for consensus -pub const GOSSIP_DYNAMIC_INBOUND_LIMIT: usize = 100; - -/// Public address for consensus -pub const CONSENSUS_PUBLIC_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); -/// Server address for consensus -pub const CONSENSUS_SERVER_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); - /// Path to the JS runtime config for the block-explorer-app docker container to be mounted to pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js"; pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs index 2979b4df0c1..946d28a33fb 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs @@ -1,24 +1,14 @@ -use std::{ - collections::{BTreeMap, BTreeSet}, - net::SocketAddr, -}; - use anyhow::Context as _; use config::ChainConfig; use secrecy::{ExposeSecret, Secret}; use zksync_config::configs::consensus::{ - AttesterPublicKey, AttesterSecretKey, ConsensusConfig, ConsensusSecrets, GenesisSpec, Host, - NodePublicKey, NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, - WeightedAttester, WeightedValidator, + AttesterPublicKey, AttesterSecretKey, ConsensusSecrets, GenesisSpec, NodePublicKey, + NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, WeightedAttester, + WeightedValidator, }; use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::{attester, node, validator}; -use crate::consts::{ - CONSENSUS_PUBLIC_ADDRESS_HOST, CONSENSUS_SERVER_ADDRESS_HOST, GOSSIP_DYNAMIC_INBOUND_LIMIT, - MAX_BATCH_SIZE, MAX_PAYLOAD_SIZE, -}; - pub(crate) fn parse_attester_committee( attesters: &[WeightedAttester], ) -> anyhow::Result { @@ -48,32 +38,6 @@ pub struct ConsensusPublicKeys { attester_key: attester::PublicKey, } -pub fn get_consensus_config( - chain_config: &ChainConfig, - consensus_port: u16, - consensus_keys: Option, - gossip_static_outbound: Option>, -) -> anyhow::Result { - let genesis_spec = - consensus_keys.map(|consensus_keys| get_genesis_specs(chain_config, &consensus_keys)); - - let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, consensus_port); - let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, consensus_port); - - Ok(ConsensusConfig { - server_addr, - public_addr: Host(public_addr.encode()), - genesis_spec, - max_payload_size: MAX_PAYLOAD_SIZE, - gossip_dynamic_inbound_limit: GOSSIP_DYNAMIC_INBOUND_LIMIT, - max_batch_size: MAX_BATCH_SIZE, - gossip_static_inbound: BTreeSet::new(), - gossip_static_outbound: gossip_static_outbound.unwrap_or_default(), - rpc: None, - debug_page_addr: None, - }) -} - pub fn generate_consensus_keys() -> ConsensusSecretKeys { ConsensusSecretKeys { validator_key: validator::SecretKey::generate(), diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zk_toolbox/crates/zk_inception/src/utils/ports.rs index 5102b4fd9c6..3b7b7ae7072 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/ports.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/ports.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt, ops::Range, path::Path}; +use std::{collections::HashMap, fmt, net::SocketAddr, ops::Range, path::Path}; use anyhow::{bail, Context, Result}; use config::{ @@ -109,6 +109,12 @@ impl EcosystemPorts { } } } + } else if 
key.as_str().map(|s| s.ends_with("addr")).unwrap_or(false) { + let socket_addr = val.as_str().unwrap().parse::()?; + if let Some(new_port) = updated_ports.get(&socket_addr.port()) { + let new_socket_addr = SocketAddr::new(socket_addr.ip(), *new_port); + *val = Value::String(new_socket_addr.to_string()); + } } } // Continue traversing From ebf9604c5ab2a1cae1ffd2f9c922f35a1d0ad876 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Wed, 9 Oct 2024 13:01:03 +0200 Subject: [PATCH 023/140] feat: Add initial version prover_autoscaler (#2993) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add zksync_prover_autoscaler, which collects data, but only reports metrics instead of actual scaling. ## Why ❔ First step in creating fast global prover autoscaler. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 5 + Cargo.toml | 163 +++--- core/lib/config/Cargo.toml | 4 + core/lib/config/src/configs/mod.rs | 1 + .../config/src/configs/prover_autoscaler.rs | 117 +++++ core/lib/protobuf_config/Cargo.toml | 1 + core/lib/protobuf_config/src/lib.rs | 1 + .../src/proto/config/prover_autoscaler.proto | 46 ++ .../protobuf_config/src/prover_autoscaler.rs | 172 ++++++ prover/Cargo.lock | 491 +++++++++++++++++- prover/Cargo.toml | 18 +- .../crates/bin/prover_autoscaler/Cargo.toml | 45 ++ .../crates/bin/prover_autoscaler/src/agent.rs | 130 +++++ .../prover_autoscaler/src/cluster_types.rs | 66 +++ .../bin/prover_autoscaler/src/global/mod.rs | 3 + .../prover_autoscaler/src/global/queuer.rs | 41 ++ .../prover_autoscaler/src/global/scaler.rs | 360 +++++++++++++ .../prover_autoscaler/src/global/watcher.rs | 89 ++++ .../bin/prover_autoscaler/src/k8s/mod.rs | 5 + .../bin/prover_autoscaler/src/k8s/scaler.rs | 27 + .../bin/prover_autoscaler/src/k8s/watcher.rs | 138 +++++ .../crates/bin/prover_autoscaler/src/lib.rs | 6 + .../crates/bin/prover_autoscaler/src/main.rs | 148 ++++++ .../bin/prover_autoscaler/src/metrics.rs | 14 + .../bin/prover_autoscaler/src/task_wiring.rs | 72 +++ 25 files changed, 2060 insertions(+), 103 deletions(-) create mode 100644 core/lib/config/src/configs/prover_autoscaler.rs create mode 100644 core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto create mode 100644 core/lib/protobuf_config/src/prover_autoscaler.rs create mode 100644 prover/crates/bin/prover_autoscaler/Cargo.toml create mode 100644 prover/crates/bin/prover_autoscaler/src/agent.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/cluster_types.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/global/mod.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/global/queuer.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/global/scaler.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/global/watcher.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/k8s/mod.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/lib.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/main.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/metrics.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/task_wiring.rs diff --git 
a/Cargo.lock b/Cargo.lock index 5073188d632..55bbb4b5582 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9666,8 +9666,12 @@ dependencies = [ "secrecy", "serde", "serde_json", + "strum", + "strum_macros", + "time", "tracing", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -10899,6 +10903,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", diff --git a/Cargo.toml b/Cargo.toml index 691341f71ba..5d516e97aba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,85 +1,85 @@ [workspace] members = [ - # Binaries - "core/bin/block_reverter", - "core/bin/contract-verifier", - "core/bin/external_node", - "core/bin/merkle_tree_consistency_checker", - "core/bin/snapshots_creator", - "core/bin/selector_generator", - "core/bin/system-constants-generator", - "core/bin/verified_sources_fetcher", - "core/bin/zksync_server", - "core/bin/genesis_generator", - "core/bin/zksync_tee_prover", - # Node services - "core/node/node_framework", - "core/node/proof_data_handler", - "core/node/block_reverter", - "core/node/commitment_generator", - "core/node/house_keeper", - "core/node/genesis", - "core/node/shared_metrics", - "core/node/db_pruner", - "core/node/fee_model", - "core/node/da_dispatcher", - "core/node/eth_sender", - "core/node/vm_runner", - "core/node/test_utils", - "core/node/state_keeper", - "core/node/reorg_detector", - "core/node/consistency_checker", - "core/node/metadata_calculator", - "core/node/node_sync", - "core/node/node_storage_init", - "core/node/consensus", - "core/node/contract_verification_server", - "core/node/api_server", - "core/node/tee_verifier_input_producer", - "core/node/base_token_adjuster", - "core/node/external_proof_integration_api", - "core/node/logs_bloom_backfill", - "core/node/da_clients", - # Libraries - "core/lib/db_connection", - "core/lib/zksync_core_leftovers", - "core/lib/basic_types", - "core/lib/config", - "core/lib/constants", - "core/lib/contract_verifier", - "core/lib/contracts", - "core/lib/circuit_breaker", - "core/lib/dal", - "core/lib/env_config", - "core/lib/da_client", - "core/lib/eth_client", - "core/lib/eth_signer", - "core/lib/l1_contract_interface", - "core/lib/mempool", - "core/lib/merkle_tree", - "core/lib/mini_merkle_tree", - "core/lib/node_framework_derive", - "core/lib/object_store", - "core/lib/prover_interface", - "core/lib/queued_job_processor", - "core/lib/state", - "core/lib/storage", - "core/lib/tee_verifier", - "core/lib/types", - "core/lib/protobuf_config", - "core/lib/utils", - "core/lib/vlog", - "core/lib/multivm", - "core/lib/vm_interface", - "core/lib/vm_executor", - "core/lib/web3_decl", - "core/lib/snapshots_applier", - "core/lib/crypto_primitives", - "core/lib/external_price_api", - # Test infrastructure - "core/tests/test_account", - "core/tests/loadnext", - "core/tests/vm-benchmark", + # Binaries + "core/bin/block_reverter", + "core/bin/contract-verifier", + "core/bin/external_node", + "core/bin/merkle_tree_consistency_checker", + "core/bin/snapshots_creator", + "core/bin/selector_generator", + "core/bin/system-constants-generator", + "core/bin/verified_sources_fetcher", + "core/bin/zksync_server", + "core/bin/genesis_generator", + "core/bin/zksync_tee_prover", + # Node services + "core/node/node_framework", + "core/node/proof_data_handler", + "core/node/block_reverter", + "core/node/commitment_generator", + "core/node/house_keeper", + "core/node/genesis", + "core/node/shared_metrics", + "core/node/db_pruner", + 
"core/node/fee_model", + "core/node/da_dispatcher", + "core/node/eth_sender", + "core/node/vm_runner", + "core/node/test_utils", + "core/node/state_keeper", + "core/node/reorg_detector", + "core/node/consistency_checker", + "core/node/metadata_calculator", + "core/node/node_sync", + "core/node/node_storage_init", + "core/node/consensus", + "core/node/contract_verification_server", + "core/node/api_server", + "core/node/tee_verifier_input_producer", + "core/node/base_token_adjuster", + "core/node/external_proof_integration_api", + "core/node/logs_bloom_backfill", + "core/node/da_clients", + # Libraries + "core/lib/db_connection", + "core/lib/zksync_core_leftovers", + "core/lib/basic_types", + "core/lib/config", + "core/lib/constants", + "core/lib/contract_verifier", + "core/lib/contracts", + "core/lib/circuit_breaker", + "core/lib/dal", + "core/lib/env_config", + "core/lib/da_client", + "core/lib/eth_client", + "core/lib/eth_signer", + "core/lib/l1_contract_interface", + "core/lib/mempool", + "core/lib/merkle_tree", + "core/lib/mini_merkle_tree", + "core/lib/node_framework_derive", + "core/lib/object_store", + "core/lib/prover_interface", + "core/lib/queued_job_processor", + "core/lib/state", + "core/lib/storage", + "core/lib/tee_verifier", + "core/lib/types", + "core/lib/protobuf_config", + "core/lib/utils", + "core/lib/vlog", + "core/lib/multivm", + "core/lib/vm_interface", + "core/lib/vm_executor", + "core/lib/web3_decl", + "core/lib/snapshots_applier", + "core/lib/crypto_primitives", + "core/lib/external_price_api", + # Test infrastructure + "core/tests/test_account", + "core/tests/loadnext", + "core/tests/vm-benchmark", ] resolver = "2" @@ -172,6 +172,7 @@ sqlx = "0.8.1" static_assertions = "1.1" structopt = "0.3.20" strum = "0.26" +strum_macros = "0.26.4" tempfile = "3.0.2" test-casing = "0.1.2" test-log = "0.2.15" @@ -185,7 +186,7 @@ tower-http = "0.5.2" tracing = "0.1" tracing-subscriber = "0.3" tracing-opentelemetry = "0.25.0" -time = "0.3.36" # Has to be same as used by `tracing-subscriber` +time = "0.3.36" # Has to be same as used by `tracing-subscriber` url = "2" web3 = "0.19.0" fraction = "0.15.3" diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index d1ab5ce8438..af39e5159ba 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -23,6 +23,10 @@ anyhow.workspace = true rand.workspace = true secrecy.workspace = true serde = { workspace = true, features = ["derive"] } +time = { workspace = true, features = ["serde-human-readable"] } +strum.workspace = true +strum_macros.workspace = true +vise.workspace = true [dev-dependencies] serde_json.workspace = true diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 1ad503e0687..a8d136d632e 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -60,6 +60,7 @@ pub mod house_keeper; pub mod object_store; pub mod observability; pub mod proof_data_handler; +pub mod prover_autoscaler; pub mod prover_job_monitor; pub mod pruning; pub mod secrets; diff --git a/core/lib/config/src/configs/prover_autoscaler.rs b/core/lib/config/src/configs/prover_autoscaler.rs new file mode 100644 index 00000000000..41131fc1b8c --- /dev/null +++ b/core/lib/config/src/configs/prover_autoscaler.rs @@ -0,0 +1,117 @@ +use std::collections::HashMap; + +use serde::Deserialize; +use strum::Display; +use strum_macros::EnumString; +use time::Duration; +use vise::EncodeLabelValue; + +use crate::configs::ObservabilityConfig; + +/// Config used for running 
ProverAutoscaler (both Scaler and Agent). +#[derive(Debug, Clone, PartialEq)] +pub struct ProverAutoscalerConfig { + /// Amount of time ProverJobMonitor will wait all it's tasks to finish. + // TODO: find a way to use #[serde(with = "humantime_serde")] with time::Duration. + pub graceful_shutdown_timeout: Duration, + pub agent_config: Option, + pub scaler_config: Option, + pub observability: Option, +} + +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct ProverAutoscalerAgentConfig { + /// Port for prometheus metrics connection. + pub prometheus_port: u16, + /// HTTP port for global Scaler to connect to the Agent running in a cluster. + pub http_port: u16, + /// List of namespaces to watch. + #[serde(default = "ProverAutoscalerAgentConfig::default_namespaces")] + pub namespaces: Vec, + /// Watched cluster name. Also can be set via flag. + pub cluster_name: Option, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Default)] +pub struct ProverAutoscalerScalerConfig { + /// Port for prometheus metrics connection. + pub prometheus_port: u16, + /// The interval between runs for global Scaler. + #[serde(default = "ProverAutoscalerScalerConfig::default_scaler_run_interval")] + pub scaler_run_interval: Duration, + /// URL to get queue reports from. + /// In production should be "http://prover-job-monitor.stage2.svc.cluster.local:3074/queue_report". + #[serde(default = "ProverAutoscalerScalerConfig::default_prover_job_monitor_url")] + pub prover_job_monitor_url: String, + /// List of ProverAutoscaler Agents to get cluster data from. + pub agents: Vec, + /// Mapping of namespaces to protocol versions. + pub protocol_versions: HashMap, + /// Default priorities, which cluster to prefer when there is no other information. + pub cluster_priorities: HashMap, + /// Prover speed per GPU. Used to calculate desired number of provers for queue size. + pub prover_speed: HashMap, + /// Duration after which pending pod considered long pending. 
+ #[serde(default = "ProverAutoscalerScalerConfig::default_long_pending_duration")] + pub long_pending_duration: Duration, +} + +#[derive( + Default, + Debug, + Display, + Hash, + PartialEq, + Eq, + Clone, + Copy, + Ord, + PartialOrd, + EnumString, + EncodeLabelValue, + Deserialize, +)] +pub enum Gpu { + #[default] + Unknown, + #[strum(ascii_case_insensitive)] + L4, + #[strum(ascii_case_insensitive)] + T4, + #[strum(ascii_case_insensitive)] + V100, + #[strum(ascii_case_insensitive)] + P100, + #[strum(ascii_case_insensitive)] + A100, +} + +impl ProverAutoscalerConfig { + /// Default graceful shutdown timeout -- 5 seconds + pub fn default_graceful_shutdown_timeout() -> Duration { + Duration::seconds(5) + } +} + +impl ProverAutoscalerAgentConfig { + pub fn default_namespaces() -> Vec { + vec!["prover-blue".to_string(), "prover-red".to_string()] + } +} + +impl ProverAutoscalerScalerConfig { + /// Default scaler_run_interval -- 10s + pub fn default_scaler_run_interval() -> Duration { + Duration::seconds(10) + } + + /// Default prover_job_monitor_url -- cluster local URL + pub fn default_prover_job_monitor_url() -> String { + "http://localhost:3074/queue_report".to_string() + } + + /// Default long_pending_duration -- 10m + pub fn default_long_pending_duration() -> Duration { + Duration::minutes(10) + } +} diff --git a/core/lib/protobuf_config/Cargo.toml b/core/lib/protobuf_config/Cargo.toml index 92d9bd53978..87a0a63567b 100644 --- a/core/lib/protobuf_config/Cargo.toml +++ b/core/lib/protobuf_config/Cargo.toml @@ -26,6 +26,7 @@ rand.workspace = true hex.workspace = true secrecy.workspace = true tracing.workspace = true +time.workspace = true [build-dependencies] zksync_protobuf_build.workspace = true diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index c89199359aa..7bbe955561b 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -28,6 +28,7 @@ mod observability; mod proof_data_handler; pub mod proto; mod prover; +mod prover_autoscaler; mod prover_job_monitor; mod pruning; mod secrets; diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto new file mode 100644 index 00000000000..e1d11b94d8f --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +package zksync.config.prover_autoscaler; + +import "zksync/std.proto"; +import "zksync/config/observability.proto"; + +message ProverAutoscalerConfig { + optional std.Duration graceful_shutdown_timeout = 1; // optional + optional ProverAutoscalerAgentConfig agent_config = 2; // optional + optional ProverAutoscalerScalerConfig scaler_config = 3; // optional + optional observability.Observability observability = 4; // optional +} + +message ProverAutoscalerAgentConfig { + optional uint32 prometheus_port = 1; // required + optional uint32 http_port = 2; // required + repeated string namespaces = 3; // optional + optional string cluster_name = 4; // optional +} + +message ProtocolVersion { + optional string namespace = 1; // required + optional string protocol_version = 2; // required +} + +message ClusterPriority { + optional string cluster = 1; // required + optional uint32 priority = 2; // required +} + +message ProverSpeed { + optional string gpu = 1; // required + optional uint32 speed = 2; // required +} + +message ProverAutoscalerScalerConfig { + optional uint32 prometheus_port = 1; // required + optional 
std.Duration scaler_run_interval = 2; // optional + optional string prover_job_monitor_url = 3; // required + repeated string agents = 4; // required at least one + repeated ProtocolVersion protocol_versions = 5; // repeated at least one + repeated ClusterPriority cluster_priorities = 6; // optional + repeated ProverSpeed prover_speed = 7; // optional + optional uint32 long_pending_duration_s = 8; // optional +} diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs b/core/lib/protobuf_config/src/prover_autoscaler.rs new file mode 100644 index 00000000000..f7da099cb82 --- /dev/null +++ b/core/lib/protobuf_config/src/prover_autoscaler.rs @@ -0,0 +1,172 @@ +use anyhow::Context as _; +use time::Duration; +use zksync_config::configs::{self, prover_autoscaler::Gpu}; +use zksync_protobuf::{read_optional, repr::ProtoRepr, required, ProtoFmt}; + +use crate::{proto::prover_autoscaler as proto, read_optional_repr}; + +impl ProtoRepr for proto::ProverAutoscalerConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + graceful_shutdown_timeout: read_optional(&self.graceful_shutdown_timeout) + .context("graceful_shutdown_timeout")? + .unwrap_or(Self::Type::default_graceful_shutdown_timeout()), + agent_config: read_optional_repr(&self.agent_config), + scaler_config: read_optional_repr(&self.scaler_config), + observability: read_optional_repr(&self.observability), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + graceful_shutdown_timeout: Some(ProtoFmt::build(&this.graceful_shutdown_timeout)), + agent_config: this.agent_config.as_ref().map(ProtoRepr::build), + scaler_config: this.scaler_config.as_ref().map(ProtoRepr::build), + observability: this.observability.as_ref().map(ProtoRepr::build), + } + } +} + +impl ProtoRepr for proto::ProverAutoscalerAgentConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerAgentConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + http_port: required(&self.http_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_port")?, + namespaces: self.namespaces.to_vec(), + cluster_name: Some("".to_string()), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + http_port: Some(this.http_port.into()), + namespaces: this.namespaces.clone(), + cluster_name: this.cluster_name.clone(), + } + } +} + +impl ProtoRepr for proto::ProverAutoscalerScalerConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerScalerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + scaler_run_interval: read_optional(&self.scaler_run_interval) + .context("scaler_run_interval")? + .unwrap_or(Self::Type::default_scaler_run_interval()), + prover_job_monitor_url: required(&self.prover_job_monitor_url) + .context("prover_job_monitor_url")? 
+ .clone(), + agents: self.agents.to_vec(), + protocol_versions: self + .protocol_versions + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("protocol_versions")?, + cluster_priorities: self + .cluster_priorities + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("cluster_priorities")?, + prover_speed: self + .prover_speed + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("prover_speed")?, + long_pending_duration: match self.long_pending_duration_s { + Some(s) => Duration::seconds(s.into()), + None => Self::Type::default_long_pending_duration(), + }, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + scaler_run_interval: Some(ProtoFmt::build(&this.scaler_run_interval)), + prover_job_monitor_url: Some(this.prover_job_monitor_url.clone()), + agents: this.agents.clone(), + protocol_versions: this + .protocol_versions + .iter() + .map(|(k, v)| proto::ProtocolVersion::build(&(k.clone(), v.clone()))) + .collect(), + cluster_priorities: this + .cluster_priorities + .iter() + .map(|(k, v)| proto::ClusterPriority::build(&(k.clone(), *v))) + .collect(), + prover_speed: this + .prover_speed + .iter() + .map(|(k, v)| proto::ProverSpeed::build(&(*k, *v))) + .collect(), + long_pending_duration_s: Some(this.long_pending_duration.whole_seconds() as u32), + } + } +} + +impl ProtoRepr for proto::ProtocolVersion { + type Type = (String, String); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.namespace).context("namespace")?.clone(), + required(&self.protocol_version) + .context("protocol_version")? + .clone(), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + namespace: Some(this.0.clone()), + protocol_version: Some(this.1.clone()), + } + } +} + +impl ProtoRepr for proto::ClusterPriority { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.cluster).context("cluster")?.clone(), + *required(&self.priority).context("priority")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + cluster: Some(this.0.clone()), + priority: Some(this.1), + } + } +} + +impl ProtoRepr for proto::ProverSpeed { + type Type = (Gpu, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.gpu).context("gpu")?.parse()?, + *required(&self.speed).context("speed")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + gpu: Some(this.0.to_string()), + speed: Some(this.1), + } + } +} diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 22ec5c53485..c085c1b5455 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -46,6 +46,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -208,6 +209,18 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "async-broadcast" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cd0e2e25ea8e5f7e9df04578dc6cf5c83577fd09b1a46aaf5c85e1c33f2a7e" +dependencies = [ + "event-listener", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -275,9 +288,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aws-lc-rs" -version = "1.8.0" +version = "1.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a47f2fb521b70c11ce7369a6c5fa4bd6af7e5d62ec06303875bafe7c6ba245" +checksum = "4ae74d9bd0a7530e8afd1770739ad34b36838829d6ad61818f9230f683f5ad77" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -287,9 +300,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.19.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2927c7af777b460b7ccd95f8b67acd7b4c04ec8896bf0c8e80ba30523cffc057" +checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" dependencies = [ "bindgen 0.69.4", "cc", @@ -355,6 +368,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "getrandom", + "instant", + "rand 0.8.5", +] + [[package]] name = "backtrace" version = "0.3.72" @@ -1331,8 +1355,18 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +dependencies = [ + "darling_core 0.20.10", + "darling_macro 0.20.10", ] [[package]] @@ -1349,17 +1383,51 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "darling_core" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.85", + "quote 1.0.36", + "strsim 0.11.1", + "syn 2.0.66", +] + [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", + "darling_core 0.13.4", "quote 1.0.36", "syn 1.0.109", ] +[[package]] +name = "darling_macro" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +dependencies = [ + "darling_core 0.20.10", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "debug-map-sorted" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c7dfa83618734bf9fa07aadaa1166b634e9427bb9bc5a1c2332d04d73fb721" +dependencies = [ + "itertools 0.10.5", +] + [[package]] name = "debugid" version = "0.8.0" @@ -1503,6 +1571,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" + [[package]] name = "ecdsa" version = "0.14.8" @@ -1784,6 +1858,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = 
[ + "event-listener", + "pin-project-lite", +] + [[package]] name = "fastrand" version = "2.1.0" @@ -1862,6 +1946,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "fluent-uri" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17c704e9dbe1ddd863da1e6ff3567795087b1eb201ce80d8fa81162e1516500d" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "flume" version = "0.11.0" @@ -2328,6 +2421,30 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "headers" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" +dependencies = [ + "base64 0.21.7", + "bytes", + "headers-core", + "http 1.1.0", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +dependencies = [ + "http 1.1.0", +] + [[package]] name = "heck" version = "0.3.3" @@ -2521,6 +2638,26 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-http-proxy" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d06dbdfbacf34d996c6fb540a71a684a7aae9056c71951163af8a8a4c07b9a4" +dependencies = [ + "bytes", + "futures-util", + "headers", + "http 1.1.0", + "hyper 1.3.1", + "hyper-rustls", + "hyper-util", + "pin-project-lite", + "rustls-native-certs", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "hyper-rustls" version = "0.27.2" @@ -2533,6 +2670,7 @@ dependencies = [ "hyper-util", "log", "rustls", + "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", @@ -2710,6 +2848,15 @@ dependencies = [ "regex", ] +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -2822,6 +2969,44 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b1fb8864823fad91877e6caea0baca82e49e8db50f8e5c9f9a453e27d3330fc" +dependencies = [ + "jsonptr", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonpath-rust" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d8fe85bd70ff715f31ce8c739194b423d79811a19602115d611a3ec85d6200" +dependencies = [ + "lazy_static", + "once_cell", + "pest", + "pest_derive", + "regex", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonptr" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c6e529149475ca0b2820835d3dce8fcc41c6b943ca608d32f35b449255e4627" +dependencies = [ + "fluent-uri", + "serde", + "serde_json", +] + [[package]] name = "jsonrpsee" version = "0.23.2" @@ -3006,6 +3191,19 @@ dependencies = [ "signature 2.2.0", ] +[[package]] +name = "k8s-openapi" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8847402328d8301354c94d605481f25a6bdc1ed65471fd96af8eca71141b13" +dependencies = [ + "base64 0.22.1", + "chrono", + "serde", + "serde-value", + "serde_json", +] + [[package]] name = "keccak" version = "0.1.5" @@ -3015,6 +3213,116 @@ dependencies = [ "cpufeatures", ] +[[package]] +name 
= "kube" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", + "kube-derive", + "kube-runtime", +] + +[[package]] +name = "kube-client" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" +dependencies = [ + "base64 0.22.1", + "bytes", + "chrono", + "either", + "futures 0.3.30", + "home", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-http-proxy", + "hyper-rustls", + "hyper-timeout", + "hyper-util", + "jsonpath-rust", + "k8s-openapi", + "kube-core", + "pem", + "rustls", + "rustls-pemfile 2.1.2", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" +dependencies = [ + "chrono", + "form_urlencoded", + "http 1.1.0", + "json-patch", + "k8s-openapi", + "schemars", + "serde", + "serde-value", + "serde_json", + "thiserror", +] + +[[package]] +name = "kube-derive" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa98be978eddd70a773aa8e86346075365bfb7eb48783410852dbf7cb57f0c27" +dependencies = [ + "darling 0.20.10", + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde_json", + "syn 2.0.66", +] + +[[package]] +name = "kube-runtime" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5895cb8aa641ac922408f128b935652b34c2995f16ad7db0984f6caa50217914" +dependencies = [ + "ahash 0.8.11", + "async-broadcast", + "async-stream", + "async-trait", + "backoff", + "derivative", + "futures 0.3.30", + "hashbrown 0.14.5", + "json-patch", + "jsonptr", + "k8s-openapi", + "kube-client", + "parking_lot", + "pin-project", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -3812,6 +4120,51 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "pest_meta" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" +dependencies = [ + "once_cell", + "pest", + "sha2 0.10.8", +] + [[package]] name = 
"petgraph" version = "0.6.5" @@ -4756,9 +5109,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ "aws-lc-rs", "log", @@ -4837,9 +5190,9 @@ checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ "aws-lc-rs", "ring", @@ -4889,6 +5242,30 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "schemars" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde_derive_internals", + "syn 2.0.66", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -4953,6 +5330,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ + "serde", "zeroize", ] @@ -5136,13 +5514,25 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -5187,7 +5577,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", + "darling 0.13.4", "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", @@ -6092,6 +6482,7 @@ dependencies = [ "futures-io", "futures-sink", "pin-project-lite", + "slab", "tokio", ] @@ -6195,6 +6586,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "base64 0.21.7", + "bitflags 2.6.0", + "bytes", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -6319,6 +6729,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "uint" version = "0.9.5" @@ -6449,9 +6865,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -7405,8 +7821,12 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "strum", + "strum_macros", + "time", "tracing", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -7836,6 +8256,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -7844,6 +8265,44 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_prover_autoscaler" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "axum", + "chrono", + "clap 4.5.4", + "ctrlc", + "debug-map-sorted", + "futures 0.3.30", + "k8s-openapi", + "kube", + "once_cell", + "regex", + "reqwest 0.12.5", + "ring", + "rustls", + "serde", + "serde_json", + "structopt", + "strum", + "time", + "tokio", + "tracing", + "tracing-subscriber", + "url", + "vise", + "zksync_config", + "zksync_core_leftovers", + "zksync_protobuf_config", + "zksync_prover_job_monitor", + "zksync_types", + "zksync_utils", + "zksync_vlog", +] + [[package]] name = "zksync_prover_dal" version = "0.1.0" diff --git a/prover/Cargo.toml b/prover/Cargo.toml index e95bae3d4c1..742eee649de 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -1,8 +1,5 @@ [workspace] -members = [ - "crates/bin/*", - "crates/lib/*", -] +members = ["crates/bin/*", "crates/lib/*"] resolver = "2" @@ -19,20 +16,23 @@ categories = ["cryptography"] [workspace.dependencies] # Common dependencies anyhow = "1.0" -axum = "0.7.5" async-trait = "0.1" +axum = "0.7.5" bincode = "1" chrono = "0.4.38" clap = "4.4.6" colored = "2.0" const-decoder = "0.3.0" ctrlc = "3.1" +debug-map-sorted = "0.1.1" dialoguer = "0.11" futures = "0.3" hex = "0.4" -itertools = "0.10.5" indicatif = "0.16" +itertools = "0.10.5" jemallocator = "0.5" +k8s-openapi = { version = "0.23.0", features = ["v1_30"] } +kube = { version = "0.95.0", features = ["runtime", "derive"] } local-ip-address = "0.5.0" log = "0.4.20" md5 = "0.7.0" @@ -42,6 +42,8 @@ queues = "1.1.0" rand = "0.8" regex = "1.10.4" reqwest = "0.12" +ring = "0.17.8" +rustls = { version = "0.23.12", features = ["ring"] } serde = "1.0" serde_derive = "1.0" serde_json = "1.0" @@ -50,11 +52,13 @@ sqlx = { version = "0.8.1", default-features = false } structopt = "0.3.26" strum = { version = "0.26" } tempfile = "3" +time = "0.3.36" tokio = "1" tokio-util = "0.7.11" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = "0.3" +url = "2.5.2" vise = "0.2.0" # Proving dependencies @@ -84,6 +88,7 @@ zksync_eth_client = { path = "../core/lib/eth_client" } zksync_contracts = { path = "../core/lib/contracts" } zksync_core_leftovers = { path = "../core/lib/zksync_core_leftovers" } zksync_periodic_job = { path = "../core/lib/periodic_job" } +zksync_protobuf_config = { path = "../core/lib/protobuf_config" } # Prover workspace dependencies zksync_prover_dal = { path = "crates/lib/prover_dal" } @@ -91,6 
+96,7 @@ zksync_prover_fri_types = { path = "crates/lib/prover_fri_types" } zksync_prover_fri_utils = { path = "crates/lib/prover_fri_utils" } zksync_prover_keystore = { path = "crates/lib/keystore" } zksync_vk_setup_data_generator_server_fri = { path = "crates/bin/vk_setup_data_generator_server_fri" } +zksync_prover_job_monitor = { path = "crates/bin/prover_job_monitor" } # for `perf` profiling [profile.perf] diff --git a/prover/crates/bin/prover_autoscaler/Cargo.toml b/prover/crates/bin/prover_autoscaler/Cargo.toml new file mode 100644 index 00000000000..9743b45593e --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "zksync_prover_autoscaler" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +zksync_core_leftovers.workspace = true +zksync_vlog.workspace = true +zksync_utils.workspace = true +zksync_types.workspace = true +zksync_config = { workspace = true, features = ["observability_ext"] } +zksync_prover_job_monitor.workspace = true +zksync_protobuf_config.workspace = true + +debug-map-sorted.workspace = true +anyhow.workspace = true +async-trait.workspace = true +axum.workspace = true +chrono.workspace = true +clap = { workspace = true, features = ["derive"] } +ctrlc = { workspace = true, features = ["termination"] } +futures.workspace = true +k8s-openapi = { workspace = true, features = ["v1_30"] } +kube = { workspace = true, features = ["runtime", "derive"] } +once_cell.workspace = true +regex.workspace = true +reqwest = { workspace = true, features = ["json"] } +ring.workspace = true +rustls = { workspace = true, features = ["ring"] } +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +structopt.workspace = true +strum.workspace = true +time.workspace = true +tokio = { workspace = true, features = ["time", "macros"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } +tracing.workspace = true +url.workspace = true +vise.workspace = true diff --git a/prover/crates/bin/prover_autoscaler/src/agent.rs b/prover/crates/bin/prover_autoscaler/src/agent.rs new file mode 100644 index 00000000000..3269a43815c --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/agent.rs @@ -0,0 +1,130 @@ +use std::net::SocketAddr; + +use anyhow::Context as _; +use axum::{ + extract::State, + response::IntoResponse, + routing::{get, post}, + Json, Router, +}; +use futures::future; +use reqwest::StatusCode; +use serde::{Deserialize, Serialize}; +use tokio::sync::watch; + +use crate::{ + cluster_types::Cluster, + k8s::{Scaler, Watcher}, +}; + +struct AppError(anyhow::Error); + +impl IntoResponse for AppError { + fn into_response(self) -> axum::response::Response { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Something went wrong: {}", self.0), + ) + .into_response() + } +} + +pub async fn run_server( + port: u16, + watcher: Watcher, + scaler: Scaler, + mut stop_receiver: watch::Receiver, +) -> anyhow::Result<()> { + let bind_address = SocketAddr::from(([0, 0, 0, 0], port)); + tracing::debug!("Starting Autoscaler agent on {bind_address}"); + let app = create_agent_router(watcher, scaler); + + let listener = tokio::net::TcpListener::bind(bind_address) + .await + .with_context(|| format!("Failed binding Autoscaler agent to {bind_address}"))?; + axum::serve(listener, app) + .with_graceful_shutdown(async move { + if 
stop_receiver.changed().await.is_err() { + tracing::warn!( + "Stop signal sender for Autoscaler agent was dropped without sending a signal" + ); + } + tracing::info!("Stop signal received, Autoscaler agent is shutting down"); + }) + .await + .context("Autoscaler agent failed")?; + tracing::info!("Autoscaler agent shut down"); + Ok(()) +} + +fn create_agent_router(watcher: Watcher, scaler: Scaler) -> Router { + let app = App { watcher, scaler }; + Router::new() + .route("/healthz", get(health)) + .route("/cluster", get(get_cluster)) + .route("/scale", post(scale)) + .with_state(app) +} + +// TODO: Use +// https://github.com/matter-labs/zksync-era/blob/9821a20018c367ce246dba656daab5c2e7757973/core/node/api_server/src/healthcheck.rs#L53 +// instead. +async fn health() -> &'static str { + "Ok\n" +} + +#[derive(Clone)] +struct App { + watcher: Watcher, + scaler: Scaler, +} + +async fn get_cluster(State(app): State) -> Result, AppError> { + let cluster = app.watcher.cluster.lock().await.clone(); + Ok(Json(cluster)) +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ScaleDeploymentRequest { + pub namespace: String, + pub name: String, + pub size: i32, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ScaleRequest { + pub deployments: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScaleResponse { + pub scale_result: Vec, +} + +/// To test or forse scale in particular cluster use: +/// $ curl -X POST -H "Content-Type: application/json" --data '{"deployments": [{"namespace": "prover-red", "name": "witness-vector-generator-spec-9-f", "size":0},{"namespace": "prover-red", "name": "witness-vector-generator-spec-9-c", "size":0}]}' :8081/scale +async fn scale( + State(app): State, + Json(payload): Json, +) -> Result, AppError> { + let handles: Vec<_> = payload + .deployments + .into_iter() + .map(|d| { + let s = app.scaler.clone(); + tokio::spawn(async move { + match s.scale(&d.namespace, &d.name, d.size).await { + Ok(()) => "".to_string(), + Err(err) => err.to_string(), + } + }) + }) + .collect(); + + let scale_result = future::join_all(handles) + .await + .into_iter() + .map(Result::unwrap) + .collect(); + Ok(Json(ScaleResponse { scale_result })) +} diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs new file mode 100644 index 00000000000..b074e0774c9 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs @@ -0,0 +1,66 @@ +use std::collections::{BTreeMap, HashMap}; + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize, Serializer}; +use strum::{Display, EnumString}; + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Pod { + // pub name: String, // TODO: Consider if it's needed. + pub owner: String, + pub status: String, + pub changed: DateTime, +} +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Deployment { + // pub name: String, // TODO: Consider if it's needed. 
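+    // Replica counts as observed by the cluster agent: `running` is the number of
+    // replicas currently reported for this deployment, `desired` is the target size.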
+ pub running: i32, + pub desired: i32, +} + +fn ordered_map( + value: &HashMap, + serializer: S, +) -> Result +where + S: Serializer, +{ + let ordered: BTreeMap<_, _> = value.iter().collect(); + ordered.serialize(serializer) +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Namespace { + #[serde(serialize_with = "ordered_map")] + pub deployments: HashMap, + pub pods: HashMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Cluster { + pub name: String, + pub namespaces: HashMap, +} +impl Default for Cluster { + fn default() -> Self { + Self { + name: "".to_string(), + namespaces: HashMap::new(), + } + } +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Clusters { + pub clusters: HashMap, +} + +#[derive(Default, Debug, EnumString, Display, Hash, PartialEq, Eq, Clone, Copy)] +pub enum PodStatus { + #[default] + Unknown, + Running, + Pending, + LongPending, + NeedToMove, +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/mod.rs b/prover/crates/bin/prover_autoscaler/src/global/mod.rs new file mode 100644 index 00000000000..5e4afb93843 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/mod.rs @@ -0,0 +1,3 @@ +pub mod queuer; +pub mod scaler; +pub mod watcher; diff --git a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs new file mode 100644 index 00000000000..1ef5d96386b --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs @@ -0,0 +1,41 @@ +use std::collections::HashMap; + +use anyhow::{Context, Ok}; +use reqwest::Method; +use zksync_prover_job_monitor::autoscaler_queue_reporter::VersionedQueueReport; +use zksync_utils::http_with_retries::send_request_with_retries; + +#[derive(Debug)] +pub struct Queue { + pub queue: HashMap, +} + +#[derive(Default)] +pub struct Queuer { + pub prover_job_monitor_url: String, +} + +impl Queuer { + pub fn new(pjm_url: String) -> Self { + Self { + prover_job_monitor_url: pjm_url, + } + } + + pub async fn get_queue(&self) -> anyhow::Result { + let url = &self.prover_job_monitor_url; + let response = send_request_with_retries(url, 5, Method::GET, None, None).await; + let res = response + .map_err(|err| anyhow::anyhow!("Failed fetching queue from url: {url}: {err:?}"))? + .json::>() + .await + .context("Failed to read response as json")?; + + Ok(Queue { + queue: res + .iter() + .map(|x| (x.version.to_string(), x.report.prover_jobs.queued as u64)) + .collect::>(), + }) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs new file mode 100644 index 00000000000..9f37c4d1167 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -0,0 +1,360 @@ +use std::{collections::HashMap, str::FromStr}; + +use chrono::Utc; +use debug_map_sorted::SortedOutputExt; +use once_cell::sync::Lazy; +use regex::Regex; +use zksync_config::configs::prover_autoscaler::{Gpu, ProverAutoscalerScalerConfig}; + +use super::{queuer, watcher}; +use crate::{ + cluster_types::{Cluster, Clusters, Pod, PodStatus}, + metrics::AUTOSCALER_METRICS, + task_wiring::Task, +}; + +const DEFAULT_SPEED: u32 = 500; + +#[derive(Default, Debug, PartialEq, Eq)] +struct GPUPool { + name: String, + gpu: Gpu, + provers: HashMap, // TODO: consider using i64 everywhere to avoid type casts. 
+ preemtions: u64, + max_pool_size: u32, +} + +impl GPUPool { + fn sum_by_pod_status(&self, ps: PodStatus) -> u32 { + self.provers.get(&ps).cloned().unwrap_or(0) + } + + fn to_key(&self) -> GPUPoolKey { + GPUPoolKey { + cluster: self.name.clone(), + gpu: self.gpu, + } + } +} + +#[derive(Debug, Eq, Hash, PartialEq)] +struct GPUPoolKey { + cluster: String, + gpu: Gpu, +} + +static PROVER_DEPLOYMENT_RE: Lazy = + Lazy::new(|| Regex::new(r"^prover-gpu-fri-spec-(\d{1,2})?(-(?[ltvpa]\d+))?$").unwrap()); +static PROVER_POD_RE: Lazy = + Lazy::new(|| Regex::new(r"^prover-gpu-fri-spec-(\d{1,2})?(-(?[ltvpa]\d+))?").unwrap()); + +pub struct Scaler { + /// namespace to Protocol Version configuration. + namespaces: HashMap, + watcher: watcher::Watcher, + queuer: queuer::Queuer, + + /// Which cluster to use first. + cluster_priorities: HashMap, + prover_speed: HashMap, + long_pending_duration: chrono::Duration, +} + +struct ProverPodGpu<'a> { + name: &'a str, + pod: &'a Pod, + gpu: Gpu, +} + +impl<'a> ProverPodGpu<'a> { + fn new(name: &'a str, pod: &'a Pod) -> Option> { + PROVER_POD_RE.captures(name).map(|caps| Self { + name, + pod, + gpu: Gpu::from_str(caps.name("gpu").map_or("l4", |m| m.as_str())).unwrap_or_default(), + }) + } +} + +impl Scaler { + pub fn new( + watcher: watcher::Watcher, + queuer: queuer::Queuer, + config: ProverAutoscalerScalerConfig, + ) -> Self { + Self { + namespaces: config.protocol_versions, + watcher, + queuer, + cluster_priorities: config.cluster_priorities, + prover_speed: config.prover_speed, + long_pending_duration: chrono::Duration::seconds( + config.long_pending_duration.whole_seconds(), + ), + } + } + + fn convert_to_gpu_pool(&self, namespace: &String, cluster: &Cluster) -> Vec { + let mut gp_map = HashMap::new(); // + let Some(namespace_value) = &cluster.namespaces.get(namespace) else { + // No namespace in config, ignoring. + return vec![]; + }; + + for caps in namespace_value + .deployments + .keys() + .filter_map(|dn| PROVER_DEPLOYMENT_RE.captures(dn)) + { + // Processing only provers. + let gpu = + Gpu::from_str(caps.name("gpu").map_or("l4", |m| m.as_str())).unwrap_or_default(); + let e = gp_map.entry(gpu).or_insert(GPUPool { + name: cluster.name.clone(), + gpu, + max_pool_size: 100, // TODO: get from the agent. + ..Default::default() + }); + + // Initialize pool only if we have ready deployments. + e.provers.insert(PodStatus::Running, 0); + } + + for ppg in namespace_value + .pods + .iter() + .filter_map(|(pn, pv)| ProverPodGpu::new(pn, pv)) + { + let e = gp_map.entry(ppg.gpu).or_insert(GPUPool { + name: cluster.name.clone(), + gpu: ppg.gpu, + ..Default::default() + }); + let mut status = PodStatus::from_str(&ppg.pod.status).unwrap_or_default(); + if status == PodStatus::Pending + && ppg.pod.changed < Utc::now() - self.long_pending_duration + { + status = PodStatus::LongPending; + } + tracing::info!( + "pod {}: status: {}, real status: {}", + ppg.name, + status, + ppg.pod.status + ); + e.provers.entry(status).and_modify(|n| *n += 1).or_insert(1); + } + + tracing::info!("From pods {:?}", gp_map.sorted_debug()); + + gp_map.into_values().collect() + } + + fn sorted_clusters(&self, namespace: &String, clusters: &Clusters) -> Vec { + let mut gpu_pools: Vec = clusters + .clusters + .values() + .flat_map(|c| self.convert_to_gpu_pool(namespace, c)) + .collect(); + + gpu_pools.sort_by(|a, b| { + a.gpu + .cmp(&b.gpu) // Sort by GPU first. + .then( + a.sum_by_pod_status(PodStatus::NeedToMove) + .cmp(&b.sum_by_pod_status(PodStatus::NeedToMove)), + ) // Sort by need to evict. 
+ .then( + a.sum_by_pod_status(PodStatus::LongPending) + .cmp(&b.sum_by_pod_status(PodStatus::LongPending)), + ) // Sort by long Pending pods. + .then(a.preemtions.cmp(&b.preemtions)) // Sort by preemtions in the cluster. + .then( + self.cluster_priorities + .get(&a.name) + .unwrap_or(&1000) + .cmp(self.cluster_priorities.get(&b.name).unwrap_or(&1000)), + ) // Sort by priority. + .then(b.max_pool_size.cmp(&a.max_pool_size)) // Reverse sort by cluster size. + }); + + gpu_pools + } + + fn speed(&self, gpu: Gpu) -> u64 { + self.prover_speed + .get(&gpu) + .cloned() + .unwrap_or(DEFAULT_SPEED) + .into() + } + + fn provers_to_speed(&self, gpu: Gpu, n: u32) -> u64 { + self.speed(gpu) * n as u64 + } + + fn normalize_queue(&self, gpu: Gpu, q: u64) -> u64 { + let speed = self.speed(gpu); + // Divide and round up if there's any remainder. + (q + speed - 1) / speed * speed + } + + fn run(&self, namespace: &String, q: u64, clusters: &Clusters) -> HashMap { + let sc = self.sorted_clusters(namespace, clusters); + tracing::debug!("Sorted clusters for namespace {}: {:?}", namespace, &sc); + + let mut total: i64 = 0; + let mut provers: HashMap = HashMap::new(); + for c in &sc { + for (status, p) in &c.provers { + match status { + PodStatus::Running | PodStatus::Pending => { + total += self.provers_to_speed(c.gpu, *p) as i64; + provers + .entry(c.to_key()) + .and_modify(|x| *x += p) + .or_insert(*p); + } + _ => (), // Ignore LongPending as not running here. + } + } + } + + // Remove unneeded pods. + if (total as u64) > self.normalize_queue(Gpu::L4, q) { + for c in sc.iter().rev() { + let mut excess_queue = total as u64 - self.normalize_queue(c.gpu, q); + let mut excess_provers = (excess_queue / self.speed(c.gpu)) as u32; + let p = provers.entry(c.to_key()).or_default(); + if *p < excess_provers { + excess_provers = *p; + excess_queue = *p as u64 * self.speed(c.gpu); + } + *p -= excess_provers; + total -= excess_queue as i64; + if total <= 0 { + break; + }; + } + } + + // Reduce load in over capacity pools. + for c in &sc { + let p = provers.entry(c.to_key()).or_default(); + if c.max_pool_size < *p { + let excess = *p - c.max_pool_size; + total -= excess as i64 * self.speed(c.gpu) as i64; + *p -= excess; + } + } + + tracing::debug!("Queue coverd with provers: {}", total); + // Add required provers. + if (total as u64) < q { + for c in &sc { + let mut required_queue = q - total as u64; + let mut required_provers = + (self.normalize_queue(c.gpu, required_queue) / self.speed(c.gpu)) as u32; + let p = provers.entry(c.to_key()).or_default(); + if *p + required_provers > c.max_pool_size { + required_provers = c.max_pool_size - *p; + required_queue = required_provers as u64 * self.speed(c.gpu); + } + *p += required_provers; + total += required_queue as i64; + } + } + + tracing::debug!("run result: provers {:?}, total: {}", &provers, total); + + provers + } +} + +#[async_trait::async_trait] +impl Task for Scaler { + async fn invoke(&self) -> anyhow::Result<()> { + let queue = self.queuer.get_queue().await.unwrap(); + + // TODO: Check that clusters data is ready. 
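+        // Snapshot the cluster state collected by the watcher and, for every configured namespace with a
+        // non-empty queue, compute the desired number of provers per (cluster, GPU). For now the result is
+        // only exported as metrics; sending the actual scaling commands is still a TODO below.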
+ let clusters = self.watcher.clusters.lock().await; + for (ns, ppv) in &self.namespaces { + let q = queue.queue.get(ppv).cloned().unwrap_or(0); + if q > 0 { + let provers = self.run(ns, q, &clusters); + for (k, num) in &provers { + AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] + .set(*num as u64); + } + // TODO: compare before and desired, send commands [cluster,namespace,deployment] -> provers + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use tokio::sync::Mutex; + + use super::*; + use crate::{ + cluster_types::{self, Deployment, Namespace, Pod}, + global::{queuer, watcher}, + }; + + #[test] + fn test_run() { + let watcher = watcher::Watcher { + cluster_agents: vec![], + clusters: Arc::new(Mutex::new(cluster_types::Clusters { + ..Default::default() + })), + }; + let queuer = queuer::Queuer { + prover_job_monitor_url: "".to_string(), + }; + let scaler = Scaler::new(watcher, queuer, ProverAutoscalerScalerConfig::default()); + let got = scaler.run( + &"prover".to_string(), + 1499, + &Clusters { + clusters: HashMap::from([( + "foo".to_string(), + Cluster { + name: "foo".to_string(), + namespaces: HashMap::from([( + "prover".to_string(), + Namespace { + deployments: HashMap::from([( + "prover-gpu-fri-spec-1".to_string(), + Deployment { + ..Default::default() + }, + )]), + pods: HashMap::from([( + "prover-gpu-fri-spec-1-c47644679-x9xqp".to_string(), + Pod { + status: "Running".to_string(), + ..Default::default() + }, + )]), + }, + )]), + }, + )]), + }, + ); + let want = HashMap::from([( + GPUPoolKey { + cluster: "foo".to_string(), + gpu: Gpu::L4, + }, + 3, + )]); + assert!(got == want); + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs new file mode 100644 index 00000000000..ef3ebd3b819 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs @@ -0,0 +1,89 @@ +use std::{collections::HashMap, sync::Arc}; + +use anyhow::{Context, Ok}; +use futures::future; +use reqwest::Method; +use tokio::sync::Mutex; +use url::Url; +use zksync_utils::http_with_retries::send_request_with_retries; + +use crate::{ + cluster_types::{Cluster, Clusters}, + task_wiring::Task, +}; + +#[derive(Clone)] +pub struct Watcher { + /// List of base URLs of all agents. + pub cluster_agents: Vec>, + pub clusters: Arc>, +} + +impl Watcher { + pub fn new(agent_urls: Vec) -> Self { + Self { + cluster_agents: agent_urls + .into_iter() + .map(|u| { + Arc::new( + Url::parse(&u) + .unwrap_or_else(|e| panic!("Unparsable Agent URL {}: {}", u, e)), + ) + }) + .collect(), + clusters: Arc::new(Mutex::new(Clusters { + clusters: HashMap::new(), + })), + } + } +} + +#[async_trait::async_trait] +impl Task for Watcher { + async fn invoke(&self) -> anyhow::Result<()> { + let handles: Vec<_> = self + .cluster_agents + .clone() + .into_iter() + .map(|a| { + tracing::debug!("Getting cluster data from agent {}.", a); + tokio::spawn(async move { + let url: String = a + .clone() + .join("/cluster") + .context("Failed to join URL with /cluster")? + .to_string(); + let response = + send_request_with_retries(&url, 5, Method::GET, None, None).await; + response + .map_err(|err| { + anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}") + })? 
+ .json::() + .await + .context("Failed to read response as json") + }) + }) + .collect(); + + future::try_join_all( + future::join_all(handles) + .await + .into_iter() + .map(|h| async move { + let c = h.unwrap().unwrap(); + self.clusters + .lock() + .await + .clusters + .insert(c.name.clone(), c); + Ok(()) + }) + .collect::>(), + ) + .await + .unwrap(); + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs b/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs new file mode 100644 index 00000000000..0804b9eaa40 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs @@ -0,0 +1,5 @@ +pub use scaler::Scaler; +pub use watcher::Watcher; + +mod scaler; +mod watcher; diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs new file mode 100644 index 00000000000..170b0b10650 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs @@ -0,0 +1,27 @@ +use k8s_openapi::api; +use kube::api::{Api, Patch, PatchParams}; + +#[derive(Clone)] +pub struct Scaler { + pub client: kube::Client, +} + +impl Scaler { + pub async fn scale(&self, namespace: &str, name: &str, size: i32) -> anyhow::Result<()> { + let deployments: Api = + Api::namespaced(self.client.clone(), namespace); + + let patch = serde_json::json!({ + "apiVersion": "apps/v1", + "kind": "Deployment", + "spec": { + "replicas": size + } + }); + let pp = PatchParams::default(); + deployments.patch(name, &pp, &Patch::Merge(patch)).await?; + tracing::info!("Scaled deployment/{} to {} replica(s).", name, size); + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs new file mode 100644 index 00000000000..8746d17663b --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs @@ -0,0 +1,138 @@ +use std::{collections::HashMap, sync::Arc}; + +use chrono::Utc; +use futures::{stream, StreamExt, TryStreamExt}; +use k8s_openapi::api; +use kube::{ + api::{Api, ResourceExt}, + runtime::{watcher, WatchStreamExt}, +}; +use tokio::sync::Mutex; + +use crate::{ + cluster_types::{Cluster, Deployment, Namespace, Pod}, + metrics::AUTOSCALER_METRICS, +}; + +#[derive(Clone)] +pub struct Watcher { + pub client: kube::Client, + pub cluster: Arc>, +} + +impl Watcher { + pub fn new(client: kube::Client, cluster_name: String, namespaces: Vec) -> Self { + let mut ns = HashMap::new(); + namespaces.into_iter().for_each(|n| { + ns.insert(n, Namespace::default()); + }); + + Self { + client, + cluster: Arc::new(Mutex::new(Cluster { + name: cluster_name, + namespaces: ns, + })), + } + } + + pub async fn run(self) -> anyhow::Result<()> { + // TODO: add actual metrics + AUTOSCALER_METRICS.protocol_version.set(1); + AUTOSCALER_METRICS.calls.inc_by(1); + + // TODO: watch for a list of namespaces, get: + // - deployments (name, running, desired) [done] + // - pods (name, parent deployment, statuses, when the last status change) [~done] + // - events (number of scheduling failures in last N seconds, which deployments) + // - events (preemptions, which deployment, when, how many) + // - pool size from GCP (name, size, which GPU) + let mut watchers = vec![]; + for namespace in self.cluster.lock().await.namespaces.keys() { + let deployments: Api = + Api::namespaced(self.client.clone(), namespace); + watchers.push( + watcher(deployments, watcher::Config::default()) + .default_backoff() + .applied_objects() + .map_ok(Watched::Deploy) + .boxed(), + ); + + let pods: Api = 
Api::namespaced(self.client.clone(), namespace); + watchers.push( + watcher(pods, watcher::Config::default()) + .default_backoff() + .applied_objects() + .map_ok(Watched::Pod) + .boxed(), + ); + } + // select on applied events from all watchers + let mut combo_stream = stream::select_all(watchers); + // SelectAll Stream elements must have the same Item, so all packed in this: + #[allow(clippy::large_enum_variant)] + enum Watched { + Deploy(api::apps::v1::Deployment), + Pod(api::core::v1::Pod), + } + while let Some(o) = combo_stream.try_next().await? { + match o { + Watched::Deploy(d) => { + let namespace = match d.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let dep = v + .deployments + .entry(d.name_any()) + .or_insert(Deployment::default()); + let nums = d.status.clone().unwrap_or_default(); + dep.running = nums.available_replicas.unwrap_or_default(); + dep.desired = nums.replicas.unwrap_or_default(); + + tracing::info!( + "Got deployment: {}, size: {}/{} un {}", + d.name_any(), + nums.available_replicas.unwrap_or_default(), + nums.replicas.unwrap_or_default(), + nums.unavailable_replicas.unwrap_or_default(), + ) + } + Watched::Pod(p) => { + let namespace = match p.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let pod = v.pods.entry(p.name_any()).or_insert(Pod::default()); + pod.owner = p + .owner_references() + .iter() + .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone())) + .collect::>() + .join(":"); + // TODO: Collect replica sets to match deployments and pods. + let phase = p + .status + .clone() + .unwrap_or_default() + .phase + .unwrap_or_default(); + if phase != pod.status { + // TODO: try to get an idea how to set correct value on restart. + pod.changed = Utc::now(); + } + pod.status = phase; + + tracing::info!("Got pod: {}", p.name_any()) + } + } + } + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/lib.rs b/prover/crates/bin/prover_autoscaler/src/lib.rs new file mode 100644 index 00000000000..0b0d704c907 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/lib.rs @@ -0,0 +1,6 @@ +pub mod agent; +pub(crate) mod cluster_types; +pub mod global; +pub mod k8s; +pub(crate) mod metrics; +pub mod task_wiring; diff --git a/prover/crates/bin/prover_autoscaler/src/main.rs b/prover/crates/bin/prover_autoscaler/src/main.rs new file mode 100644 index 00000000000..196bd6deb81 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/main.rs @@ -0,0 +1,148 @@ +use std::time::Duration; + +use anyhow::Context; +use structopt::StructOpt; +use tokio::{ + sync::{oneshot, watch}, + task::JoinHandle, +}; +use zksync_core_leftovers::temp_config_store::read_yaml_repr; +use zksync_protobuf_config::proto::prover_autoscaler; +use zksync_prover_autoscaler::{ + agent, + global::{self}, + k8s::{Scaler, Watcher}, + task_wiring::TaskRunner, +}; +use zksync_utils::wait_for_tasks::ManagedTasks; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +/// Represents the sequential number of the Prover Autoscaler type. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] +pub enum AutoscalerType { + Scaler, + Agent, +} + +impl std::str::FromStr for AutoscalerType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "scaler" => Ok(AutoscalerType::Scaler), + "agent" => Ok(AutoscalerType::Agent), + other => Err(format!("{} is not a valid AutoscalerType", other)), + } + } +} + +#[derive(Debug, StructOpt)] +#[structopt(name = "Prover Autoscaler", about = "Run Prover Autoscaler components")] +struct Opt { + /// Prover Autoscaler can run Agent or Scaler type. + /// + /// Specify `agent` or `scaler` + #[structopt(short, long, default_value = "agent")] + job: AutoscalerType, + /// Name of the cluster Agent is watching. + #[structopt(long)] + cluster_name: Option, + /// Path to the configuration file. + #[structopt(long)] + config_path: std::path::PathBuf, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let opt = Opt::from_args(); + let general_config = + read_yaml_repr::(&opt.config_path) + .context("general config")?; + let observability_config = general_config + .observability + .context("observability config")?; + let _observability_guard = observability_config.install()?; + // That's unfortunate that there are at least 3 different Duration in rust and we use all 3 in this repo. + // TODO: Consider updating zksync_protobuf to support std::time::Duration. + let graceful_shutdown_timeout = general_config.graceful_shutdown_timeout.unsigned_abs(); + + let (stop_signal_sender, stop_signal_receiver) = oneshot::channel(); + let mut stop_signal_sender = Some(stop_signal_sender); + ctrlc::set_handler(move || { + if let Some(sender) = stop_signal_sender.take() { + sender.send(()).ok(); + } + }) + .context("Error setting Ctrl+C handler")?; + + let (stop_sender, stop_receiver) = watch::channel(false); + + let _ = rustls::crypto::ring::default_provider().install_default(); + let client = kube::Client::try_default().await?; + + tracing::info!("Starting ProverAutoscaler"); + + let mut tasks = vec![]; + + match opt.job { + AutoscalerType::Agent => { + let agent_config = general_config.agent_config.context("agent_config")?; + let exporter_config = PrometheusExporterConfig::pull(agent_config.prometheus_port); + tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); + + // TODO: maybe get cluster name from curl -H "Metadata-Flavor: Google" + // http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name + let watcher = Watcher::new( + client.clone(), + opt.cluster_name + .context("cluster_name is required for Agent")?, + agent_config.namespaces, + ); + let scaler = Scaler { client }; + tasks.push(tokio::spawn(watcher.clone().run())); + tasks.push(tokio::spawn(agent::run_server( + agent_config.http_port, + watcher, + scaler, + stop_receiver.clone(), + ))) + } + AutoscalerType::Scaler => { + let scaler_config = general_config.scaler_config.context("scaler_config")?; + let interval = scaler_config.scaler_run_interval.unsigned_abs(); + let exporter_config = PrometheusExporterConfig::pull(scaler_config.prometheus_port); + tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); + let watcher = global::watcher::Watcher::new(scaler_config.agents.clone()); + let queuer = global::queuer::Queuer::new(scaler_config.prover_job_monitor_url.clone()); + let scaler = global::scaler::Scaler::new(watcher.clone(), queuer, scaler_config); + tasks.extend(get_tasks(watcher, scaler, interval, stop_receiver)?); + } + } + + let 
mut tasks = ManagedTasks::new(tasks); + + tokio::select! { + _ = tasks.wait_single() => {}, + _ = stop_signal_receiver => { + tracing::info!("Stop signal received, shutting down"); + } + } + stop_sender.send(true).ok(); + tasks.complete(graceful_shutdown_timeout).await; + + Ok(()) +} + +fn get_tasks( + watcher: global::watcher::Watcher, + scaler: global::scaler::Scaler, + interval: Duration, + stop_receiver: watch::Receiver, +) -> anyhow::Result>>> { + let mut task_runner = TaskRunner::default(); + + task_runner.add("Watcher", interval, watcher); + task_runner.add("Scaler", interval, scaler); + + Ok(task_runner.spawn(stop_receiver)) +} diff --git a/prover/crates/bin/prover_autoscaler/src/metrics.rs b/prover/crates/bin/prover_autoscaler/src/metrics.rs new file mode 100644 index 00000000000..09cbaa6ba00 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/metrics.rs @@ -0,0 +1,14 @@ +use vise::{Counter, Gauge, LabeledFamily, Metrics}; +use zksync_config::configs::prover_autoscaler::Gpu; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "autoscaler")] +pub(crate) struct AutoscalerMetrics { + pub protocol_version: Gauge, + pub calls: Counter, + #[metrics(labels = ["target_cluster", "target_namespace", "gpu"])] + pub provers: LabeledFamily<(String, String, Gpu), Gauge, 3>, +} + +#[vise::register] +pub(crate) static AUTOSCALER_METRICS: vise::Global = vise::Global::new(); diff --git a/prover/crates/bin/prover_autoscaler/src/task_wiring.rs b/prover/crates/bin/prover_autoscaler/src/task_wiring.rs new file mode 100644 index 00000000000..9b60145ad9e --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/task_wiring.rs @@ -0,0 +1,72 @@ +use std::time::Duration; + +use anyhow::Context; +use tracing::Instrument; + +/// Task trait to be run in ProverJobMonitor. +#[async_trait::async_trait] +pub trait Task { + async fn invoke(&self) -> anyhow::Result<()>; +} + +/// Wrapper for Task with a periodic interface. Holds information about the task and provides DB connectivity. +struct PeriodicTask { + job: Box, + name: String, + interval: Duration, +} + +impl PeriodicTask { + async fn run( + &self, + mut stop_receiver: tokio::sync::watch::Receiver, + ) -> anyhow::Result<()> { + tracing::info!( + "Started Task {} with run interval: {:?}", + self.name, + self.interval + ); + + let mut interval = tokio::time::interval(self.interval); + + while !*stop_receiver.borrow_and_update() { + interval.tick().await; + self.job + .invoke() + .instrument(tracing::info_span!("run", service_name = %self.name)) + .await + .context("failed to invoke task")?; + } + tracing::info!("Stop signal received; Task {} is shut down", self.name); + Ok(()) + } +} + +/// Wrapper on a vector of task. Makes adding/spawning tasks and sharing resources ergonomic. 
+#[derive(Default)] +pub struct TaskRunner { + tasks: Vec, +} + +impl TaskRunner { + pub fn add(&mut self, name: &str, interval: Duration, job: T) { + self.tasks.push(PeriodicTask { + name: name.into(), + interval, + job: Box::new(job), + }); + } + + pub fn spawn( + self, + stop_receiver: tokio::sync::watch::Receiver, + ) -> Vec>> { + self.tasks + .into_iter() + .map(|task| { + let receiver = stop_receiver.clone(); + tokio::spawn(async move { task.run(receiver).await }) + }) + .collect() + } +} From b0ec79fcb7fa120f095d987f53c67fdab92e2c79 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Wed, 9 Oct 2024 14:05:02 +0300 Subject: [PATCH 024/140] fix: bincode deserialization for VM run data (#3044) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Implement custom deserialize for VM run data ## Why ❔ serde(deserialize_if) is not working with bincode ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- core/lib/prover_interface/src/inputs.rs | 46 +++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 8fe192a5f51..f5f389362dd 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -3,7 +3,7 @@ use std::{collections::HashMap, convert::TryInto, fmt::Debug}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; +use zksync_object_store::{_reexports::BoxedError, serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, @@ -151,6 +151,38 @@ pub struct VMRunWitnessInputData { pub witness_block_state: WitnessStorageState, } +// skip_serializing_if for field evm_emulator_code_hash doesn't work fine with bincode, +// so we are implementing custom deserialization for it +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct VMRunWitnessInputDataLegacy { + pub l1_batch_number: L1BatchNumber, + pub used_bytecodes: HashMap>, + pub initial_heap_content: Vec<(usize, U256)>, + pub protocol_version: ProtocolVersionId, + pub bootloader_code: Vec<[u8; 32]>, + pub default_account_code_hash: U256, + pub storage_refunds: Vec, + pub pubdata_costs: Vec, + pub witness_block_state: WitnessStorageState, +} + +impl From for VMRunWitnessInputData { + fn from(value: VMRunWitnessInputDataLegacy) -> Self { + Self { + l1_batch_number: value.l1_batch_number, + used_bytecodes: value.used_bytecodes, + initial_heap_content: value.initial_heap_content, + protocol_version: value.protocol_version, + bootloader_code: value.bootloader_code, + default_account_code_hash: value.default_account_code_hash, + evm_emulator_code_hash: None, + storage_refunds: value.storage_refunds, + pubdata_costs: value.pubdata_costs, + witness_block_state: value.witness_block_state, + } + } +} + impl StoredObject for VMRunWitnessInputData { const BUCKET: Bucket = Bucket::WitnessInput; @@ -160,7 +192,17 @@ impl StoredObject for VMRunWitnessInputData { 
format!("vm_run_data_{key}.bin") } - serialize_using_bincode!(); + fn serialize(&self) -> Result, BoxedError> { + zksync_object_store::bincode::serialize(self).map_err(Into::into) + } + + fn deserialize(bytes: Vec) -> Result { + zksync_object_store::bincode::deserialize::(&bytes).or_else(|_| { + zksync_object_store::bincode::deserialize::(&bytes) + .map(Into::into) + .map_err(Into::into) + }) + } } #[derive(Debug, Clone, Serialize, Deserialize)] From abe35bf7aea1120b77fdbd413d927e45da48d26c Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Wed, 9 Oct 2024 16:55:11 +0300 Subject: [PATCH 025/140] revert(configs): Add port parameter to ConsensusConfig (#2986) (#3046) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 25112df39d052f083bc45964f0298b3af5842cac. ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- core/lib/config/src/configs/consensus.rs | 1 - core/lib/config/src/testonly.rs | 1 - core/lib/protobuf_config/src/consensus.rs | 4 -- .../src/proto/core/consensus.proto | 3 -- core/node/consensus/src/testonly.rs | 1 - etc/env/consensus_config.yaml | 1 - etc/env/en_consensus_config.yaml | 1 - etc/env/file_based/general.yaml | 7 ---- zk_toolbox/crates/config/src/consts.rs | 2 + .../src/commands/chain/init/configs.rs | 25 +++++++---- .../commands/external_node/prepare_configs.rs | 38 +++++++++-------- zk_toolbox/crates/zk_inception/src/consts.rs | 23 ++++++++++ .../zk_inception/src/utils/consensus.rs | 42 +++++++++++++++++-- .../crates/zk_inception/src/utils/ports.rs | 8 +--- 14 files changed, 103 insertions(+), 54 deletions(-) diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index d864d5d44da..918d8f4adab 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -115,7 +115,6 @@ impl RpcConfig { /// Config (shared between main node and external node). #[derive(Clone, Debug, PartialEq)] pub struct ConsensusConfig { - pub port: u16, /// Local socket address to listen for the incoming connections. 
pub server_addr: std::net::SocketAddr, /// Public address of this node (should forward to `server_addr`) diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 960808aa6a6..a6ff30e04a9 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -802,7 +802,6 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::consensus::ConsensusConfig { use configs::consensus::{ConsensusConfig, Host, NodePublicKey}; ConsensusConfig { - port: self.sample(rng), server_addr: self.sample(rng), public_addr: Host(self.sample(rng)), max_payload_size: self.sample(rng), diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index 37f0c52b7aa..81cad437fe4 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -148,9 +148,6 @@ impl ProtoRepr for proto::Config { }; Ok(Self::Type { - port: required(&self.port) - .and_then(|x| Ok((*x).try_into()?)) - .context("port")?, server_addr: required(&self.server_addr) .and_then(|x| Ok(x.parse()?)) .context("server_addr")?, @@ -185,7 +182,6 @@ impl ProtoRepr for proto::Config { fn build(this: &Self::Type) -> Self { Self { - port: Some(this.port.into()), server_addr: Some(this.server_addr.to_string()), public_addr: Some(this.public_addr.0.clone()), max_payload_size: Some(this.max_payload_size.try_into().unwrap()), diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index 98b43f37f48..92527df739a 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -70,9 +70,6 @@ message Config { reserved 3; reserved "validators"; - // Port to listen on, for incoming TCP connections. - optional uint32 port = 12; // required - // IP:port to listen on, for incoming TCP connections. // Use `0.0.0.0:` to listen on all network interfaces (i.e. on all IPs exposed by this VM). 
optional string server_addr = 1; // required; IpAddr diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 2cd315ce063..04a2dfbc083 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -154,7 +154,6 @@ fn make_config( genesis_spec: Option, ) -> config::ConsensusConfig { config::ConsensusConfig { - port: cfg.server_addr.port(), server_addr: *cfg.server_addr, public_addr: config::Host(cfg.public_addr.0.clone()), max_payload_size: usize::MAX, diff --git a/etc/env/consensus_config.yaml b/etc/env/consensus_config.yaml index 2564865eeb3..304ea31fac9 100644 --- a/etc/env/consensus_config.yaml +++ b/etc/env/consensus_config.yaml @@ -1,4 +1,3 @@ -port: 3054 server_addr: "127.0.0.1:3054" public_addr: "127.0.0.1:3054" max_payload_size: 2500000 diff --git a/etc/env/en_consensus_config.yaml b/etc/env/en_consensus_config.yaml index 5c428866cb6..f759e72e891 100644 --- a/etc/env/en_consensus_config.yaml +++ b/etc/env/en_consensus_config.yaml @@ -1,4 +1,3 @@ -port: 3055 server_addr: '127.0.0.1:3055' public_addr: '127.0.0.1:3055' max_payload_size: 2500000 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 017d79dbe73..a4ba8c0201a 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -375,10 +375,3 @@ da_dispatcher: external_proof_integration_api: http_port: 3073 - -consensus: - port: 3054 - server_addr: "127.0.0.1:3054" - public_addr: "127.0.0.1:3054" - max_payload_size: 2500000 - gossip_dynamic_inbound_limit: 100 diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs index f462ce33b8f..80b204cc619 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zk_toolbox/crates/config/src/consts.rs @@ -62,6 +62,8 @@ pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001; pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002; /// Default port for the explorer data fetcher service pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040; +/// Default port for consensus service +pub const DEFAULT_CONSENSUS_PORT: u16 = 3054; pub const EXPLORER_API_DOCKER_IMAGE: &str = "matterlabs/block-explorer-api"; pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher"; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs index 37ee2e076ab..d0897473b83 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs @@ -2,7 +2,7 @@ use anyhow::Context; use common::logger; use config::{ copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, - ChainConfig, ContractsConfig, EcosystemConfig, + ChainConfig, ContractsConfig, EcosystemConfig, DEFAULT_CONSENSUS_PORT, }; use ethers::types::Address; use xshell::Shell; @@ -15,12 +15,13 @@ use crate::{ }, portal::update_portal_config, }, + defaults::PORT_RANGE_END, messages::{ - MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_CONSENSUS_CONFIG_MISSING_ERR, + MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, }, utils::{ - consensus::{generate_consensus_keys, get_consensus_secrets, get_genesis_specs}, + consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, ports::EcosystemPortsScanner, }, }; @@ -56,14 +57,22 @@ pub async fn init_configs( )?; } + // Initialize general config let mut 
general_config = chain_config.get_general_config()?; - let mut consensus_config = general_config - .consensus_config - .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; - let consensus_keys = generate_consensus_keys(); - consensus_config.genesis_spec = Some(get_genesis_specs(chain_config, &consensus_keys)); + // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` + let offset = ((chain_config.id - 1) * 100) as u16; + let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; + let consensus_port = + ecosystem_ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let consensus_keys = generate_consensus_keys(); + let consensus_config = get_consensus_config( + chain_config, + consensus_port, + Some(consensus_keys.clone()), + None, + )?; general_config.consensus_config = Some(consensus_config); general_config.save_with_base_path(shell, &chain_config.configs)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs index d714a0f8e84..5ab859d17f0 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs @@ -6,12 +6,12 @@ use config::{ external_node::ENConfig, set_rocks_db_config, traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, + ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, DEFAULT_CONSENSUS_PORT, }; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; use zksync_config::configs::{ - consensus::{ConsensusConfig, ConsensusSecrets, NodeSecretKey, Secret}, + consensus::{ConsensusSecrets, NodeSecretKey, Secret}, DatabaseSecrets, L1Secrets, }; use zksync_consensus_crypto::TextFmt; @@ -19,13 +19,14 @@ use zksync_consensus_roles as roles; use crate::{ commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal}, + defaults::PORT_RANGE_END, messages::{ msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_CONSENSUS_SECRETS_MISSING_ERR, MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PREPARING_EN_CONFIGS, }, utils::{ - consensus::node_public_key, + consensus::{get_consensus_config, node_public_key}, ports::EcosystemPortsScanner, rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }, @@ -78,12 +79,19 @@ fn prepare_configs( bridge_addresses_refresh_interval_sec: None, }; let mut general_en = general.clone(); - general_en.consensus_config = None; let main_node_consensus_config = general .consensus_config .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; - let mut en_consensus_config = main_node_consensus_config.clone(); + + // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` + ports.add_port_info( + main_node_consensus_config.server_addr.port(), + "Main node consensus".to_string(), + ); + let offset = ((config.id - 1) * 100) as u16; + let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; + let consensus_port = ports.allocate_port(consensus_port_range, "Consensus".to_string())?; let mut gossip_static_outbound = BTreeMap::new(); let main_node_public_key = node_public_key( @@ -93,8 +101,13 @@ fn prepare_configs( .context(MSG_CONSENSUS_SECRETS_MISSING_ERR)?, )? 
.context(MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR)?; + gossip_static_outbound.insert(main_node_public_key, main_node_consensus_config.public_addr); - en_consensus_config.gossip_static_outbound = gossip_static_outbound; + + let en_consensus_config = + get_consensus_config(config, consensus_port, None, Some(gossip_static_outbound))?; + general_en.consensus_config = Some(en_consensus_config.clone()); + en_consensus_config.save_with_base_path(shell, en_configs_path)?; // Set secrets config let node_key = roles::node::SecretKey::generate().encode(); @@ -115,25 +128,16 @@ fn prepare_configs( }), data_availability: None, }; - + secrets.save_with_base_path(shell, en_configs_path)?; let dirs = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::ExternalNode)?; set_rocks_db_config(&mut general_en, dirs)?; - general_en.save_with_base_path(shell, en_configs_path)?; en_config.save_with_base_path(shell, en_configs_path)?; - en_consensus_config.save_with_base_path(shell, en_configs_path)?; - secrets.save_with_base_path(shell, en_configs_path)?; - let offset = 0; // This is zero because general_en ports already have a chain offset ports.allocate_ports_in_yaml( shell, &GeneralConfig::get_path_with_base_path(en_configs_path), - offset, - )?; - ports.allocate_ports_in_yaml( - shell, - &ConsensusConfig::get_path_with_base_path(en_configs_path), - offset, + 0, // This is zero because general_en ports already have a chain offset )?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index df27d2f02d2..9f81847e333 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -1,3 +1,5 @@ +use std::net::{IpAddr, Ipv4Addr}; + pub const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; pub const MINIMUM_BALANCE_FOR_WALLET: u128 = 5000000000000000000; @@ -10,6 +12,27 @@ pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; +#[allow(non_upper_case_globals)] +const kB: usize = 1024; + +/// Max payload size for consensus in bytes +pub const MAX_PAYLOAD_SIZE: usize = 2_500_000; +/// Max batch size for consensus in bytes +/// Compute a default batch size, so operators are not caught out by the missing setting +/// while we're still working on batch syncing. The batch interval is ~1 minute, +/// so there will be ~60 blocks, and an Ethereum Merkle proof is ~1kB, but under high +/// traffic there can be thousands of huge transactions that quickly fill up blocks +/// and there could be more blocks in a batch then expected. We chose a generous +/// limit so as not to prevent any legitimate batch from being transmitted. 
+pub const MAX_BATCH_SIZE: usize = MAX_PAYLOAD_SIZE * 5000 + kB; +/// Gossip dynamic inbound limit for consensus +pub const GOSSIP_DYNAMIC_INBOUND_LIMIT: usize = 100; + +/// Public address for consensus +pub const CONSENSUS_PUBLIC_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); +/// Server address for consensus +pub const CONSENSUS_SERVER_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); + /// Path to the JS runtime config for the block-explorer-app docker container to be mounted to pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js"; pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs index 946d28a33fb..2979b4df0c1 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs @@ -1,14 +1,24 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + net::SocketAddr, +}; + use anyhow::Context as _; use config::ChainConfig; use secrecy::{ExposeSecret, Secret}; use zksync_config::configs::consensus::{ - AttesterPublicKey, AttesterSecretKey, ConsensusSecrets, GenesisSpec, NodePublicKey, - NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, WeightedAttester, - WeightedValidator, + AttesterPublicKey, AttesterSecretKey, ConsensusConfig, ConsensusSecrets, GenesisSpec, Host, + NodePublicKey, NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, + WeightedAttester, WeightedValidator, }; use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::{attester, node, validator}; +use crate::consts::{ + CONSENSUS_PUBLIC_ADDRESS_HOST, CONSENSUS_SERVER_ADDRESS_HOST, GOSSIP_DYNAMIC_INBOUND_LIMIT, + MAX_BATCH_SIZE, MAX_PAYLOAD_SIZE, +}; + pub(crate) fn parse_attester_committee( attesters: &[WeightedAttester], ) -> anyhow::Result { @@ -38,6 +48,32 @@ pub struct ConsensusPublicKeys { attester_key: attester::PublicKey, } +pub fn get_consensus_config( + chain_config: &ChainConfig, + consensus_port: u16, + consensus_keys: Option, + gossip_static_outbound: Option>, +) -> anyhow::Result { + let genesis_spec = + consensus_keys.map(|consensus_keys| get_genesis_specs(chain_config, &consensus_keys)); + + let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, consensus_port); + let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, consensus_port); + + Ok(ConsensusConfig { + server_addr, + public_addr: Host(public_addr.encode()), + genesis_spec, + max_payload_size: MAX_PAYLOAD_SIZE, + gossip_dynamic_inbound_limit: GOSSIP_DYNAMIC_INBOUND_LIMIT, + max_batch_size: MAX_BATCH_SIZE, + gossip_static_inbound: BTreeSet::new(), + gossip_static_outbound: gossip_static_outbound.unwrap_or_default(), + rpc: None, + debug_page_addr: None, + }) +} + pub fn generate_consensus_keys() -> ConsensusSecretKeys { ConsensusSecretKeys { validator_key: validator::SecretKey::generate(), diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zk_toolbox/crates/zk_inception/src/utils/ports.rs index 3b7b7ae7072..5102b4fd9c6 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/ports.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/ports.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt, net::SocketAddr, ops::Range, path::Path}; +use std::{collections::HashMap, fmt, ops::Range, path::Path}; use anyhow::{bail, Context, Result}; use config::{ @@ -109,12 +109,6 @@ impl EcosystemPorts { } } } - } else if 
key.as_str().map(|s| s.ends_with("addr")).unwrap_or(false) { - let socket_addr = val.as_str().unwrap().parse::()?; - if let Some(new_port) = updated_ports.get(&socket_addr.port()) { - let new_socket_addr = SocketAddr::new(socket_addr.ip(), *new_port); - *val = Value::String(new_socket_addr.to_string()); - } } } // Continue traversing From d4ee72cf41e60006585f3687c84a088e708bc690 Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Wed, 9 Oct 2024 16:51:08 +0200 Subject: [PATCH 026/140] feat(zk_toolbox): Write a more sophisticated script for waiting tests result in CI (#3025) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ```bash #!/bin/bash # Disable immediate exit on non-zero status set +e # Start background processes sleep 5 & PID1=$! sleep 10 & PID2=$! # This job will fail false & PID3=$! sleep 20 & PID4=$! # Wait for all processes to complete and capture their exit statuses wait $PID1 STATUS1=$? wait $PID2 STATUS2=$? wait $PID3 STATUS3=$? wait $PID4 STATUS4=$? if [ $STATUS1 -ne 0 ]; then OVERALL_STATUS=1 echo "Process 1 failed with status $STATUS1" else echo "Process 1 succeeded" fi if [ $STATUS2 -ne 0 ]; then OVERALL_STATUS=1 echo "Process 2 failed with status $STATUS2" else echo "Process 2 succeeded" fi if [ $STATUS3 -ne 0 ]; then OVERALL_STATUS=1 echo "Process 3 failed with status $STATUS3" else echo "Process 3 succeeded" fi if [ $STATUS4 -ne 0 ]; then OVERALL_STATUS=1 echo "Process 4 failed with status $STATUS4" else echo "Process 4 succeeded" fi # Re-enable immediate exit on non-zero status if needed set -e # Exit with overall status exit $OVERALL_STATUS ``` This will wait for all BG tasks to finish then print: ```bash Process 59417 succeeded Process 59418 succeeded Process 59419 failed with status 1 Process 59420 succeeded ``` ## Why ❔ Now we always wait for PIDs in ci sequentially, as a result we fail, once the very first process is failing and we don’t know which process has failed, it’d be better to wait for all of them and show which was successful and which not ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- .github/workflows/ci-core-reusable.yml | 111 +++---------------------- bin/run_on_all_chains.sh | 41 +++++++++ 2 files changed, 52 insertions(+), 100 deletions(-) create mode 100755 bin/run_on_all_chains.sh diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 7098b562292..8e2c607c9ad 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -324,6 +324,11 @@ jobs: --server-db-name=zksync_server_localhost_consensus \ --chain consensus + - name: Export chain list to environment variable + run: | + CHAINS="era,validium,custom_token,consensus" + echo "CHAINS=$CHAINS" >> $GITHUB_ENV + - name: Build test dependencies run: | ci_run zk_supervisor test build @@ -350,23 +355,7 @@ jobs: - name: Run integration tests run: | - PASSED_ENV_VARS="RUN_CONTRACT_VERIFICATION_TEST" \ - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log & - PID2=$! 
- - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test integration --no-deps --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Init external nodes run: | @@ -388,42 +377,11 @@ jobs: - name: Run recovery tests (from snapshot) run: | - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain validium &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain consensus &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Run recovery tests (from genesis) run: | - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain era &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain validium &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain consensus &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Run external node server run: | @@ -434,66 +392,19 @@ jobs: - name: Run integration tests en run: | - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain era &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain validium &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain custom_token &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain consensus &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/consensus.log & - PID4=$! 
- - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test integration --no-deps --ignore-prerequisites --external-node" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Fee projection tests run: | ci_run killall -INT zksync_server || true - - ci_run zk_supervisor test fees --no-deps --no-kill --chain era &> ${{ env.FEES_LOGS_DIR }}/era.log & - PID1=$! - - ci_run zk_supervisor test fees --no-deps --no-kill --chain validium &> ${{ env.FEES_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test fees --no-deps --no-kill --chain custom_token &> ${{ env.FEES_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test fees --no-deps --no-kill --chain consensus &> ${{ env.FEES_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Run revert tests run: | ci_run killall -INT zksync_server || true ci_run killall -INT zksync_external_node || true - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain era &> ${{ env.REVERT_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain validium &> ${{ env.REVERT_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain custom_token &> ${{ env.REVERT_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain consensus &> ${{ env.REVERT_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 - + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} # Upgrade tests should run last, because as soon as they # finish the bootloader will be different diff --git a/bin/run_on_all_chains.sh b/bin/run_on_all_chains.sh new file mode 100755 index 00000000000..68b6e81662f --- /dev/null +++ b/bin/run_on_all_chains.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# Colors for the terminal output +RED='\033[0;31m' +GREEN='\033[0;32m' +NC='\033[0m' # No Color + + +command=$1 +chain_list=$2 +log_dir=$3 +IFS=',' read -r -a chains <<< "$chain_list" +pids=() +statuses=() + +# Start background processes +for i in "${!chains[@]}"; do + eval "$command --chain ${chains[$i]} &> ${log_dir}/${chains[$i]}.log" & + pids+=($!) +done + +# Wait for all processes to complete and capture their exit statuses +for i in "${!pids[@]}"; do + wait ${pids[$i]} + statuses[$i]=$? +done + +# Check exit statuses and set overall status +overall_status=0 + +for i in "${!statuses[@]}"; do + if [ ${statuses[$i]} -ne 0 ]; then + overall_status=1 + echo -e "${RED}✗ ERROR (exit code ${statuses[$i]}): ${chains[$i]}${NC}" + else + echo -e "${GREEN}✓ SUCCESS: ${chains[$i]}${NC}" + fi +done + +# Exit with overall status +exit $overall_status From 610551427d5ab129f91e69b5efb318da917457d7 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Wed, 9 Oct 2024 17:42:47 +0200 Subject: [PATCH 027/140] feat(zk_toolbox): added support for setting attester committee defined in a separate file (#2992) So far only setting committee to the value in genesis was supported which is too constraining for real world use. 
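The new mode reads the committee from a standalone file instead. As a rough sketch of the intended usage (the `--from-file` flag name and the file path below are illustrative assumptions, not taken verbatim from this patch; `--from-genesis` is the flag added to the CI invocation in this PR):

```bash
# Existing behaviour: copy the attester committee from the genesis spec.
zk_inception consensus set-attester-committee --chain consensus --from-genesis

# New behaviour (hypothetical flag and path): read the attester committee from a separate yaml file.
zk_inception consensus set-attester-committee --chain consensus --from-file ./attester_committee.yaml
```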
I've added support for specifying the attester committee in a separate yaml file and passing it to the set-attester-committee command. --- .github/workflows/ci-core-reusable.yml | 2 +- contracts | 2 +- core/lib/protobuf_config/src/lib.rs | 9 ++- core/lib/protobuf_config/src/tests.rs | 17 +++--- zk_toolbox/Cargo.lock | 12 ++++ zk_toolbox/Cargo.toml | 3 + .../crates/config/src/consensus_secrets.rs | 4 +- zk_toolbox/crates/config/src/ecosystem.rs | 10 ++-- zk_toolbox/crates/config/src/external_node.rs | 4 +- zk_toolbox/crates/config/src/general.rs | 4 +- zk_toolbox/crates/config/src/genesis.rs | 4 +- zk_toolbox/crates/config/src/lib.rs | 2 +- zk_toolbox/crates/config/src/secrets.rs | 4 +- zk_toolbox/crates/zk_inception/Cargo.toml | 8 +++ zk_toolbox/crates/zk_inception/README.md | 57 +++++++++++++++++++ zk_toolbox/crates/zk_inception/build.rs | 10 ++++ .../src/commands/chain/deploy_l2_contracts.rs | 29 ++++++++++ .../zk_inception/src/commands/chain/mod.rs | 6 ++ .../src/commands/consensus/conv.rs | 47 +++++++++++++++ .../{consensus.rs => consensus/mod.rs} | 56 +++++++++++++++--- .../src/commands/consensus/proto/mod.proto | 9 +++ .../src/commands/consensus/proto/mod.rs | 6 ++ .../src/commands/consensus/tests.rs | 19 +++++++ .../zk_inception/src/commands/portal.rs | 2 +- .../crates/zk_inception/src/utils/ports.rs | 2 +- zk_toolbox/crates/zk_supervisor/src/dals.rs | 4 +- 26 files changed, 286 insertions(+), 46 deletions(-) create mode 100644 zk_toolbox/crates/zk_inception/src/commands/consensus/conv.rs rename zk_toolbox/crates/zk_inception/src/commands/{consensus.rs => consensus/mod.rs} (86%) create mode 100644 zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.proto create mode 100644 zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.rs create mode 100644 zk_toolbox/crates/zk_inception/src/commands/consensus/tests.rs diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 8e2c607c9ad..b2044d025c4 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -351,7 +351,7 @@ jobs: - name: Setup attester committee for the consensus chain run: | - ci_run zk_inception consensus set-attester-committee --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log + ci_run zk_inception consensus set-attester-committee --chain consensus --from-genesis &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log - name: Run integration tests run: | diff --git a/contracts b/contracts index bce4b2d0f34..aafee035db8 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit bce4b2d0f34bd87f1aaadd291772935afb1c3bd6 +Subproject commit aafee035db892689df3f7afe4b89fd6467a39313 diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index 7bbe955561b..48bc5f1ce13 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -71,17 +71,16 @@ pub fn read_optional_repr(field: &Option

) -> Option { .flatten() } -pub fn decode_yaml_repr( +/// Reads a yaml file. +pub fn read_yaml_repr( path: &PathBuf, deny_unknown_fields: bool, ) -> anyhow::Result { let yaml = std::fs::read_to_string(path).with_context(|| path.display().to_string())?; - let d = serde_yaml::Deserializer::from_str(&yaml); - let this: T = zksync_protobuf::serde::Deserialize { + zksync_protobuf::serde::Deserialize { deny_unknown_fields, } - .proto(d)?; - this.read() + .proto_repr_from_yaml::(&yaml) } pub fn encode_yaml_repr(value: &T::Type) -> anyhow::Result> { diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs index d653b9b92bf..c72bce0bf9a 100644 --- a/core/lib/protobuf_config/src/tests.rs +++ b/core/lib/protobuf_config/src/tests.rs @@ -2,7 +2,7 @@ use std::{path::PathBuf, str::FromStr}; use zksync_protobuf::testonly::{test_encode_all_formats, ReprConv}; -use crate::{decode_yaml_repr, proto}; +use crate::{proto, read_yaml_repr}; /// Tests config <-> proto (boilerplate) conversions. #[test] @@ -60,14 +60,11 @@ fn test_encoding() { #[test] fn verify_file_parsing() { let base_path = PathBuf::from_str("../../../etc/env/file_based/").unwrap(); - decode_yaml_repr::(&base_path.join("general.yaml"), true) - .unwrap(); + read_yaml_repr::(&base_path.join("general.yaml"), true).unwrap(); // It's allowed to have unknown fields in wallets, e.g. we keep private key for fee account - decode_yaml_repr::(&base_path.join("wallets.yaml"), false).unwrap(); - decode_yaml_repr::(&base_path.join("genesis.yaml"), true).unwrap(); - decode_yaml_repr::(&base_path.join("contracts.yaml"), true) - .unwrap(); - decode_yaml_repr::(&base_path.join("secrets.yaml"), true).unwrap(); - decode_yaml_repr::(&base_path.join("external_node.yaml"), true) - .unwrap(); + read_yaml_repr::(&base_path.join("wallets.yaml"), false).unwrap(); + read_yaml_repr::(&base_path.join("genesis.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("contracts.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("secrets.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("external_node.yaml"), true).unwrap(); } diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 297ef404698..97d4d181c52 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -1079,6 +1079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", + "serde", ] [[package]] @@ -6694,6 +6695,8 @@ dependencies = [ "eyre", "human-panic", "lazy_static", + "prost 0.12.6", + "rand", "secrecy", "serde", "serde_json", @@ -6710,6 +6713,10 @@ dependencies = [ "zksync_config", "zksync_consensus_crypto", "zksync_consensus_roles", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", + "zksync_protobuf_config", ] [[package]] @@ -6798,7 +6805,11 @@ dependencies = [ "rand", "secrecy", "serde", + "strum", + "strum_macros", + "time", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -6948,6 +6959,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index e6aec659bd6..d37c7e25677 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -34,7 +34,9 @@ zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } zksync_consensus_roles = "=0.3.0" zksync_consensus_crypto = 
"=0.3.0" +zksync_consensus_utils = "=0.3.0" zksync_protobuf = "=0.3.0" +zksync_protobuf_build = "=0.3.0" # External dependencies anyhow = "1.0.82" @@ -49,6 +51,7 @@ futures = "0.3.30" human-panic = "2.0" lazy_static = "1.4.0" once_cell = "1.19.0" +prost = "0.12.1" rand = "0.8.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/zk_toolbox/crates/config/src/consensus_secrets.rs b/zk_toolbox/crates/config/src/consensus_secrets.rs index 0e5c4592d2f..da551a45279 100644 --- a/zk_toolbox/crates/config/src/consensus_secrets.rs +++ b/zk_toolbox/crates/config/src/consensus_secrets.rs @@ -2,13 +2,13 @@ use std::path::Path; use xshell::Shell; use zksync_config::configs::consensus::ConsensusSecrets; -use zksync_protobuf_config::decode_yaml_repr; +use zksync_protobuf_config::read_yaml_repr; use crate::traits::ReadConfig; impl ReadConfig for ConsensusSecrets { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs index 7e7c5d4dae5..a5fcd8b7219 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zk_toolbox/crates/config/src/ecosystem.rs @@ -146,20 +146,20 @@ impl EcosystemConfig { .unwrap_or(self.default_chain.as_ref()) } - pub fn load_chain(&self, name: Option) -> Option { + pub fn load_chain(&self, name: Option) -> anyhow::Result { let name = name.unwrap_or(self.default_chain.clone()); self.load_chain_inner(&name) } - pub fn load_current_chain(&self) -> Option { + pub fn load_current_chain(&self) -> anyhow::Result { self.load_chain_inner(self.current_chain()) } - fn load_chain_inner(&self, name: &str) -> Option { + fn load_chain_inner(&self, name: &str) -> anyhow::Result { let path = self.chains.join(name).join(CONFIG_NAME); - let config = ChainConfigInternal::read(self.get_shell(), path.clone()).ok()?; + let config = ChainConfigInternal::read(self.get_shell(), path.clone())?; - Some(ChainConfig { + Ok(ChainConfig { id: config.id, name: config.name, chain_id: config.chain_id, diff --git a/zk_toolbox/crates/config/src/external_node.rs b/zk_toolbox/crates/config/src/external_node.rs index a07ff5dc140..7d884d3e234 100644 --- a/zk_toolbox/crates/config/src/external_node.rs +++ b/zk_toolbox/crates/config/src/external_node.rs @@ -2,7 +2,7 @@ use std::path::Path; use xshell::Shell; pub use zksync_config::configs::en_config::ENConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::EN_CONFIG_FILE, @@ -23,6 +23,6 @@ impl SaveConfig for ENConfig { impl ReadConfig for ENConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs index a8e7407edd0..0079105b66c 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zk_toolbox/crates/config/src/general.rs @@ -6,7 +6,7 @@ use url::Url; use xshell::Shell; use zksync_config::configs::object_store::ObjectStoreMode; pub use zksync_config::configs::GeneralConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::GENERAL_FILE, @@ -137,7 +137,7 @@ impl SaveConfig for GeneralConfig { impl 
ReadConfig for GeneralConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/genesis.rs b/zk_toolbox/crates/config/src/genesis.rs index a6469893fed..933252541f4 100644 --- a/zk_toolbox/crates/config/src/genesis.rs +++ b/zk_toolbox/crates/config/src/genesis.rs @@ -3,7 +3,7 @@ use std::path::Path; use xshell::Shell; use zksync_basic_types::L1ChainId; pub use zksync_config::GenesisConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::GENESIS_FILE, @@ -32,6 +32,6 @@ impl SaveConfig for GenesisConfig { impl ReadConfig for GenesisConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/lib.rs b/zk_toolbox/crates/config/src/lib.rs index 1a7c5bf1d7e..b449aefe3a2 100644 --- a/zk_toolbox/crates/config/src/lib.rs +++ b/zk_toolbox/crates/config/src/lib.rs @@ -10,7 +10,7 @@ pub use manipulations::*; pub use secrets::*; pub use wallet_creation::*; pub use wallets::*; -pub use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +pub use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; mod apps; mod chain; diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zk_toolbox/crates/config/src/secrets.rs index 02ace5da88e..cf0a9927c56 100644 --- a/zk_toolbox/crates/config/src/secrets.rs +++ b/zk_toolbox/crates/config/src/secrets.rs @@ -5,7 +5,7 @@ use common::db::DatabaseConfig; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; pub use zksync_config::configs::Secrets as SecretsConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::SECRETS_FILE, @@ -59,6 +59,6 @@ impl SaveConfig for SecretsConfig { impl ReadConfig for SecretsConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zk_toolbox/crates/zk_inception/Cargo.toml index 28b709c557b..5d42dadaed1 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zk_toolbox/crates/zk_inception/Cargo.toml @@ -35,8 +35,16 @@ zksync_basic_types.workspace = true clap-markdown.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_crypto.workspace = true +zksync_protobuf.workspace = true +zksync_protobuf_config.workspace = true +prost.workspace = true secrecy.workspace = true +[dev-dependencies] +rand.workspace = true +zksync_consensus_utils.workspace = true + [build-dependencies] eyre.workspace = true ethers.workspace = true +zksync_protobuf_build.workspace = true diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 7923afe4e98..7fbbb58c88f 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -17,8 +17,12 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception chain initialize-bridges`↴](#zk_inception-chain-initialize-bridges) - [`zk_inception chain deploy-l2-contracts`↴](#zk_inception-chain-deploy-l2-contracts) - [`zk_inception chain 
upgrader`↴](#zk_inception-chain-upgrader) +- [`zk_inception chain deploy-consensus-registry`↴](#zk_inception-chain-deploy-consensus-registry) +- [`zk_inception chain deploy-multicall3`↴](#zk_inception-chain-deploy-multicall3) - [`zk_inception chain deploy-paymaster`↴](#zk_inception-chain-deploy-paymaster) - [`zk_inception chain update-token-multiplier-setter`↴](#zk_inception-chain-update-token-multiplier-setter) +- [`zk_inception consensus set-attester-committee`↴](#zk_inception-consensus-set-attester-committee) +- [`zk_inception consensus get-attester-committee`↴](#zk_inception-consensus-get-attester-committee) - [`zk_inception prover`↴](#zk_inception-prover) - [`zk_inception prover init`↴](#zk_inception-prover-init) - [`zk_inception prover setup-keys`↴](#zk_inception-prover-setup-keys) @@ -364,6 +368,18 @@ Deploy Default Upgrader e.g.: `zk_inception init -a --private-key=` +## `zk_inception chain deploy-consensus-registry` + +Deploy Consensus Registry smart contract + +**Usage:** `zk_inception chain deploy-consensus-registry` + +## `zk_inception chain deploy-multicall3` + +Deploy Multicall3 smart contract + +**Usage:** `zk_inception chain deploy-multicall3` + ## `zk_inception chain deploy-paymaster` Deploy paymaster smart contract @@ -414,6 +430,47 @@ Update Token Multiplier Setter address on L1 e.g.: `zk_inception init -a --private-key=` +## `zk_inception consensus` + +Consensus related commands + +**Usage:** `zk_inception consensus ` + +###### **Subcommands:** + +- `set-attester-committee` — Set attester committee +- `get-attester-committee` — Get attester committee + +## `zk_inception consensus set-attester-committee` + +Set attester committee in the consensus registry smart contract. Requires `consensus_registry` and `multicall3` +contracts to be deployed. + +**Usage:** `zk_inception consensus set-attester-committee [OPTIONS]` + +###### **Options:** + +- `--from-genesis` — Set attester committee to `consensus.genesis_spec.attesters` in general.yaml Mutually exclusive + with `--from-file`. +- `--from-file ` — Set attester committee to committee specified in yaml file at `PATH`. + Mutually exclusive with `--from-genesis`. File format is specified in + `zk_inception/src/commands/consensus/proto/mod.proto`. Example: + + ```yaml + attesters: + - key: attester:public:secp256k1:0339d4b0cdd9896d3929631a4e5e9a5b4919f52592bec571d70bb0e50a3a824714 + weight: 1 + - key: attester:public:secp256k1:024897d8c10d7a57d108cfe2a724d7824c657f219ef5d9f7674810a6746c19fa7b + weight: 1 + ``` + +## `zk_inception consensus get-attester-committee` + +Requires `consensus_registry` and `multicall3` contracts to be deployed. Fetches attester committee from the consensus +registry contract and prints it. + +**Usage:** `zk_inception consensus get-attester-committee` + ## `zk_inception prover` Prover related commands diff --git a/zk_toolbox/crates/zk_inception/build.rs b/zk_toolbox/crates/zk_inception/build.rs index 43c8d7a5aac..92f34a542b7 100644 --- a/zk_toolbox/crates/zk_inception/build.rs +++ b/zk_toolbox/crates/zk_inception/build.rs @@ -7,5 +7,15 @@ fn main() -> eyre::Result<()> { Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json")? .generate()? 
.write_to_file(outdir.join("consensus_registry_abi.rs"))?; + + zksync_protobuf_build::Config { + input_root: "src/commands/consensus/proto".into(), + proto_root: "zksync/toolbox/consensus".into(), + dependencies: vec!["::zksync_protobuf_config::proto".parse().unwrap()], + protobuf_crate: "::zksync_protobuf".parse().unwrap(), + is_public: false, + } + .generate() + .unwrap(); Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs index 26a1d0bb325..8f0e04b5338 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs @@ -35,6 +35,7 @@ pub enum Deploy2ContractsOption { Upgrader, InitiailizeBridges, ConsensusRegistry, + Multicall3, } pub async fn run( @@ -82,6 +83,16 @@ pub async fn run( ) .await?; } + Deploy2ContractsOption::Multicall3 => { + deploy_multicall3( + shell, + &chain_config, + &ecosystem_config, + &mut contracts, + args, + ) + .await?; + } Deploy2ContractsOption::InitiailizeBridges => { initialize_bridges( shell, @@ -184,6 +195,24 @@ pub async fn deploy_consensus_registry( .await } +pub async fn deploy_multicall3( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + contracts_config: &mut ContractsConfig, + forge_args: ForgeScriptArgs, +) -> anyhow::Result<()> { + build_and_deploy( + shell, + chain_config, + ecosystem_config, + forge_args, + Some("runDeployMulticall3"), + |shell, out| contracts_config.set_multicall3(&Multicall3Output::read(shell, out)?), + ) + .await +} + pub async fn deploy_l2_contracts( shell: &Shell, chain_config: &ChainConfig, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs index 378309a07cb..c9a47616486 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs @@ -53,6 +53,9 @@ pub enum ChainCommands { /// Deploy L2 consensus registry #[command(alias = "consensus")] DeployConsensusRegistry(ForgeScriptArgs), + /// Deploy L2 multicall3 + #[command(alias = "multicall3")] + DeployMulticall3(ForgeScriptArgs), /// Deploy Default Upgrader #[command(alias = "upgrader")] DeployUpgrader(ForgeScriptArgs), @@ -77,6 +80,9 @@ pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<() ChainCommands::DeployConsensusRegistry(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::ConsensusRegistry).await } + ChainCommands::DeployMulticall3(args) => { + deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Multicall3).await + } ChainCommands::DeployUpgrader(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await } diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/conv.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/conv.rs new file mode 100644 index 00000000000..c9d878c8fd3 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/conv.rs @@ -0,0 +1,47 @@ +use anyhow::Context as _; +use zksync_config::configs::consensus as config; +use zksync_consensus_crypto::TextFmt as _; +use zksync_consensus_roles::attester; +use zksync_protobuf::{ProtoFmt, ProtoRepr}; + +use super::proto; +use crate::utils::consensus::parse_attester_committee; + +#[derive(Debug, Clone, PartialEq)] +pub(super) struct SetAttesterCommitteeFile { + pub attesters: 
attester::Committee, +} + +impl ProtoFmt for SetAttesterCommitteeFile { + type Proto = proto::SetAttesterCommitteeFile; + + fn read(r: &Self::Proto) -> anyhow::Result { + // zksync_config was not allowed to depend on consensus crates, + // therefore to parse the config we need to go through the intermediate + // representation of consensus types defined in zksync_config. + let attesters: Vec<_> = r + .attesters + .iter() + .map(|x| x.read()) + .collect::>() + .context("attesters")?; + Ok(Self { + attesters: parse_attester_committee(&attesters)?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + attesters: self + .attesters + .iter() + .map(|a| { + ProtoRepr::build(&config::WeightedAttester { + key: config::AttesterPublicKey(a.key.encode()), + weight: a.weight, + }) + }) + .collect(), + } + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs similarity index 86% rename from zk_toolbox/crates/zk_inception/src/commands/consensus.rs rename to zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs index 7cf96ebe5ad..f30e37af4bc 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs @@ -1,10 +1,11 @@ -use std::{borrow::Borrow, collections::HashMap, sync::Arc}; +use std::{borrow::Borrow, collections::HashMap, path::PathBuf, sync::Arc}; /// Consensus registry contract operations. /// Includes code duplicated from `zksync_node_consensus::registry::abi`. use anyhow::Context as _; use common::logger; use config::EcosystemConfig; +use conv::*; use ethers::{ abi::Detokenize, contract::{FunctionCall, Multicall}, @@ -19,6 +20,11 @@ use zksync_consensus_roles::{attester, validator}; use crate::{messages, utils::consensus::parse_attester_committee}; +mod conv; +mod proto; +#[cfg(test)] +mod tests; + #[allow(warnings)] mod abi { include!(concat!(env!("OUT_DIR"), "/consensus_registry_abi.rs")); @@ -65,11 +71,25 @@ fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::Bls12381Sign } } +#[derive(clap::Args, Debug)] +#[group(required = true, multiple = false)] +pub struct SetAttesterCommitteeCommand { + /// Sets the attester committee in the consensus registry contract to + /// `consensus.genesis_spec.attesters` in general.yaml. + #[clap(long)] + from_genesis: bool, + /// Sets the attester committee in the consensus registry contract to + /// the committee in the yaml file. + /// File format is defined in `commands/consensus/proto/mod.proto`. + #[clap(long)] + from_file: Option, +} + #[derive(clap::Subcommand, Debug)] pub enum Command { /// Sets the attester committee in the consensus registry contract to /// `consensus.genesis_spec.attesters` in general.yaml. - SetAttesterCommittee, + SetAttesterCommittee(SetAttesterCommitteeCommand), /// Fetches the attester committee from the consensus registry contract.
GetAttesterCommittee, } @@ -173,7 +193,8 @@ impl Setup { } fn new(shell: &Shell) -> anyhow::Result { - let ecosystem_config = EcosystemConfig::from_file(shell)?; + let ecosystem_config = + EcosystemConfig::from_file(shell).context("EcosystemConfig::from_file()")?; let chain = ecosystem_config .load_current_chain() .context(messages::MSG_CHAIN_NOT_INITIALIZED)?; @@ -227,9 +248,21 @@ impl Setup { attester::Committee::new(attesters.into_iter()).context("attester::Committee::new()") } - async fn set_attester_committee(&self) -> anyhow::Result { + fn read_attester_committee( + &self, + opts: &SetAttesterCommitteeCommand, + ) -> anyhow::Result { // Fetch the desired state. - let want = (|| { + if let Some(path) = &opts.from_file { + let yaml = std::fs::read_to_string(path).context("read_to_string()")?; + let file: SetAttesterCommitteeFile = zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_fmt_from_yaml(&yaml) + .context("proto_fmt_from_yaml()")?; + return Ok(file.attesters); + } + let attesters = (|| { Some( &self .general @@ -241,8 +274,10 @@ impl Setup { ) })() .context(messages::MSG_CONSENSUS_GENESIS_SPEC_ATTESTERS_MISSING_IN_GENERAL_YAML)?; - let want = parse_attester_committee(want).context("parse_attester_committee()")?; + parse_attester_committee(attesters).context("parse_attester_committee()") + } + async fn set_attester_committee(&self, want: &attester::Committee) -> anyhow::Result<()> { let provider = self.provider().context("provider()")?; let block_id = self.last_block(&provider).await.context("last_block()")?; let governor = self.governor().context("governor()")?; @@ -337,7 +372,7 @@ impl Setup { ) .await?; txs.wait(&provider).await.context("wait()")?; - Ok(want) + Ok(()) } } @@ -345,8 +380,11 @@ impl Command { pub(crate) async fn run(self, shell: &Shell) -> anyhow::Result<()> { let setup = Setup::new(shell).context("Setup::new()")?; match self { - Self::SetAttesterCommittee => { - let want = setup.set_attester_committee().await?; + Self::SetAttesterCommittee(opts) => { + let want = setup + .read_attester_committee(&opts) + .context("read_attester_committee()")?; + setup.set_attester_committee(&want).await?; let got = setup.get_attester_committee().await?; anyhow::ensure!( got == want, diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.proto b/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.proto new file mode 100644 index 00000000000..d8a7323f714 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package zksync.toolbox.consensus; + +import "zksync/core/consensus.proto"; + +message SetAttesterCommitteeFile { + repeated core.consensus.WeightedAttester attesters = 1; +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.rs new file mode 100644 index 00000000000..61a0a047f0a --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.rs @@ -0,0 +1,6 @@ +#![allow(warnings)] + +include!(concat!( + env!("OUT_DIR"), + "/src/commands/consensus/proto/gen.rs" +)); diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/tests.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/tests.rs new file mode 100644 index 00000000000..c2f393ad229 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/tests.rs @@ -0,0 +1,19 @@ +use rand::{distributions::Distribution, Rng}; +use 
zksync_consensus_utils::EncodeDist; +use zksync_protobuf::testonly::{test_encode_all_formats, FmtConv}; + +use super::SetAttesterCommitteeFile; + +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> SetAttesterCommitteeFile { + SetAttesterCommitteeFile { + attesters: rng.gen(), + } + } +} + +#[test] +fn test_encoding() { + let rng = &mut rand::thread_rng(); + test_encode_all_formats::>(rng); +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/portal.rs b/zk_toolbox/crates/zk_inception/src/commands/portal.rs index 5bf21121177..f9e7fe35860 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/portal.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/portal.rs @@ -107,7 +107,7 @@ async fn validate_portal_config( continue; } // Append missing chain, chain might not be initialized, so ignoring errors - if let Some(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) { + if let Ok(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) { if let Ok(portal_chain_config) = build_portal_chain_config(&chain_config).await { portal_config.add_chain_config(&portal_chain_config); } diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zk_toolbox/crates/zk_inception/src/utils/ports.rs index 5102b4fd9c6..018fb79f345 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/ports.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/ports.rs @@ -169,7 +169,7 @@ impl EcosystemPortsScanner { // - Ecosystem directory (docker-compose files) let mut dirs = vec![ecosystem_config.config.clone()]; for chain in ecosystem_config.list_of_chains() { - if let Some(chain_config) = ecosystem_config.load_chain(Some(chain)) { + if let Ok(chain_config) = ecosystem_config.load_chain(Some(chain)) { dirs.push(chain_config.configs.clone()); if let Some(external_node_config_path) = &chain_config.external_node_config_path { dirs.push(external_node_config_path.clone()); diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zk_toolbox/crates/zk_supervisor/src/dals.rs index 962a848fe00..b998eb4301d 100644 --- a/zk_toolbox/crates/zk_supervisor/src/dals.rs +++ b/zk_toolbox/crates/zk_supervisor/src/dals.rs @@ -1,4 +1,4 @@ -use anyhow::{anyhow, Context}; +use anyhow::Context as _; use config::{EcosystemConfig, SecretsConfig}; use url::Url; use xshell::Shell; @@ -91,7 +91,7 @@ fn get_secrets(shell: &Shell) -> anyhow::Result { let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config .load_current_chain() - .ok_or(anyhow!(MSG_CHAIN_NOT_FOUND_ERR))?; + .context(MSG_CHAIN_NOT_FOUND_ERR)?; let secrets = chain_config.get_secrets_config()?; Ok(secrets) From d3edc3d817c151ed00d4fa822fdae0a746e33356 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Wed, 9 Oct 2024 22:07:17 +0200 Subject: [PATCH 028/140] feat(consensus): Support for syncing blocks before consensus genesis over p2p network (#3040) To verify the received blocks, a node is requesting the payload hash of the block from the main node. To verify the block content in a trustless way, we will need to add more data to the L1 commitment. 
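Roughly, the idea is: for each block received over the p2p network before the consensus genesis, the external node asks the main node for the expected payload hash and only accepts the block if the locally computed hash matches. The sketch below is a minimal, self-contained illustration of that check; `PayloadHash`, `Block`, `hash_payload`, and `verify_pre_genesis_block` are hypothetical stand-ins, not the actual node types or RPC methods added in this PR.

```rust
// Minimal sketch of verifying a p2p-received block against a trusted payload hash.
// All names here are illustrative placeholders.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct PayloadHash([u8; 32]);

struct Block {
    number: u64,
    payload: Vec<u8>,
}

fn hash_payload(payload: &[u8]) -> PayloadHash {
    // Placeholder hash: the real node derives the payload hash with a proper
    // cryptographic hash over the encoded consensus payload.
    let mut out = [0u8; 32];
    for (i, byte) in payload.iter().enumerate() {
        out[i % 32] ^= *byte;
    }
    PayloadHash(out)
}

fn verify_pre_genesis_block(block: &Block, trusted: PayloadHash) -> Result<(), String> {
    // Accept the block only if its payload hashes to the value reported by the main node.
    let got = hash_payload(&block.payload);
    if got != trusted {
        return Err(format!("payload hash mismatch for block {}", block.number));
    }
    Ok(())
}

fn main() {
    let block = Block {
        number: 4242,
        payload: b"example payload".to_vec(),
    };
    // In the real flow this trusted hash is fetched from the main node's API.
    let trusted = hash_payload(&block.payload);
    assert!(verify_pre_genesis_block(&block, trusted).is_ok());
}
```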
--- Cargo.lock | 52 +- Cargo.toml | 20 +- core/lib/basic_types/src/lib.rs | 16 + ...223f4599d4128db588d8645f3d106de5f50b.json} | 8 +- core/lib/dal/Cargo.toml | 3 + core/lib/dal/src/consensus/conv.rs | 519 ++++++++++++++++++ core/lib/dal/src/consensus/mod.rs | 518 +---------------- core/lib/dal/src/consensus/proto/mod.proto | 4 + core/lib/dal/src/consensus/testonly.rs | 31 +- core/lib/dal/src/consensus/tests.rs | 8 +- .../mod.rs} | 273 +++------ core/lib/dal/src/consensus_dal/tests.rs | 186 +++++++ core/lib/dal/src/models/mod.rs | 15 +- core/lib/dal/src/models/storage_sync.rs | 8 +- core/lib/l1_contract_interface/Cargo.toml | 2 + .../src/i_executor/structures/mod.rs | 3 + .../structures/stored_batch_info.rs | 78 ++- .../src/i_executor/structures/tests.rs | 32 ++ core/lib/types/src/api/en.rs | 6 + core/lib/web3_decl/src/namespaces/en.rs | 6 + .../web3/backend_jsonrpsee/namespaces/en.rs | 9 + .../node/api_server/src/web3/namespaces/en.rs | 31 ++ core/node/consensus/src/batch.rs | 275 ---------- core/node/consensus/src/config.rs | 2 - core/node/consensus/src/en.rs | 54 +- core/node/consensus/src/era.rs | 14 +- core/node/consensus/src/lib.rs | 4 - core/node/consensus/src/mn.rs | 15 +- core/node/consensus/src/registry/tests.rs | 8 +- core/node/consensus/src/storage/connection.rs | 238 +++----- core/node/consensus/src/storage/store.rs | 199 +++---- core/node/consensus/src/storage/testonly.rs | 119 ++-- core/node/consensus/src/testonly.rs | 70 +-- core/node/consensus/src/tests/attestation.rs | 24 +- core/node/consensus/src/tests/batch.rs | 124 ----- core/node/consensus/src/tests/mod.rs | 199 +++++-- prover/Cargo.lock | 85 ++- zk_toolbox/Cargo.lock | 24 +- zk_toolbox/Cargo.toml | 10 +- .../zk_supervisor/src/commands/test/mod.rs | 2 +- 40 files changed, 1566 insertions(+), 1728 deletions(-) rename core/lib/dal/.sqlx/{query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json => query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json} (58%) create mode 100644 core/lib/dal/src/consensus/conv.rs rename core/lib/dal/src/{consensus_dal.rs => consensus_dal/mod.rs} (79%) create mode 100644 core/lib/dal/src/consensus_dal/tests.rs create mode 100644 core/lib/l1_contract_interface/src/i_executor/structures/tests.rs delete mode 100644 core/node/consensus/src/batch.rs delete mode 100644 core/node/consensus/src/tests/batch.rs diff --git a/Cargo.lock b/Cargo.lock index 55bbb4b5582..bd9f2d5ef28 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3303,6 +3303,12 @@ dependencies = [ "url", ] +[[package]] +name = "human-repr" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f58b778a5761513caf593693f8951c97a5b610841e754788400f32102eefdff1" + [[package]] name = "hyper" version = "0.14.30" @@ -9640,9 +9646,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -9681,9 +9687,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e7199c07aa14d9c3319839b98ad0496aac6e72327e70ded77ddb66329766db" +checksum = "a8001633dee671134cf572175a6c4f817904ce5f8d92e9b51f49891c5184a831" dependencies = [ "anyhow", "async-trait", @@ 
-9703,9 +9709,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -9724,9 +9730,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db07f7329b29737d8fd6860b350c809ae1b56ad53e26a7d0eddf3664ccb9dacb" +checksum = "061546668dd779ecb08302d2c84a6419e0093ad42aaa279bf20a8fa2ffda1be4" dependencies = [ "anyhow", "async-trait", @@ -9746,9 +9752,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a89a2d60db1ccd41438d29724a8d0d57fcf9506eb4443ea4b9205fd78c9c8e59" +checksum = "4e9789b5be26d20511bd7930bd9916d91122ff6cb09a28898563152a52f9f5eb" dependencies = [ "anyhow", "async-trait", @@ -9756,6 +9762,7 @@ dependencies = [ "build_html", "bytesize", "http-body-util", + "human-repr", "hyper 1.4.1", "hyper-util", "im", @@ -9782,9 +9789,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = "e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", @@ -9804,9 +9811,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" +checksum = "b2b2aab4ed18b13cd584f4edcc2546c8da82f89ac62e525063e12935ff28c9be" dependencies = [ "anyhow", "async-trait", @@ -9824,9 +9831,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand 0.8.5", @@ -10054,10 +10061,13 @@ dependencies = [ "tracing", "vise", "zksync_concurrency", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_consensus_storage", + "zksync_consensus_utils", "zksync_contracts", "zksync_db_connection", + "zksync_l1_contract_interface", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", @@ -10352,8 +10362,10 @@ dependencies = [ name = "zksync_l1_contract_interface" version = "0.1.0" dependencies = [ + "anyhow", "hex", "once_cell", + "rand 0.8.5", "serde", "serde_json", "serde_with", @@ -10856,9 +10868,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -10877,9 +10889,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index 5d516e97aba..d597f4af754 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -232,16 +232,16 @@ zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "a233d44bbe61dc6a758a754c3b78fe4f83e56699" } # Consensus dependencies. -zksync_concurrency = "=0.3.0" -zksync_consensus_bft = "=0.3.0" -zksync_consensus_crypto = "=0.3.0" -zksync_consensus_executor = "=0.3.0" -zksync_consensus_network = "=0.3.0" -zksync_consensus_roles = "=0.3.0" -zksync_consensus_storage = "=0.3.0" -zksync_consensus_utils = "=0.3.0" -zksync_protobuf = "=0.3.0" -zksync_protobuf_build = "=0.3.0" +zksync_concurrency = "=0.5.0" +zksync_consensus_bft = "=0.5.0" +zksync_consensus_crypto = "=0.5.0" +zksync_consensus_executor = "=0.5.0" +zksync_consensus_network = "=0.5.0" +zksync_consensus_roles = "=0.5.0" +zksync_consensus_storage = "=0.5.0" +zksync_consensus_utils = "=0.5.0" +zksync_protobuf = "=0.5.0" +zksync_protobuf_build = "=0.5.0" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 8b6a7f949dd..197bd8eb7aa 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -13,6 +13,7 @@ use std::{ str::FromStr, }; +use anyhow::Context as _; pub use ethabi::{ self, ethereum_types::{ @@ -35,6 +36,21 @@ pub mod url; pub mod vm; pub mod web3; +/// Parses H256 from a slice of bytes. +pub fn parse_h256(bytes: &[u8]) -> anyhow::Result { + Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) +} + +/// Parses H256 from an optional slice of bytes. +pub fn parse_h256_opt(bytes: Option<&[u8]>) -> anyhow::Result { + parse_h256(bytes.context("missing data")?) +} + +/// Parses H160 from a slice of bytes. +pub fn parse_h160(bytes: &[u8]) -> anyhow::Result { + Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) +} + /// Account place in the global state tree is uniquely identified by its address. /// Binary this type is represented by 160 bit big-endian representation of account address. 
#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash, Ord, PartialOrd)] diff --git a/core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json b/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json similarity index 58% rename from core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json rename to core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json index 61497cdb169..c34d38ac2d0 100644 --- a/core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json +++ b/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n WHERE\n number >= $1\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -10,13 +10,11 @@ } ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] }, "nullable": [ false ] }, - "hash": "d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977" + "hash": "fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b" } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index ccca49525e4..db03b8de982 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -22,8 +22,11 @@ zksync_types.workspace = true zksync_concurrency.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_storage.workspace = true +zksync_consensus_crypto.workspace = true +zksync_consensus_utils.workspace = true zksync_protobuf.workspace = true zksync_db_connection.workspace = true +zksync_l1_contract_interface.workspace = true itertools.workspace = true thiserror.workspace = true diff --git a/core/lib/dal/src/consensus/conv.rs b/core/lib/dal/src/consensus/conv.rs new file mode 100644 index 00000000000..269c47fa2dd --- /dev/null +++ b/core/lib/dal/src/consensus/conv.rs @@ -0,0 +1,519 @@ +//! Protobuf conversion functions. 
+use anyhow::{anyhow, Context as _}; +use zksync_concurrency::net; +use zksync_consensus_roles::{attester, node}; +use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; +use zksync_types::{ + abi, ethabi, + fee::Fee, + l1::{OpProcessingType, PriorityQueueType}, + l2::TransactionType, + parse_h160, parse_h256, + protocol_upgrade::ProtocolUpgradeTxCommonData, + transaction_request::PaymasterParams, + Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData, L2TxCommonData, + Nonce, PriorityOpId, ProtocolVersionId, Transaction, H256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use super::*; + +impl ProtoFmt for BlockMetadata { + type Proto = proto::BlockMetadata; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + payload_hash: read_required(&r.payload_hash).context("payload_hash")?, + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + payload_hash: Some(self.payload_hash.build()), + } + } +} + +impl ProtoRepr for proto::NodeAddr { + type Type = (node::PublicKey, net::Host); + fn read(&self) -> anyhow::Result { + Ok(( + read_required(&self.key).context("key")?, + net::Host(required(&self.addr).context("addr")?.clone()), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.0.build()), + addr: Some(this.1 .0.clone()), + } + } +} + +impl ProtoFmt for GlobalConfig { + type Proto = proto::GlobalConfig; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + registry_address: r + .registry_address + .as_ref() + .map(|a| parse_h160(a)) + .transpose() + .context("registry_address")?, + seed_peers: r + .seed_peers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("seed_peers")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + seed_peers: self + .seed_peers + .iter() + .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone()))) + .collect(), + } + } +} +impl ProtoFmt for AttestationStatus { + type Proto = proto::AttestationStatus; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + next_batch_to_attest: attester::BatchNumber( + *required(&r.next_batch_to_attest).context("next_batch_to_attest")?, + ), + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + next_batch_to_attest: Some(self.next_batch_to_attest.0), + } + } +} + +impl ProtoFmt for Payload { + type Proto = proto::Payload; + + fn read(r: &Self::Proto) -> anyhow::Result { + let protocol_version = required(&r.protocol_version) + .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) + .context("protocol_version")?; + let mut transactions = vec![]; + + match protocol_version { + v if v >= ProtocolVersionId::Version25 => { + anyhow::ensure!( + r.transactions.is_empty(), + "transactions should be empty in protocol_version {v}" + ); + for (i, tx) in r.transactions_v25.iter().enumerate() { + transactions.push( + tx.read() + .with_context(|| format!("transactions_v25[{i}]"))?, + ); + } + } + v => { + anyhow::ensure!( + r.transactions_v25.is_empty(), + "transactions_v25 should be empty in protocol_version {v}" + ); + for (i, tx) in r.transactions.iter().enumerate() { + transactions.push(tx.read().with_context(|| format!("transactions[{i}]"))?) 
+ } + } + } + + Ok(Self { + protocol_version, + hash: required(&r.hash) + .and_then(|h| parse_h256(h)) + .context("hash")?, + l1_batch_number: L1BatchNumber( + *required(&r.l1_batch_number).context("l1_batch_number")?, + ), + timestamp: *required(&r.timestamp).context("timestamp")?, + l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?, + l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?, + fair_pubdata_price: r.fair_pubdata_price, + virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?, + operator_address: required(&r.operator_address) + .and_then(|a| parse_h160(a)) + .context("operator_address")?, + transactions, + last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, + }) + } + + fn build(&self) -> Self::Proto { + let mut x = Self::Proto { + protocol_version: Some((self.protocol_version as u16).into()), + hash: Some(self.hash.as_bytes().into()), + l1_batch_number: Some(self.l1_batch_number.0), + timestamp: Some(self.timestamp), + l1_gas_price: Some(self.l1_gas_price), + l2_fair_gas_price: Some(self.l2_fair_gas_price), + fair_pubdata_price: self.fair_pubdata_price, + virtual_blocks: Some(self.virtual_blocks), + operator_address: Some(self.operator_address.as_bytes().into()), + // Transactions are stored in execution order, therefore order is deterministic. + transactions: vec![], + transactions_v25: vec![], + last_in_batch: Some(self.last_in_batch), + }; + match self.protocol_version { + v if v >= ProtocolVersionId::Version25 => { + x.transactions_v25 = self.transactions.iter().map(ProtoRepr::build).collect(); + } + _ => { + x.transactions = self.transactions.iter().map(ProtoRepr::build).collect(); + } + } + x + } +} + +impl ProtoRepr for proto::TransactionV25 { + type Type = Transaction; + + fn read(&self) -> anyhow::Result { + use proto::transaction_v25::T; + let tx = match required(&self.t)? { + T::L1(l1) => abi::Transaction::L1 { + tx: required(&l1.rlp) + .and_then(|x| { + let tokens = ethabi::decode(&[abi::L2CanonicalTransaction::schema()], x) + .context("ethabi::decode()")?; + // Unwrap is safe because `ethabi::decode` does the verification. + let tx = + abi::L2CanonicalTransaction::decode(tokens.into_iter().next().unwrap()) + .context("L2CanonicalTransaction::decode()")?; + Ok(tx) + }) + .context("rlp")? + .into(), + factory_deps: l1.factory_deps.clone(), + eth_block: 0, + }, + T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), + }; + Transaction::from_abi(tx, true) + } + + fn build(tx: &Self::Type) -> Self { + let tx = abi::Transaction::try_from(tx.clone()).unwrap(); + use proto::transaction_v25::T; + Self { + t: Some(match tx { + abi::Transaction::L1 { + tx, factory_deps, .. + } => T::L1(proto::L1Transaction { + rlp: Some(ethabi::encode(&[tx.encode()])), + factory_deps, + }), + abi::Transaction::L2(tx) => T::L2(proto::L2Transaction { rlp: Some(tx) }), + }), + } + } +} + +impl ProtoRepr for proto::Transaction { + type Type = Transaction; + + fn read(&self) -> anyhow::Result { + let common_data = required(&self.common_data).context("common_data")?; + let execute = required(&self.execute).context("execute")?; + Ok(Self::Type { + common_data: match common_data { + proto::transaction::CommonData::L1(common_data) => { + anyhow::ensure!( + *required(&common_data.deadline_block) + .context("common_data.deadline_block")? + == 0 + ); + anyhow::ensure!( + required(&common_data.eth_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.eth_hash")? 
+ == H256::default() + ); + ExecuteTransactionCommon::L1(L1TxCommonData { + sender: required(&common_data.sender_address) + .and_then(|x| parse_h160(x)) + .context("common_data.sender_address")?, + serial_id: required(&common_data.serial_id) + .map(|x| PriorityOpId(*x)) + .context("common_data.serial_id")?, + layer_2_tip_fee: required(&common_data.layer_2_tip_fee) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.layer_2_tip_fee")?, + full_fee: required(&common_data.full_fee) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.full_fee")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + op_processing_type: required(&common_data.op_processing_type) + .and_then(|x| { + OpProcessingType::try_from(u8::try_from(*x)?) + .map_err(|_| anyhow!("u8::try_from")) + }) + .context("common_data.op_processing_type")?, + priority_queue_type: required(&common_data.priority_queue_type) + .and_then(|x| { + PriorityQueueType::try_from(u8::try_from(*x)?) + .map_err(|_| anyhow!("u8::try_from")) + }) + .context("common_data.priority_queue_type")?, + eth_block: *required(&common_data.eth_block) + .context("common_data.eth_block")?, + canonical_tx_hash: required(&common_data.canonical_tx_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.canonical_tx_hash")?, + to_mint: required(&common_data.to_mint) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.to_mint")?, + refund_recipient: required(&common_data.refund_recipient_address) + .and_then(|x| parse_h160(x)) + .context("common_data.refund_recipient_address")?, + }) + } + proto::transaction::CommonData::L2(common_data) => { + ExecuteTransactionCommon::L2(L2TxCommonData { + nonce: required(&common_data.nonce) + .map(|x| Nonce(*x)) + .context("common_data.nonce")?, + fee: Fee { + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + max_priority_fee_per_gas: required( + &common_data.max_priority_fee_per_gas, + ) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_priority_fee_per_gas")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + }, + initiator_address: required(&common_data.initiator_address) + .and_then(|x| parse_h160(x)) + .context("common_data.initiator_address")?, + signature: required(&common_data.signature) + .context("common_data.signature")? + .clone(), + transaction_type: required(&common_data.transaction_type) + .and_then(|x| Ok(TransactionType::try_from(*x)?)) + .context("common_data.transaction_type")?, + input: { + match &common_data.input { + None => None, + Some(input) => Some(InputData { + hash: required(&input.hash) + .and_then(|x| parse_h256(x)) + .context("common_data.input.hash")?, + data: required(&input.data) + .context("common_data.input.data")? 
+ .clone(), + }), + } + }, + paymaster_params: { + let params = required(&common_data.paymaster_params)?; + PaymasterParams { + paymaster: required(¶ms.paymaster_address) + .and_then(|x| parse_h160(x)) + .context("common_data.paymaster_params.paymaster_address")?, + paymaster_input: required(¶ms.paymaster_input) + .context("common_data.paymaster_params.paymaster_input")? + .clone(), + } + }, + }) + } + proto::transaction::CommonData::ProtocolUpgrade(common_data) => { + ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: required(&common_data.sender_address) + .and_then(|x| parse_h160(x)) + .context("common_data.sender_address")?, + upgrade_id: required(&common_data.upgrade_id) + .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) + .context("common_data.upgrade_id")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + eth_block: *required(&common_data.eth_block) + .context("common_data.eth_block")?, + canonical_tx_hash: required(&common_data.canonical_tx_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.canonical_tx_hash")?, + to_mint: required(&common_data.to_mint) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.to_mint")?, + refund_recipient: required(&common_data.refund_recipient_address) + .and_then(|x| parse_h160(x)) + .context("common_data.refund_recipient_address")?, + }) + } + }, + execute: Execute { + contract_address: execute + .contract_address + .as_ref() + .and_then(|x| parse_h160(x).ok()), + calldata: required(&execute.calldata).context("calldata")?.clone(), + value: required(&execute.value) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("execute.value")?, + factory_deps: execute.factory_deps.clone(), + }, + received_timestamp_ms: 0, // This timestamp is local to the node + raw_bytes: self.raw_bytes.as_ref().map(|x| x.clone().into()), + }) + } + + fn build(this: &Self::Type) -> Self { + let common_data = match &this.common_data { + ExecuteTransactionCommon::L1(data) => { + proto::transaction::CommonData::L1(proto::L1TxCommonData { + sender_address: Some(data.sender.as_bytes().into()), + serial_id: Some(data.serial_id.0), + deadline_block: Some(0), + layer_2_tip_fee: Some(u256_to_h256(data.layer_2_tip_fee).as_bytes().into()), + full_fee: Some(u256_to_h256(data.full_fee).as_bytes().into()), + max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), + gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), + gas_per_pubdata_limit: Some( + u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), + ), + op_processing_type: Some(data.op_processing_type as u32), + priority_queue_type: Some(data.priority_queue_type as u32), + eth_hash: Some(H256::default().as_bytes().into()), + eth_block: Some(data.eth_block), + canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), + to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), + refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), + }) + } + ExecuteTransactionCommon::L2(data) => { + proto::transaction::CommonData::L2(proto::L2TxCommonData { + nonce: 
Some(data.nonce.0), + gas_limit: Some(u256_to_h256(data.fee.gas_limit).as_bytes().into()), + max_fee_per_gas: Some(u256_to_h256(data.fee.max_fee_per_gas).as_bytes().into()), + max_priority_fee_per_gas: Some( + u256_to_h256(data.fee.max_priority_fee_per_gas) + .as_bytes() + .into(), + ), + gas_per_pubdata_limit: Some( + u256_to_h256(data.fee.gas_per_pubdata_limit) + .as_bytes() + .into(), + ), + initiator_address: Some(data.initiator_address.as_bytes().into()), + signature: Some(data.signature.clone()), + transaction_type: Some(data.transaction_type as u32), + input: data.input.as_ref().map(|input_data| proto::InputData { + data: Some(input_data.data.clone()), + hash: Some(input_data.hash.as_bytes().into()), + }), + paymaster_params: Some(proto::PaymasterParams { + paymaster_input: Some(data.paymaster_params.paymaster_input.clone()), + paymaster_address: Some(data.paymaster_params.paymaster.as_bytes().into()), + }), + }) + } + ExecuteTransactionCommon::ProtocolUpgrade(data) => { + proto::transaction::CommonData::ProtocolUpgrade( + proto::ProtocolUpgradeTxCommonData { + sender_address: Some(data.sender.as_bytes().into()), + upgrade_id: Some(data.upgrade_id as u32), + max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), + gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), + gas_per_pubdata_limit: Some( + u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), + ), + eth_hash: Some(H256::default().as_bytes().into()), + eth_block: Some(data.eth_block), + canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), + to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), + refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), + }, + ) + } + }; + let execute = proto::Execute { + contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()), + calldata: Some(this.execute.calldata.clone()), + value: Some(u256_to_h256(this.execute.value).as_bytes().into()), + factory_deps: this.execute.factory_deps.clone(), + }; + Self { + common_data: Some(common_data), + execute: Some(execute), + raw_bytes: this.raw_bytes.as_ref().map(|inner| inner.0.clone()), + } + } +} + +impl ProtoRepr for proto::AttesterCommittee { + type Type = attester::Committee; + + fn read(&self) -> anyhow::Result { + let members: Vec<_> = self + .members + .iter() + .enumerate() + .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) + .collect::>() + .context("members")?; + Self::Type::new(members) + } + + fn build(this: &Self::Type) -> Self { + Self { + members: this.iter().map(|x| x.build()).collect(), + } + } +} diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 876dfe14bed..8e88265730e 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -1,29 +1,20 @@ -pub mod proto; +use std::collections::BTreeMap; +use zksync_concurrency::net; +use zksync_consensus_roles::{attester, node, validator}; +use zksync_types::{ethabi, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256}; + +mod conv; +pub mod proto; #[cfg(test)] mod testonly; #[cfg(test)] mod tests; -use std::collections::BTreeMap; - -use anyhow::{anyhow, Context as _}; -use zksync_concurrency::net; -use zksync_consensus_roles::{attester, node, validator}; -use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; -use zksync_types::{ - abi, ethabi, - fee::Fee, - l1::{OpProcessingType, PriorityQueueType}, - l2::TransactionType, - protocol_upgrade::ProtocolUpgradeTxCommonData, - 
transaction_request::PaymasterParams, - Address, Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData, - L2TxCommonData, Nonce, PriorityOpId, ProtocolVersionId, Transaction, H256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::models::{parse_h160, parse_h256}; +#[derive(Debug, PartialEq, Clone)] +pub struct BlockMetadata { + pub payload_hash: validator::PayloadHash, +} /// Global config of the consensus. #[derive(Debug, PartialEq, Clone)] @@ -33,57 +24,6 @@ pub struct GlobalConfig { pub seed_peers: BTreeMap, } -impl ProtoRepr for proto::NodeAddr { - type Type = (node::PublicKey, net::Host); - fn read(&self) -> anyhow::Result { - Ok(( - read_required(&self.key).context("key")?, - net::Host(required(&self.addr).context("addr")?.clone()), - )) - } - fn build(this: &Self::Type) -> Self { - Self { - key: Some(this.0.build()), - addr: Some(this.1 .0.clone()), - } - } -} - -impl ProtoFmt for GlobalConfig { - type Proto = proto::GlobalConfig; - - fn read(r: &Self::Proto) -> anyhow::Result { - Ok(Self { - genesis: read_required(&r.genesis).context("genesis")?, - registry_address: r - .registry_address - .as_ref() - .map(|a| parse_h160(a)) - .transpose() - .context("registry_address")?, - seed_peers: r - .seed_peers - .iter() - .enumerate() - .map(|(i, e)| e.read().context(i)) - .collect::>() - .context("seed_peers")?, - }) - } - - fn build(&self) -> Self::Proto { - Self::Proto { - genesis: Some(self.genesis.build()), - registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), - seed_peers: self - .seed_peers - .iter() - .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone()))) - .collect(), - } - } -} - /// Global attestation status served by /// `attestationStatus` RPC. #[derive(Debug, PartialEq, Clone)] @@ -92,26 +32,6 @@ pub struct AttestationStatus { pub next_batch_to_attest: attester::BatchNumber, } -impl ProtoFmt for AttestationStatus { - type Proto = proto::AttestationStatus; - - fn read(r: &Self::Proto) -> anyhow::Result { - Ok(Self { - genesis: read_required(&r.genesis).context("genesis")?, - next_batch_to_attest: attester::BatchNumber( - *required(&r.next_batch_to_attest).context("next_batch_to_attest")?, - ), - }) - } - - fn build(&self) -> Self::Proto { - Self::Proto { - genesis: Some(self.genesis.build()), - next_batch_to_attest: Some(self.next_batch_to_attest.0), - } - } -} - /// L2 block (= miniblock) payload. #[derive(Debug, PartialEq)] pub struct Payload { @@ -128,88 +48,6 @@ pub struct Payload { pub last_in_batch: bool, } -impl ProtoFmt for Payload { - type Proto = proto::Payload; - - fn read(r: &Self::Proto) -> anyhow::Result { - let protocol_version = required(&r.protocol_version) - .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) - .context("protocol_version")?; - let mut transactions = vec![]; - - match protocol_version { - v if v >= ProtocolVersionId::Version25 => { - anyhow::ensure!( - r.transactions.is_empty(), - "transactions should be empty in protocol_version {v}" - ); - for (i, tx) in r.transactions_v25.iter().enumerate() { - transactions.push( - tx.read() - .with_context(|| format!("transactions_v25[{i}]"))?, - ); - } - } - v => { - anyhow::ensure!( - r.transactions_v25.is_empty(), - "transactions_v25 should be empty in protocol_version {v}" - ); - for (i, tx) in r.transactions.iter().enumerate() { - transactions.push(tx.read().with_context(|| format!("transactions[{i}]"))?) 
- } - } - } - - Ok(Self { - protocol_version, - hash: required(&r.hash) - .and_then(|h| parse_h256(h)) - .context("hash")?, - l1_batch_number: L1BatchNumber( - *required(&r.l1_batch_number).context("l1_batch_number")?, - ), - timestamp: *required(&r.timestamp).context("timestamp")?, - l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?, - l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?, - fair_pubdata_price: r.fair_pubdata_price, - virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?, - operator_address: required(&r.operator_address) - .and_then(|a| parse_h160(a)) - .context("operator_address")?, - transactions, - last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, - }) - } - - fn build(&self) -> Self::Proto { - let mut x = Self::Proto { - protocol_version: Some((self.protocol_version as u16).into()), - hash: Some(self.hash.as_bytes().into()), - l1_batch_number: Some(self.l1_batch_number.0), - timestamp: Some(self.timestamp), - l1_gas_price: Some(self.l1_gas_price), - l2_fair_gas_price: Some(self.l2_fair_gas_price), - fair_pubdata_price: self.fair_pubdata_price, - virtual_blocks: Some(self.virtual_blocks), - operator_address: Some(self.operator_address.as_bytes().into()), - // Transactions are stored in execution order, therefore order is deterministic. - transactions: vec![], - transactions_v25: vec![], - last_in_batch: Some(self.last_in_batch), - }; - match self.protocol_version { - v if v >= ProtocolVersionId::Version25 => { - x.transactions_v25 = self.transactions.iter().map(ProtoRepr::build).collect(); - } - _ => { - x.transactions = self.transactions.iter().map(ProtoRepr::build).collect(); - } - } - x - } -} - impl Payload { pub fn decode(payload: &validator::Payload) -> anyhow::Result { zksync_protobuf::decode(&payload.0) @@ -219,337 +57,3 @@ impl Payload { validator::Payload(zksync_protobuf::encode(self)) } } - -impl ProtoRepr for proto::TransactionV25 { - type Type = Transaction; - - fn read(&self) -> anyhow::Result { - use proto::transaction_v25::T; - let tx = match required(&self.t)? { - T::L1(l1) => abi::Transaction::L1 { - tx: required(&l1.rlp) - .and_then(|x| { - let tokens = ethabi::decode(&[abi::L2CanonicalTransaction::schema()], x) - .context("ethabi::decode()")?; - // Unwrap is safe because `ethabi::decode` does the verification. - let tx = - abi::L2CanonicalTransaction::decode(tokens.into_iter().next().unwrap()) - .context("L2CanonicalTransaction::decode()")?; - Ok(tx) - }) - .context("rlp")? - .into(), - factory_deps: l1.factory_deps.clone(), - eth_block: 0, - }, - T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), - }; - Transaction::from_abi(tx, true) - } - - fn build(tx: &Self::Type) -> Self { - let tx = abi::Transaction::try_from(tx.clone()).unwrap(); - use proto::transaction_v25::T; - Self { - t: Some(match tx { - abi::Transaction::L1 { - tx, factory_deps, .. 
- } => T::L1(proto::L1Transaction { - rlp: Some(ethabi::encode(&[tx.encode()])), - factory_deps, - }), - abi::Transaction::L2(tx) => T::L2(proto::L2Transaction { rlp: Some(tx) }), - }), - } - } -} - -impl ProtoRepr for proto::Transaction { - type Type = Transaction; - - fn read(&self) -> anyhow::Result { - let common_data = required(&self.common_data).context("common_data")?; - let execute = required(&self.execute).context("execute")?; - Ok(Self::Type { - common_data: match common_data { - proto::transaction::CommonData::L1(common_data) => { - anyhow::ensure!( - *required(&common_data.deadline_block) - .context("common_data.deadline_block")? - == 0 - ); - anyhow::ensure!( - required(&common_data.eth_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.eth_hash")? - == H256::default() - ); - ExecuteTransactionCommon::L1(L1TxCommonData { - sender: required(&common_data.sender_address) - .and_then(|x| parse_h160(x)) - .context("common_data.sender_address")?, - serial_id: required(&common_data.serial_id) - .map(|x| PriorityOpId(*x)) - .context("common_data.serial_id")?, - layer_2_tip_fee: required(&common_data.layer_2_tip_fee) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.layer_2_tip_fee")?, - full_fee: required(&common_data.full_fee) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.full_fee")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - op_processing_type: required(&common_data.op_processing_type) - .and_then(|x| { - OpProcessingType::try_from(u8::try_from(*x)?) - .map_err(|_| anyhow!("u8::try_from")) - }) - .context("common_data.op_processing_type")?, - priority_queue_type: required(&common_data.priority_queue_type) - .and_then(|x| { - PriorityQueueType::try_from(u8::try_from(*x)?) 
- .map_err(|_| anyhow!("u8::try_from")) - }) - .context("common_data.priority_queue_type")?, - eth_block: *required(&common_data.eth_block) - .context("common_data.eth_block")?, - canonical_tx_hash: required(&common_data.canonical_tx_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.canonical_tx_hash")?, - to_mint: required(&common_data.to_mint) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.to_mint")?, - refund_recipient: required(&common_data.refund_recipient_address) - .and_then(|x| parse_h160(x)) - .context("common_data.refund_recipient_address")?, - }) - } - proto::transaction::CommonData::L2(common_data) => { - ExecuteTransactionCommon::L2(L2TxCommonData { - nonce: required(&common_data.nonce) - .map(|x| Nonce(*x)) - .context("common_data.nonce")?, - fee: Fee { - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - max_priority_fee_per_gas: required( - &common_data.max_priority_fee_per_gas, - ) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_priority_fee_per_gas")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - }, - initiator_address: required(&common_data.initiator_address) - .and_then(|x| parse_h160(x)) - .context("common_data.initiator_address")?, - signature: required(&common_data.signature) - .context("common_data.signature")? - .clone(), - transaction_type: required(&common_data.transaction_type) - .and_then(|x| Ok(TransactionType::try_from(*x)?)) - .context("common_data.transaction_type")?, - input: { - match &common_data.input { - None => None, - Some(input) => Some(InputData { - hash: required(&input.hash) - .and_then(|x| parse_h256(x)) - .context("common_data.input.hash")?, - data: required(&input.data) - .context("common_data.input.data")? - .clone(), - }), - } - }, - paymaster_params: { - let params = required(&common_data.paymaster_params)?; - PaymasterParams { - paymaster: required(¶ms.paymaster_address) - .and_then(|x| parse_h160(x)) - .context("common_data.paymaster_params.paymaster_address")?, - paymaster_input: required(¶ms.paymaster_input) - .context("common_data.paymaster_params.paymaster_input")? 
- .clone(), - } - }, - }) - } - proto::transaction::CommonData::ProtocolUpgrade(common_data) => { - ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: required(&common_data.sender_address) - .and_then(|x| parse_h160(x)) - .context("common_data.sender_address")?, - upgrade_id: required(&common_data.upgrade_id) - .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) - .context("common_data.upgrade_id")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - eth_block: *required(&common_data.eth_block) - .context("common_data.eth_block")?, - canonical_tx_hash: required(&common_data.canonical_tx_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.canonical_tx_hash")?, - to_mint: required(&common_data.to_mint) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.to_mint")?, - refund_recipient: required(&common_data.refund_recipient_address) - .and_then(|x| parse_h160(x)) - .context("common_data.refund_recipient_address")?, - }) - } - }, - execute: Execute { - contract_address: execute - .contract_address - .as_ref() - .and_then(|x| parse_h160(x).ok()), - calldata: required(&execute.calldata).context("calldata")?.clone(), - value: required(&execute.value) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("execute.value")?, - factory_deps: execute.factory_deps.clone(), - }, - received_timestamp_ms: 0, // This timestamp is local to the node - raw_bytes: self.raw_bytes.as_ref().map(|x| x.clone().into()), - }) - } - - fn build(this: &Self::Type) -> Self { - let common_data = match &this.common_data { - ExecuteTransactionCommon::L1(data) => { - proto::transaction::CommonData::L1(proto::L1TxCommonData { - sender_address: Some(data.sender.as_bytes().into()), - serial_id: Some(data.serial_id.0), - deadline_block: Some(0), - layer_2_tip_fee: Some(u256_to_h256(data.layer_2_tip_fee).as_bytes().into()), - full_fee: Some(u256_to_h256(data.full_fee).as_bytes().into()), - max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), - gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), - gas_per_pubdata_limit: Some( - u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), - ), - op_processing_type: Some(data.op_processing_type as u32), - priority_queue_type: Some(data.priority_queue_type as u32), - eth_hash: Some(H256::default().as_bytes().into()), - eth_block: Some(data.eth_block), - canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), - to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), - refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), - }) - } - ExecuteTransactionCommon::L2(data) => { - proto::transaction::CommonData::L2(proto::L2TxCommonData { - nonce: Some(data.nonce.0), - gas_limit: Some(u256_to_h256(data.fee.gas_limit).as_bytes().into()), - max_fee_per_gas: Some(u256_to_h256(data.fee.max_fee_per_gas).as_bytes().into()), - max_priority_fee_per_gas: Some( - u256_to_h256(data.fee.max_priority_fee_per_gas) - .as_bytes() - .into(), - ), - gas_per_pubdata_limit: Some( - u256_to_h256(data.fee.gas_per_pubdata_limit) - .as_bytes() - .into(), 
- ), - initiator_address: Some(data.initiator_address.as_bytes().into()), - signature: Some(data.signature.clone()), - transaction_type: Some(data.transaction_type as u32), - input: data.input.as_ref().map(|input_data| proto::InputData { - data: Some(input_data.data.clone()), - hash: Some(input_data.hash.as_bytes().into()), - }), - paymaster_params: Some(proto::PaymasterParams { - paymaster_input: Some(data.paymaster_params.paymaster_input.clone()), - paymaster_address: Some(data.paymaster_params.paymaster.as_bytes().into()), - }), - }) - } - ExecuteTransactionCommon::ProtocolUpgrade(data) => { - proto::transaction::CommonData::ProtocolUpgrade( - proto::ProtocolUpgradeTxCommonData { - sender_address: Some(data.sender.as_bytes().into()), - upgrade_id: Some(data.upgrade_id as u32), - max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), - gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), - gas_per_pubdata_limit: Some( - u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), - ), - eth_hash: Some(H256::default().as_bytes().into()), - eth_block: Some(data.eth_block), - canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), - to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), - refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), - }, - ) - } - }; - let execute = proto::Execute { - contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()), - calldata: Some(this.execute.calldata.clone()), - value: Some(u256_to_h256(this.execute.value).as_bytes().into()), - factory_deps: this.execute.factory_deps.clone(), - }; - Self { - common_data: Some(common_data), - execute: Some(execute), - raw_bytes: this.raw_bytes.as_ref().map(|inner| inner.0.clone()), - } - } -} - -impl ProtoRepr for proto::AttesterCommittee { - type Type = attester::Committee; - - fn read(&self) -> anyhow::Result { - let members: Vec<_> = self - .members - .iter() - .enumerate() - .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) - .collect::>() - .context("members")?; - Self::Type::new(members) - } - - fn build(this: &Self::Type) -> Self { - Self { - members: this.iter().map(|x| x.build()).collect(), - } - } -} diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index ab1245f3ef6..421904bf966 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -6,6 +6,10 @@ import "zksync/roles/validator.proto"; import "zksync/roles/attester.proto"; import "zksync/roles/node.proto"; +message BlockMetadata { + optional roles.validator.PayloadHash payload_hash = 1; // required +} + message Payload { // zksync-era ProtocolVersionId optional uint32 protocol_version = 9; // required; u16 diff --git a/core/lib/dal/src/consensus/testonly.rs b/core/lib/dal/src/consensus/testonly.rs index 904a4c563d2..13086323b17 100644 --- a/core/lib/dal/src/consensus/testonly.rs +++ b/core/lib/dal/src/consensus/testonly.rs @@ -1,11 +1,17 @@ -use rand::{ - distributions::{Distribution, Standard}, - Rng, -}; +use rand::{distributions::Distribution, Rng}; +use zksync_consensus_utils::EncodeDist; -use super::AttestationStatus; +use super::*; -impl Distribution for Standard { +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> BlockMetadata { + BlockMetadata { + payload_hash: rng.gen(), + } + } +} + +impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> AttestationStatus { AttestationStatus { genesis: rng.gen(), @@ -13,3 +19,16 @@ 
impl Distribution for Standard { } } } + +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> GlobalConfig { + GlobalConfig { + genesis: rng.gen(), + registry_address: Some(rng.gen()), + seed_peers: self + .sample_range(rng) + .map(|_| (rng.gen(), self.sample(rng))) + .collect(), + } + } +} diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index 7059f1a74ea..e8342b7446c 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -4,7 +4,7 @@ use rand::Rng; use zksync_concurrency::ctx; use zksync_protobuf::{ repr::{decode, encode}, - testonly::{test_encode, test_encode_random}, + testonly::{test_encode, test_encode_all_formats, FmtConv}, ProtoRepr, }; use zksync_test_account::Account; @@ -12,7 +12,7 @@ use zksync_types::{ web3::Bytes, Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction, }; -use super::{proto, AttestationStatus, Payload}; +use super::*; use crate::tests::mock_protocol_upgrade_transaction; fn execute(rng: &mut impl Rng) -> Execute { @@ -59,7 +59,9 @@ fn payload(rng: &mut impl Rng, protocol_version: ProtocolVersionId) -> Payload { fn test_encoding() { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - test_encode_random::(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); encode_decode::(l1_transaction(rng)); encode_decode::(l2_transaction(rng)); encode_decode::(l1_transaction(rng)); diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal/mod.rs similarity index 79% rename from core/lib/dal/src/consensus_dal.rs rename to core/lib/dal/src/consensus_dal/mod.rs index dd976f22086..eb3385a992a 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal/mod.rs @@ -1,16 +1,25 @@ use anyhow::Context as _; +use zksync_consensus_crypto::keccak256::Keccak256; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BlockStoreState, ReplicaState}; +use zksync_consensus_storage::{BlockStoreState, Last, ReplicaState}; use zksync_db_connection::{ connection::Connection, error::{DalError, DalResult, SqlxContext}, instrument::{InstrumentExt, Instrumented}, }; -use zksync_types::L2BlockNumber; +use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; +use zksync_types::{L1BatchNumber, L2BlockNumber}; -pub use crate::consensus::{proto, AttestationStatus, GlobalConfig, Payload}; +pub use crate::consensus::{proto, AttestationStatus, BlockMetadata, GlobalConfig, Payload}; use crate::{Core, CoreDal}; +#[cfg(test)] +mod tests; + +pub fn batch_hash(info: &StoredBatchInfo) -> attester::BatchHash { + attester::BatchHash(Keccak256::from_bytes(info.hash().0)) +} + /// Storage access methods for `zksync_core::consensus` module. #[derive(Debug)] pub struct ConsensusDal<'a, 'c> { @@ -305,47 +314,63 @@ impl ConsensusDal<'_, '_> { Ok(next) } - /// Fetches the last consensus certificate. + /// Fetches the block store state. + /// The blocks that are available to consensus are either pre-genesis or + /// have a consensus certificate. /// Currently, certificates are NOT generated synchronously with L2 blocks, - /// so it might NOT be the certificate for the last L2 block. - pub async fn block_certificates_range(&mut self) -> anyhow::Result { - // It cannot be older than genesis first block. - let mut start = self + /// so the `BlockStoreState.last` might be different than the last block in storage. 
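The new `block_store_state()` replaces `block_certificates_range()` and widens the range visible to consensus so it can include pre-genesis blocks. Below is a minimal, self-contained sketch of the range selection it performs, using plain integers instead of the real `validator::BlockNumber` and `zksync_consensus_storage::Last` types; the names and exact semantics are simplified assumptions based on the surrounding diff, not the real API.

// Simplified stand-in for zksync_consensus_storage::Last.
#[derive(Debug, PartialEq)]
enum Last {
    /// Block covered by a stored consensus certificate.
    Final(u64),
    /// Pre-genesis block that consensus serves without a certificate.
    PreGenesis(u64),
}

/// Mirrors the branching in `block_store_state()`: prefer the last certified block;
/// otherwise expose blocks up to `min(next block in storage, genesis first block) - 1`.
fn last_visible_block(
    first: u64,
    next_in_storage: u64,
    genesis_first_block: u64,
    last_certified: Option<u64>,
) -> Option<Last> {
    if let Some(n) = last_certified {
        return Some(Last::Final(n));
    }
    let next = next_in_storage.min(genesis_first_block);
    if next > first {
        Some(Last::PreGenesis(next - 1))
    } else {
        None
    }
}

fn main() {
    // No certificate yet and genesis starts at block 10: blocks up to 9 are pre-genesis.
    assert_eq!(last_visible_block(0, 12, 10, None), Some(Last::PreGenesis(9)));
    // A certificate (here for block 15) always wins over the pre-genesis range.
    assert_eq!(last_visible_block(0, 20, 10, Some(15)), Some(Last::Final(15)));
    // Nothing in storage yet.
    assert_eq!(last_visible_block(0, 0, 10, None), None);
}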
+ pub async fn block_store_state(&mut self) -> anyhow::Result { + let first = self.first_block().await.context("first_block()")?; + let cfg = self .global_config() - .await? - .context("genesis()")? - .genesis - .first_block; - start = start.max(self.first_block().await.context("first_block()")?); - let row = sqlx::query!( + .await + .context("global_config()")? + .context("global config is missing")?; + + // If there is a cert in storage, then the block range visible to consensus + // is [first block, block of last cert]. + if let Some(row) = sqlx::query!( r#" SELECT certificate FROM miniblocks_consensus - WHERE - number >= $1 ORDER BY number DESC LIMIT 1 "#, - i64::try_from(start.0)?, ) .instrument("block_certificate_range") .report_latency() .fetch_optional(self.storage) - .await?; - Ok(BlockStoreState { - first: start, - last: row - .map(|row| { + .await? + { + return Ok(BlockStoreState { + first, + last: Some(Last::Final( zksync_protobuf::serde::Deserialize { deny_unknown_fields: true, } - .proto_fmt(row.certificate) - }) - .transpose()?, + .proto_fmt(row.certificate)?, + )), + }); + } + + // Otherwise it is [first block, min(genesis.first_block-1,last block)]. + let next = self + .next_block() + .await + .context("next_block()")? + .min(cfg.genesis.first_block); + Ok(BlockStoreState { + first, + // unwrap is ok, because `next > first >= 0`. + last: if next > first { + Some(Last::PreGenesis(next.prev().unwrap())) + } else { + None + }, }) } @@ -461,6 +486,19 @@ impl ConsensusDal<'_, '_> { .next()) } + /// Fetches L2 block metadata for the given block number. + pub async fn block_metadata( + &mut self, + n: validator::BlockNumber, + ) -> anyhow::Result> { + let Some(b) = self.block_payload(n).await.context("block_payload()")? else { + return Ok(None); + }; + Ok(Some(BlockMetadata { + payload_hash: b.encode().hash(), + })) + } + /// Inserts a certificate for the L2 block `cert.header().number`. /// Fails if certificate doesn't match the stored block. pub async fn insert_block_certificate( @@ -558,15 +596,29 @@ impl ConsensusDal<'_, '_> { )) } + /// Fetches the L1 batch info for the given number. + pub async fn batch_info( + &mut self, + number: attester::BatchNumber, + ) -> anyhow::Result> { + let n = L1BatchNumber(number.0.try_into().context("overflow")?); + Ok(self + .storage + .blocks_dal() + .get_l1_batch_metadata(n) + .await + .context("get_l1_batch_metadata()")? + .map(|x| StoredBatchInfo::from(&x))) + } + /// Inserts a certificate for the L1 batch. /// Noop if a certificate for the same L1 batch is already present. /// Verification against previously stored attester committee is performed. - /// Batch hash is not verified - it cannot be performed due to circular dependency on - /// `zksync_l1_contract_interface`. + /// Batch hash verification is performed. pub async fn insert_batch_certificate( &mut self, cert: &attester::BatchQC, - ) -> anyhow::Result<()> { + ) -> Result<(), InsertCertificateError> { let cfg = self .global_config() .await @@ -577,6 +629,16 @@ impl ConsensusDal<'_, '_> { .await .context("attester_committee()")? .context("attester committee is missing")?; + let hash = batch_hash( + &self + .batch_info(cert.message.number) + .await + .context("batch()")? 
+ .context("batch is missing")?, + ); + if cert.message.hash != hash { + return Err(InsertCertificateError::PayloadMismatch); + } cert.verify(cfg.genesis.hash(), &committee) .context("cert.verify()")?; sqlx::query!( @@ -711,158 +773,3 @@ impl ConsensusDal<'_, '_> { })) } } - -#[cfg(test)] -mod tests { - use rand::Rng as _; - use zksync_consensus_roles::{attester, validator}; - use zksync_consensus_storage::ReplicaState; - use zksync_types::ProtocolVersion; - - use super::GlobalConfig; - use crate::{ - tests::{create_l1_batch_header, create_l2_block_header}, - ConnectionPool, Core, CoreDal, - }; - - #[tokio::test] - async fn replica_state_read_write() { - let rng = &mut rand::thread_rng(); - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); - for n in 0..3 { - let setup = validator::testonly::Setup::new(rng, 3); - let mut genesis = (*setup.genesis).clone(); - genesis.fork_number = validator::ForkNumber(n); - let cfg = GlobalConfig { - genesis: genesis.with_hash(), - registry_address: Some(rng.gen()), - seed_peers: [].into(), // TODO: rng.gen() for Host - }; - conn.consensus_dal() - .try_update_global_config(&cfg) - .await - .unwrap(); - assert_eq!( - cfg, - conn.consensus_dal().global_config().await.unwrap().unwrap() - ); - assert_eq!( - ReplicaState::default(), - conn.consensus_dal().replica_state().await.unwrap() - ); - for _ in 0..5 { - let want: ReplicaState = rng.gen(); - conn.consensus_dal().set_replica_state(&want).await.unwrap(); - assert_eq!( - cfg, - conn.consensus_dal().global_config().await.unwrap().unwrap() - ); - assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); - } - } - } - - #[tokio::test] - async fn test_batch_certificate() { - let rng = &mut rand::thread_rng(); - let setup = validator::testonly::Setup::new(rng, 3); - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - let cfg = GlobalConfig { - genesis: setup.genesis.clone(), - registry_address: Some(rng.gen()), - seed_peers: [].into(), - }; - conn.consensus_dal() - .try_update_global_config(&cfg) - .await - .unwrap(); - - let mut make_cert = |number: attester::BatchNumber| { - let m = attester::Batch { - genesis: setup.genesis.hash(), - hash: rng.gen(), - number, - }; - let mut sigs = attester::MultiSig::default(); - for k in &setup.attester_keys { - sigs.add(k.public(), k.sign_msg(m.clone()).sig); - } - attester::BatchQC { - message: m, - signatures: sigs, - } - }; - - // Required for inserting l2 blocks - conn.protocol_versions_dal() - .save_protocol_version_with_tx(&ProtocolVersion::default()) - .await - .unwrap(); - - // Insert some mock L2 blocks and L1 batches - let mut block_number = 0; - let mut batch_number = 0; - for _ in 0..3 { - for _ in 0..3 { - block_number += 1; - let l2_block = create_l2_block_header(block_number); - conn.blocks_dal().insert_l2_block(&l2_block).await.unwrap(); - } - batch_number += 1; - let l1_batch = create_l1_batch_header(batch_number); - conn.blocks_dal() - .insert_mock_l1_batch(&l1_batch) - .await - .unwrap(); - conn.blocks_dal() - .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) - .await - .unwrap(); - } - - let n = attester::BatchNumber(batch_number.into()); - - // Insert a batch certificate for the last L1 batch. 
- let want = make_cert(n); - conn.consensus_dal() - .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) - .await - .unwrap(); - conn.consensus_dal() - .insert_batch_certificate(&want) - .await - .unwrap(); - - // Reinserting a cert should fail. - assert!(conn - .consensus_dal() - .insert_batch_certificate(&make_cert(n)) - .await - .is_err()); - - // Retrieve the latest certificate. - let got_n = conn - .consensus_dal() - .last_batch_certificate_number() - .await - .unwrap() - .unwrap(); - let got = conn - .consensus_dal() - .batch_certificate(got_n) - .await - .unwrap() - .unwrap(); - assert_eq!(got, want); - - // Try insert batch certificate for non-existing batch - assert!(conn - .consensus_dal() - .insert_batch_certificate(&make_cert(n.next())) - .await - .is_err()); - } -} diff --git a/core/lib/dal/src/consensus_dal/tests.rs b/core/lib/dal/src/consensus_dal/tests.rs new file mode 100644 index 00000000000..772e7b2bf5e --- /dev/null +++ b/core/lib/dal/src/consensus_dal/tests.rs @@ -0,0 +1,186 @@ +use rand::Rng as _; +use zksync_consensus_roles::{attester, validator}; +use zksync_consensus_storage::ReplicaState; +use zksync_types::{ + block::L1BatchTreeData, + commitment::{L1BatchCommitmentArtifacts, L1BatchCommitmentHash}, + ProtocolVersion, +}; + +use super::*; +use crate::{ + tests::{create_l1_batch_header, create_l2_block_header}, + ConnectionPool, Core, CoreDal, +}; + +#[tokio::test] +async fn replica_state_read_write() { + let rng = &mut rand::thread_rng(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); + for n in 0..3 { + let setup = validator::testonly::Setup::new(rng, 3); + let mut genesis = (*setup.genesis).clone(); + genesis.fork_number = validator::ForkNumber(n); + let cfg = GlobalConfig { + genesis: genesis.with_hash(), + registry_address: Some(rng.gen()), + seed_peers: [].into(), // TODO: rng.gen() for Host + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); + assert_eq!( + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() + ); + assert_eq!( + ReplicaState::default(), + conn.consensus_dal().replica_state().await.unwrap() + ); + for _ in 0..5 { + let want: ReplicaState = rng.gen(); + conn.consensus_dal().set_replica_state(&want).await.unwrap(); + assert_eq!( + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() + ); + assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); + } + } +} + +#[tokio::test] +async fn test_batch_certificate() { + let rng = &mut rand::thread_rng(); + let setup = validator::testonly::Setup::new(rng, 3); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + let cfg = GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: Some(rng.gen()), + seed_peers: [].into(), + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); + + let make_cert = |number: attester::BatchNumber, hash: attester::BatchHash| { + let m = attester::Batch { + genesis: setup.genesis.hash(), + hash, + number, + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } + attester::BatchQC { + message: m, + signatures: sigs, + } + }; + + // Required for inserting l2 blocks + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + // 
Insert some mock L2 blocks and L1 batches + let mut block_number = 0; + let mut batch_number = 0; + for _ in 0..3 { + for _ in 0..3 { + block_number += 1; + let l2_block = create_l2_block_header(block_number); + conn.blocks_dal().insert_l2_block(&l2_block).await.unwrap(); + } + batch_number += 1; + let l1_batch = create_l1_batch_header(batch_number); + conn.blocks_dal() + .insert_mock_l1_batch(&l1_batch) + .await + .unwrap(); + conn.blocks_dal() + .save_l1_batch_tree_data( + l1_batch.number, + &L1BatchTreeData { + hash: rng.gen(), + rollup_last_leaf_index: rng.gen(), + }, + ) + .await + .unwrap(); + conn.blocks_dal() + .save_l1_batch_commitment_artifacts( + l1_batch.number, + &L1BatchCommitmentArtifacts { + commitment_hash: L1BatchCommitmentHash { + pass_through_data: rng.gen(), + aux_output: rng.gen(), + meta_parameters: rng.gen(), + commitment: rng.gen(), + }, + l2_l1_merkle_root: rng.gen(), + compressed_state_diffs: None, + compressed_initial_writes: None, + compressed_repeated_writes: None, + zkporter_is_available: false, + aux_commitments: None, + }, + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) + .await + .unwrap(); + } + + let n = attester::BatchNumber(batch_number.into()); + + // Insert a batch certificate for the last L1 batch. + let hash = batch_hash(&conn.consensus_dal().batch_info(n).await.unwrap().unwrap()); + let want = make_cert(n, hash); + conn.consensus_dal() + .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) + .await + .unwrap(); + conn.consensus_dal() + .insert_batch_certificate(&want) + .await + .unwrap(); + + // Reinserting a cert should fail. + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n, hash)) + .await + .is_err()); + + // Retrieve the latest certificate. + let got_n = conn + .consensus_dal() + .last_batch_certificate_number() + .await + .unwrap() + .unwrap(); + let got = conn + .consensus_dal() + .batch_certificate(got_n) + .await + .unwrap() + .unwrap(); + assert_eq!(got, want); + + // Try insert batch certificate for non-existing batch + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n.next(), rng.gen())) + .await + .is_err()); +} diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index 479649f8509..12e41ac780a 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -1,7 +1,6 @@ pub mod storage_block; -use anyhow::Context as _; use zksync_db_connection::error::SqlxContext; -use zksync_types::{ProtocolVersionId, H160, H256}; +use zksync_types::ProtocolVersionId; mod call; pub mod storage_base_token_ratio; @@ -19,18 +18,6 @@ pub mod storage_verification_request; #[cfg(test)] mod tests; -pub(crate) fn parse_h256(bytes: &[u8]) -> anyhow::Result { - Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) -} - -fn parse_h256_opt(bytes: Option<&[u8]>) -> anyhow::Result { - parse_h256(bytes.context("missing data")?) -} - -pub(crate) fn parse_h160(bytes: &[u8]) -> anyhow::Result { - Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) -} - pub(crate) fn parse_protocol_version(raw: i32) -> sqlx::Result { u16::try_from(raw) .decode_column("protocol_version")? 
diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index cf7b76d8163..7a4ebe074fe 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -1,13 +1,11 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::error::SqlxContext; use zksync_types::{ - api::en, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256, + api::en, parse_h160, parse_h256, parse_h256_opt, Address, L1BatchNumber, L2BlockNumber, + ProtocolVersionId, Transaction, H256, }; -use crate::{ - consensus_dal::Payload, - models::{parse_h160, parse_h256, parse_h256_opt, parse_protocol_version}, -}; +use crate::{consensus_dal::Payload, models::parse_protocol_version}; #[derive(Debug, Clone, sqlx::FromRow)] pub(crate) struct StorageSyncBlock { diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml index 8b68df854e7..1aa4c256e0f 100644 --- a/core/lib/l1_contract_interface/Cargo.toml +++ b/core/lib/l1_contract_interface/Cargo.toml @@ -19,12 +19,14 @@ crypto_codegen.workspace = true # Used to calculate the kzg commitment and proofs kzg.workspace = true +anyhow.workspace = true sha2.workspace = true sha3.workspace = true hex.workspace = true once_cell.workspace = true [dev-dependencies] +rand.workspace = true serde.workspace = true serde_json.workspace = true serde_with = { workspace = true, features = ["base64", "hex"] } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs index d1ed57e41f2..aa987204901 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs @@ -3,4 +3,7 @@ mod commit_batch_info; mod stored_batch_info; +#[cfg(test)] +mod tests; + pub use self::{commit_batch_info::CommitBatchInfo, stored_batch_info::StoredBatchInfo}; diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs index 8373c46e36b..26f9b30392e 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs @@ -1,7 +1,8 @@ +use anyhow::Context as _; use zksync_types::{ commitment::L1BatchWithMetadata, - ethabi::{self, Token}, - web3, + ethabi::{self, ParamType, Token}, + parse_h256, web3, web3::contract::Error as ContractError, H256, U256, }; @@ -9,7 +10,7 @@ use zksync_types::{ use crate::Tokenizable; /// `StoredBatchInfo` from `IExecutor.sol`. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct StoredBatchInfo { pub batch_number: u64, pub batch_hash: H256, @@ -22,11 +23,35 @@ pub struct StoredBatchInfo { } impl StoredBatchInfo { + fn schema() -> Vec { + vec![ParamType::Tuple(vec![ + ParamType::Uint(64), + ParamType::FixedBytes(32), + ParamType::Uint(64), + ParamType::Uint(256), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::Uint(256), + ParamType::FixedBytes(32), + ])] + } + + /// Encodes the struct into RLP. + pub fn encode(&self) -> Vec { + ethabi::encode(&[self.clone().into_token()]) + } + + /// Decodes the struct from RLP. + pub fn decode(rlp: &[u8]) -> anyhow::Result { + let [token] = ethabi::decode_whole(&Self::schema(), rlp)? + .try_into() + .unwrap(); + Ok(Self::from_token(token)?) + } + /// `_hashStoredBatchInfo` from `Executor.sol`. 
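The `encode`/`decode`/`hash` additions make the L1-side batch hash reproducible off-chain: `hash()` is keccak256 over the ABI encoding of the struct, and `batch_hash()` in `consensus_dal` wraps those same 32 bytes as the attester batch hash. A rough sketch of that shape using the `sha3` crate (listed as a dependency in the Cargo.toml hunk above); the `encode()` body below is a placeholder for illustration only, not the real `ethabi` tuple encoding.

use sha3::{Digest, Keccak256};

struct StoredBatchInfo {
    batch_number: u64,
    batch_hash: [u8; 32],
    // Remaining fields elided for brevity.
}

impl StoredBatchInfo {
    /// Placeholder for the ABI encoding produced by `into_token()` + `ethabi::encode`.
    fn encode(&self) -> Vec<u8> {
        let mut out = self.batch_number.to_be_bytes().to_vec();
        out.extend_from_slice(&self.batch_hash);
        out
    }

    /// `_hashStoredBatchInfo`: keccak256 over the encoded struct.
    fn hash(&self) -> Vec<u8> {
        Keccak256::digest(self.encode()).to_vec()
    }
}

fn main() {
    let info = StoredBatchInfo { batch_number: 7, batch_hash: [0x11; 32] };
    // The consensus-side batch hash wraps exactly these 32 bytes.
    for byte in info.hash() {
        print!("{byte:02x}");
    }
    println!();
}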
pub fn hash(&self) -> H256 { - H256(web3::keccak256(ðabi::encode(&[self - .clone() - .into_token()]))) + H256(web3::keccak256(&self.encode())) } } @@ -46,11 +71,42 @@ impl From<&L1BatchWithMetadata> for StoredBatchInfo { } impl Tokenizable for StoredBatchInfo { - fn from_token(_token: Token) -> Result { - // Currently there is no need to decode this struct. - // We still want to implement `Tokenizable` trait for it, so that *once* it's needed - // the implementation is provided here and not in some other inconsistent way. - Err(ContractError::Other("Not implemented".into())) + fn from_token(token: Token) -> Result { + (|| { + let [ + Token::Uint(batch_number), + Token::FixedBytes(batch_hash), + Token::Uint(index_repeated_storage_changes), + Token::Uint(number_of_layer1_txs), + Token::FixedBytes(priority_operations_hash), + Token::FixedBytes(l2_logs_tree_root), + Token::Uint(timestamp), + Token::FixedBytes(commitment), + ] : [Token;8] = token + .into_tuple().context("not a tuple")? + .try_into().ok().context("bad length")? + else { anyhow::bail!("bad format") }; + Ok(Self { + batch_number: batch_number + .try_into() + .ok() + .context("overflow") + .context("batch_number")?, + batch_hash: parse_h256(&batch_hash).context("batch_hash")?, + index_repeated_storage_changes: index_repeated_storage_changes + .try_into() + .ok() + .context("overflow") + .context("index_repeated_storage_changes")?, + number_of_layer1_txs, + priority_operations_hash: parse_h256(&priority_operations_hash) + .context("priority_operations_hash")?, + l2_logs_tree_root: parse_h256(&l2_logs_tree_root).context("l2_logs_tree_root")?, + timestamp, + commitment: parse_h256(&commitment).context("commitment")?, + }) + })() + .map_err(|err| ContractError::InvalidOutputType(format!("{err:#}"))) } fn into_token(self) -> Token { diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs b/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs new file mode 100644 index 00000000000..0cb8caffb34 --- /dev/null +++ b/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs @@ -0,0 +1,32 @@ +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +use super::*; + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> StoredBatchInfo { + StoredBatchInfo { + batch_number: rng.gen(), + batch_hash: rng.gen(), + index_repeated_storage_changes: rng.gen(), + number_of_layer1_txs: rng.gen::().into(), + priority_operations_hash: rng.gen(), + l2_logs_tree_root: rng.gen(), + timestamp: rng.gen::().into(), + commitment: rng.gen(), + } + } +} + +/// Test checking encoding and decoding of `StoredBatchInfo`. +#[test] +fn test_encoding() { + let rng = &mut rand::thread_rng(); + for _ in 0..10 { + let want: StoredBatchInfo = rng.gen(); + let got = StoredBatchInfo::decode(&want.encode()).unwrap(); + assert_eq!(want, got); + } +} diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index 9391c862757..209ab7c24f9 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -64,3 +64,9 @@ pub struct ConsensusGenesis(pub serde_json::Value); /// The wrapped JSON value corresponds to `zksync_dal::consensus::AttestationStatus`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AttestationStatus(pub serde_json::Value); + +/// Block metadata that should have been committed to on L1, but it is not. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::BlockMetadata`. 
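On the client side this wrapper is deliberately opaque: the external node receives a JSON value and re-parses it against the consensus proto schema. A toy illustration with `serde_json`; the field name ("payloadHash") and the sample value are assumptions for illustration, since the actual layout is dictated by the proto definition.

struct BlockMetadata(serde_json::Value);

fn main() -> Result<(), serde_json::Error> {
    // Hypothetical shape of an `en_blockMetadata` response payload.
    let raw = r#"{"payloadHash":"EjSrzQ=="}"#;
    let meta = BlockMetadata(serde_json::from_str(raw)?);
    // Code that knows the consensus schema digs the payload hash back out.
    let payload_hash = meta.0.pointer("/payloadHash");
    println!("payload_hash = {payload_hash:?}");
    Ok(())
}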
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockMetadata(pub serde_json::Value); diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs index 8a4d2db8c6f..0f1fd9d34b8 100644 --- a/core/lib/web3_decl/src/namespaces/en.rs +++ b/core/lib/web3_decl/src/namespaces/en.rs @@ -28,6 +28,12 @@ pub trait EnNamespace { #[method(name = "consensusGlobalConfig")] async fn consensus_global_config(&self) -> RpcResult>; + #[method(name = "blockMetadata")] + async fn block_metadata( + &self, + block_number: L2BlockNumber, + ) -> RpcResult>; + /// Lists all tokens created at or before the specified `block_number`. /// /// This method is used by EN after snapshot recovery in order to recover token records. diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs index de763526373..9f5e54a5f4f 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs @@ -37,6 +37,15 @@ impl EnNamespaceServer for EnNamespace { .map_err(|err| self.current_method().map_err(err)) } + async fn block_metadata( + &self, + block_number: L2BlockNumber, + ) -> RpcResult> { + self.block_metadata_impl(block_number) + .await + .map_err(|err| self.current_method().map_err(err)) + } + async fn sync_tokens(&self, block_number: Option) -> RpcResult> { self.sync_tokens_impl(block_number) .await diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index 721ca985ceb..a09a0cb92fc 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -1,5 +1,6 @@ use anyhow::Context as _; use zksync_config::{configs::EcosystemContracts, GenesisConfig}; +use zksync_consensus_roles::validator; use zksync_dal::{CoreDal, DalError}; use zksync_types::{ api::en, protocol_version::ProtocolSemanticVersion, tokens::TokenInfo, Address, L1BatchNumber, @@ -86,6 +87,36 @@ impl EnNamespace { ))) } + #[tracing::instrument(skip(self))] + pub async fn block_metadata_impl( + &self, + block_number: L2BlockNumber, + ) -> Result, Web3Error> { + let Some(meta) = self + .state + .acquire_connection() + .await? + // unwrap is ok, because we start outermost transaction. + .transaction_builder() + .unwrap() + // run readonly transaction to perform consistent reads. + .set_readonly() + .build() + .await + .context("TransactionBuilder::build()")? + .consensus_dal() + .block_metadata(validator::BlockNumber(block_number.0.into())) + .await? + else { + return Ok(None); + }; + Ok(Some(en::BlockMetadata( + zksync_protobuf::serde::Serialize + .proto_fmt(&meta, serde_json::value::Serializer) + .unwrap(), + ))) + } + pub(crate) fn current_method(&self) -> &MethodTracer { &self.state.current_method } diff --git a/core/node/consensus/src/batch.rs b/core/node/consensus/src/batch.rs deleted file mode 100644 index af38f446c1b..00000000000 --- a/core/node/consensus/src/batch.rs +++ /dev/null @@ -1,275 +0,0 @@ -//! L1 Batch representation for sending over p2p network. 
-use anyhow::Context as _; -use zksync_concurrency::{ctx, error::Wrap as _}; -use zksync_consensus_roles::validator; -use zksync_dal::consensus_dal::Payload; -use zksync_l1_contract_interface::i_executor; -use zksync_metadata_calculator::api_server::{TreeApiClient, TreeEntryWithProof}; -use zksync_system_constants as constants; -use zksync_types::{ - abi, - block::{unpack_block_info, L2BlockHasher}, - AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, Transaction, H256, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::storage::ConnectionPool; - -/// Commitment to the last block of a batch. -pub(crate) struct LastBlockCommit { - /// Hash of the `StoredBatchInfo` which is stored on L1. - /// The hashed `StoredBatchInfo` contains a `root_hash` of the L2 state, - /// which contains state of the `SystemContext` contract, - /// which contains enough data to reconstruct the hash - /// of the last L2 block of the batch. - pub(crate) info: H256, -} - -/// Witness proving what is the last block of a batch. -/// Contains the hash and the number of the last block. -pub(crate) struct LastBlockWitness { - info: i_executor::structures::StoredBatchInfo, - protocol_version: ProtocolVersionId, - - current_l2_block_info: TreeEntryWithProof, - tx_rolling_hash: TreeEntryWithProof, - l2_block_hash_entry: TreeEntryWithProof, -} - -/// Commitment to an L1 batch. -pub(crate) struct L1BatchCommit { - pub(crate) number: L1BatchNumber, - pub(crate) this_batch: LastBlockCommit, - pub(crate) prev_batch: LastBlockCommit, -} - -/// L1Batch with witness that can be -/// verified against `L1BatchCommit`. -pub struct L1BatchWithWitness { - pub(crate) blocks: Vec, - pub(crate) this_batch: LastBlockWitness, - pub(crate) prev_batch: LastBlockWitness, -} - -impl LastBlockWitness { - /// Address of the SystemContext contract. - fn system_context_addr() -> AccountTreeId { - AccountTreeId::new(constants::SYSTEM_CONTEXT_ADDRESS) - } - - /// Storage key of the `SystemContext.current_l2_block_info` field. - fn current_l2_block_info_key() -> U256 { - StorageKey::new( - Self::system_context_addr(), - constants::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ) - .hashed_key_u256() - } - - /// Storage key of the `SystemContext.tx_rolling_hash` field. - fn tx_rolling_hash_key() -> U256 { - StorageKey::new( - Self::system_context_addr(), - constants::SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ) - .hashed_key_u256() - } - - /// Storage key of the entry of the `SystemContext.l2BlockHash[]` array, corresponding to l2 - /// block with number i. - fn l2_block_hash_entry_key(i: L2BlockNumber) -> U256 { - let key = h256_to_u256(constants::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION) - + U256::from(i.0) % U256::from(constants::SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES); - StorageKey::new(Self::system_context_addr(), u256_to_h256(key)).hashed_key_u256() - } - - /// Loads a `LastBlockWitness` from storage. - async fn load( - ctx: &ctx::Ctx, - n: L1BatchNumber, - pool: &ConnectionPool, - tree: &dyn TreeApiClient, - ) -> ctx::Result { - let mut conn = pool.connection(ctx).await.wrap("pool.connection()")?; - let batch = conn - .batch(ctx, n) - .await - .wrap("batch()")? 
- .context("batch not in storage")?; - - let proofs = tree - .get_proofs( - n, - vec![ - Self::current_l2_block_info_key(), - Self::tx_rolling_hash_key(), - ], - ) - .await - .context("get_proofs()")?; - if proofs.len() != 2 { - return Err(anyhow::format_err!("proofs.len()!=2").into()); - } - let current_l2_block_info = proofs[0].clone(); - let tx_rolling_hash = proofs[1].clone(); - let (block_number, _) = unpack_block_info(current_l2_block_info.value.as_bytes().into()); - let prev = L2BlockNumber( - block_number - .checked_sub(1) - .context("L2BlockNumber underflow")? - .try_into() - .context("L2BlockNumber overflow")?, - ); - let proofs = tree - .get_proofs(n, vec![Self::l2_block_hash_entry_key(prev)]) - .await - .context("get_proofs()")?; - if proofs.len() != 1 { - return Err(anyhow::format_err!("proofs.len()!=1").into()); - } - let l2_block_hash_entry = proofs[0].clone(); - Ok(Self { - info: i_executor::structures::StoredBatchInfo::from(&batch), - protocol_version: batch - .header - .protocol_version - .context("missing protocol_version")?, - - current_l2_block_info, - tx_rolling_hash, - l2_block_hash_entry, - }) - } - - /// Verifies the proof against the commit and returns the hash - /// of the last L2 block. - pub(crate) fn verify(&self, comm: &LastBlockCommit) -> anyhow::Result<(L2BlockNumber, H256)> { - // Verify info. - anyhow::ensure!(comm.info == self.info.hash()); - - // Check the protocol version. - anyhow::ensure!( - self.protocol_version >= ProtocolVersionId::Version13, - "unsupported protocol version" - ); - - let (block_number, block_timestamp) = - unpack_block_info(self.current_l2_block_info.value.as_bytes().into()); - let prev = L2BlockNumber( - block_number - .checked_sub(1) - .context("L2BlockNumber underflow")? - .try_into() - .context("L2BlockNumber overflow")?, - ); - - // Verify merkle paths. - self.current_l2_block_info - .verify(Self::current_l2_block_info_key(), self.info.batch_hash) - .context("invalid merkle path for current_l2_block_info")?; - self.tx_rolling_hash - .verify(Self::tx_rolling_hash_key(), self.info.batch_hash) - .context("invalid merkle path for tx_rolling_hash")?; - self.l2_block_hash_entry - .verify(Self::l2_block_hash_entry_key(prev), self.info.batch_hash) - .context("invalid merkle path for l2_block_hash entry")?; - - let block_number = L2BlockNumber(block_number.try_into().context("block_number overflow")?); - // Derive hash of the last block - Ok(( - block_number, - L2BlockHasher::hash( - block_number, - block_timestamp, - self.l2_block_hash_entry.value, - self.tx_rolling_hash.value, - self.protocol_version, - ), - )) - } - - /// Last L2 block of the batch. - pub fn last_block(&self) -> validator::BlockNumber { - let (n, _) = unpack_block_info(self.current_l2_block_info.value.as_bytes().into()); - validator::BlockNumber(n) - } -} - -impl L1BatchWithWitness { - /// Loads an `L1BatchWithWitness` from storage. 
- pub(crate) async fn load( - ctx: &ctx::Ctx, - number: L1BatchNumber, - pool: &ConnectionPool, - tree: &dyn TreeApiClient, - ) -> ctx::Result { - let prev_batch = LastBlockWitness::load(ctx, number - 1, pool, tree) - .await - .with_wrap(|| format!("LastBlockWitness::make({})", number - 1))?; - let this_batch = LastBlockWitness::load(ctx, number, pool, tree) - .await - .with_wrap(|| format!("LastBlockWitness::make({number})"))?; - let mut conn = pool.connection(ctx).await.wrap("connection()")?; - let this = Self { - blocks: conn - .payloads( - ctx, - std::ops::Range { - start: prev_batch.last_block() + 1, - end: this_batch.last_block() + 1, - }, - ) - .await - .wrap("payloads()")?, - prev_batch, - this_batch, - }; - Ok(this) - } - - /// Verifies the L1Batch and witness against the commitment. - /// WARNING: the following fields of the payload are not currently verified: - /// * `l1_gas_price` - /// * `l2_fair_gas_price` - /// * `fair_pubdata_price` - /// * `virtual_blocks` - /// * `operator_address` - /// * `protocol_version` (present both in payload and witness, but neither has a commitment) - pub(crate) fn verify(&self, comm: &L1BatchCommit) -> anyhow::Result<()> { - let (last_number, last_hash) = self.this_batch.verify(&comm.this_batch)?; - let (mut prev_number, mut prev_hash) = self.prev_batch.verify(&comm.prev_batch)?; - anyhow::ensure!( - self.prev_batch - .info - .batch_number - .checked_add(1) - .context("batch_number overflow")? - == u64::from(comm.number.0) - ); - anyhow::ensure!(self.this_batch.info.batch_number == u64::from(comm.number.0)); - for (i, b) in self.blocks.iter().enumerate() { - anyhow::ensure!(b.l1_batch_number == comm.number); - anyhow::ensure!(b.protocol_version == self.this_batch.protocol_version); - anyhow::ensure!(b.last_in_batch == (i + 1 == self.blocks.len())); - prev_number += 1; - let mut hasher = L2BlockHasher::new(prev_number, b.timestamp, prev_hash); - for t in &b.transactions { - // Reconstruct transaction by converting it back and forth to `abi::Transaction`. - // This allows us to verify that the transaction actually matches the transaction - // hash. - // TODO: make consensus payload contain `abi::Transaction` instead. - // TODO: currently the payload doesn't contain the block number, which is - // annoying. Consider adding it to payload. - let t2 = Transaction::from_abi(abi::Transaction::try_from(t.clone())?, true)?; - anyhow::ensure!(t == &t2); - hasher.push_tx_hash(t.hash()); - } - prev_hash = hasher.finalize(self.this_batch.protocol_version); - anyhow::ensure!(prev_hash == b.hash); - } - anyhow::ensure!(prev_hash == last_hash); - anyhow::ensure!(prev_number == last_number); - Ok(()) - } -} diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index 3584d533f66..4ad7a551ab4 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -169,7 +169,6 @@ pub(super) fn executor( server_addr: cfg.server_addr, public_addr: net::Host(cfg.public_addr.0.clone()), max_payload_size: cfg.max_payload_size, - max_batch_size: cfg.max_batch_size, node_key: node_key(secrets) .context("node_key")? 
.context("missing node_key")?, @@ -184,6 +183,5 @@ pub(super) fn executor( gossip_static_outbound, rpc, debug_page, - batch_poll_interval: time::Duration::seconds(1), }) } diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index e4be8d9d687..c358974fb0c 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -4,7 +4,7 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_consensus_storage::BlockStore; use zksync_dal::consensus_dal; use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; @@ -32,8 +32,13 @@ impl EN { /// Task running a consensus node for the external node. /// It may be a validator, but it cannot be a leader (cannot propose blocks). /// - /// NOTE: Before starting the consensus node it fetches all the blocks + /// If `enable_pregenesis` is false, + /// before starting the consensus node it fetches all the blocks /// older than consensus genesis from the main node using json RPC. + /// NOTE: currently `enable_pregenesis` is hardcoded to `false` in `era.rs`. + /// True is used only in tests. Once the `block_metadata` RPC is enabled everywhere + /// this flag should be removed and fetching pregenesis blocks will always be done + /// over the gossip network. pub async fn run( self, ctx: &ctx::Ctx, @@ -41,6 +46,7 @@ impl EN { cfg: ConsensusConfig, secrets: ConsensusSecrets, build_version: Option, + enable_pregenesis: bool, ) -> anyhow::Result<()> { let attester = config::attester_key(&secrets).context("attester_key")?; @@ -72,13 +78,15 @@ impl EN { drop(conn); // Fetch blocks before the genesis. - self.fetch_blocks( - ctx, - &mut payload_queue, - Some(global_config.genesis.first_block), - ) - .await - .wrap("fetch_blocks()")?; + if !enable_pregenesis { + self.fetch_blocks( + ctx, + &mut payload_queue, + Some(global_config.genesis.first_block), + ) + .await + .wrap("fetch_blocks()")?; + } // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. @@ -102,9 +110,14 @@ impl EN { // Run consensus component. // External nodes have a payload queue which they use to fetch data from the main node. - let (store, runner) = Store::new(ctx, self.pool.clone(), Some(payload_queue)) - .await - .wrap("Store::new()")?; + let (store, runner) = Store::new( + ctx, + self.pool.clone(), + Some(payload_queue), + Some(self.client.clone()), + ) + .await + .wrap("Store::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) @@ -112,11 +125,6 @@ impl EN { .wrap("BlockStore::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); - let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) - .await - .wrap("BatchStore::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); - let attestation = Arc::new(attestation::Controller::new(attester)); s.spawn_bg(self.run_attestation_controller( ctx, @@ -127,7 +135,6 @@ impl EN { let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, build_version)?, block_store, - batch_store, validator: config::validator_key(&secrets) .context("validator_key")? 
.map(|key| executor::Validator { @@ -210,10 +217,13 @@ impl EN { "waiting for hash of batch {:?}", status.next_batch_to_attest ); - let hash = self - .pool - .wait_for_batch_hash(ctx, status.next_batch_to_attest) - .await?; + let hash = consensus_dal::batch_hash( + &self + .pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?, + ); let Some(committee) = registry .attester_committee_for( ctx, diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 3150f839680..916b7cdd89a 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -59,8 +59,18 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - en.run(ctx, actions, cfg, secrets, Some(build_version)) - .await + // We will enable it once the main node on all envs supports + // `block_metadata()` JSON RPC method. + let enable_pregenesis = false; + en.run( + ctx, + actions, + cfg, + secrets, + Some(build_version), + enable_pregenesis, + ) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index ff9cdf86528..8bf078120aa 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -6,10 +6,6 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; mod abi; -// Currently `batch` module is only used in tests, -// but will be used in production once batch syncing is implemented in consensus. -#[allow(unused)] -mod batch; mod config; mod en; pub mod era; diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index f80bfe58954..5abbdc3503b 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -5,7 +5,7 @@ use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_consensus_storage::BlockStore; use zksync_dal::consensus_dal; use crate::{ @@ -43,7 +43,7 @@ pub async fn run_main_node( } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. 
- let (store, runner) = Store::new(ctx, pool.clone(), None) + let (store, runner) = Store::new(ctx, pool.clone(), None, None) .await .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); @@ -67,11 +67,6 @@ pub async fn run_main_node( .wrap("BlockStore::new()")?; s.spawn_bg(runner.run(ctx)); - let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) - .await - .wrap("BatchStore::new()")?; - s.spawn_bg(runner.run(ctx)); - let attestation = Arc::new(attestation::Controller::new(attester)); s.spawn_bg(run_attestation_controller( ctx, @@ -83,7 +78,6 @@ pub async fn run_main_node( let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, - batch_store, validator: Some(executor::Validator { key: validator_key, replica_store: Box::new(store.clone()), @@ -135,9 +129,10 @@ async fn run_attestation_controller( "waiting for hash of batch {:?}", status.next_batch_to_attest ); - let hash = pool - .wait_for_batch_hash(ctx, status.next_batch_to_attest) + let info = pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) .await?; + let hash = consensus_dal::batch_hash(&info); let Some(committee) = registry .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) .await diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs index 935cd673891..33392a7f206 100644 --- a/core/node/consensus/src/registry/tests.rs +++ b/core/node/consensus/src/registry/tests.rs @@ -1,5 +1,5 @@ use rand::Rng as _; -use zksync_concurrency::{ctx, scope}; +use zksync_concurrency::{ctx, scope, time}; use zksync_consensus_roles::{attester, validator::testonly::Setup}; use zksync_test_account::Account; use zksync_types::ProtocolVersionId; @@ -7,6 +7,8 @@ use zksync_types::ProtocolVersionId; use super::*; use crate::storage::ConnectionPool; +const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + /// Test checking that parsing logic matches the abi specified in the json file. #[test] fn test_consensus_registry_abi() { @@ -73,7 +75,9 @@ async fn test_attester_committee() { node.push_block(&txs).await; node.seal_batch().await; - pool.wait_for_batch(ctx, node.last_batch()).await?; + pool.wait_for_batch_info(ctx, node.last_batch(), POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // Read the attester committee using the vm. 
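`wait_for_batch_info` is a plain poll-until-available loop; the change here is that the polling interval is now passed in by the caller (the tests use a 500 ms `POLL_INTERVAL`). A simplified, synchronous sketch of the pattern follows; the real method is async and sleeps via the cancellable `ctx.sleep()`, which this sketch does not attempt to model.

use std::{thread, time::Duration};

/// Poll `lookup` until it yields a value, sleeping `interval` between attempts.
fn wait_for<T>(interval: Duration, mut lookup: impl FnMut() -> Option<T>) -> T {
    loop {
        if let Some(value) = lookup() {
            return value;
        }
        thread::sleep(interval);
    }
}

fn main() {
    let mut attempts = 0;
    // Pretend the batch info becomes available on the third poll.
    let info = wait_for(Duration::from_millis(10), || {
        attempts += 1;
        (attempts >= 3).then(|| format!("batch info after {attempts} polls"))
    });
    println!("{info}");
}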
let batch = attester::BatchNumber(node.last_batch().0.into()); diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 0f9d7c8527f..c30398498a9 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -1,18 +1,18 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; -use zksync_consensus_crypto::keccak256::Keccak256; use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; -use zksync_consensus_storage::{self as storage, BatchStoreState}; -use zksync_dal::{consensus_dal, consensus_dal::Payload, Core, CoreDal, DalError}; +use zksync_consensus_storage as storage; +use zksync_dal::{ + consensus_dal::{AttestationStatus, BlockMetadata, GlobalConfig, Payload}, + Core, CoreDal, DalError, +}; use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState}; use zksync_state_keeper::io::common::IoCursor; -use zksync_types::{ - commitment::L1BatchWithMetadata, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, -}; +use zksync_types::{fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber}; use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; -use super::{InsertCertificateError, PayloadQueue}; +use super::PayloadQueue; use crate::config; /// Context-aware `zksync_dal::ConnectionPool` wrapper. @@ -54,24 +54,24 @@ impl ConnectionPool { /// Waits for the `number` L1 batch hash. #[tracing::instrument(skip_all)] - pub async fn wait_for_batch_hash( + pub async fn wait_for_batch_info( &self, ctx: &ctx::Ctx, number: attester::BatchNumber, - ) -> ctx::Result { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + interval: time::Duration, + ) -> ctx::Result { loop { - if let Some(hash) = self + if let Some(info) = self .connection(ctx) .await .wrap("connection()")? - .batch_hash(ctx, number) + .batch_info(ctx, number) .await - .with_wrap(|| format!("batch_hash({number})"))? + .with_wrap(|| format!("batch_info({number})"))? { - return Ok(hash); + return Ok(info); } - ctx.sleep(POLL_INTERVAL).await?; + ctx.sleep(interval).await?; } } } @@ -109,16 +109,23 @@ impl<'a> Connection<'a> { .map_err(DalError::generalize)?) } - /// Wrapper for `consensus_dal().block_payloads()`. - pub async fn payloads( + pub async fn batch_info( &mut self, ctx: &ctx::Ctx, - numbers: std::ops::Range, - ) -> ctx::Result> { + n: attester::BatchNumber, + ) -> ctx::Result> { + Ok(ctx.wait(self.0.consensus_dal().batch_info(n)).await??) + } + + /// Wrapper for `consensus_dal().block_metadata()`. + pub async fn block_metadata( + &mut self, + ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result> { Ok(ctx - .wait(self.0.consensus_dal().block_payloads(numbers)) - .await? - .map_err(DalError::generalize)?) + .wait(self.0.consensus_dal().block_metadata(number)) + .await??) } /// Wrapper for `consensus_dal().block_certificate()`. @@ -138,7 +145,7 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, cert: &validator::CommitQC, - ) -> Result<(), InsertCertificateError> { + ) -> Result<(), super::InsertCertificateError> { Ok(ctx .wait(self.0.consensus_dal().insert_block_certificate(cert)) .await??) 
@@ -151,20 +158,10 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, cert: &attester::BatchQC, - ) -> Result<(), InsertCertificateError> { - use consensus_dal::InsertCertificateError as E; - let want_hash = self - .batch_hash(ctx, cert.message.number) - .await - .wrap("batch_hash()")? - .ok_or(E::MissingPayload)?; - if want_hash != cert.message.hash { - return Err(E::PayloadMismatch.into()); - } + ) -> Result<(), super::InsertCertificateError> { Ok(ctx .wait(self.0.consensus_dal().insert_batch_certificate(cert)) - .await? - .map_err(E::Other)?) + .await??) } /// Wrapper for `consensus_dal().upsert_attester_committee()`. @@ -203,37 +200,6 @@ impl<'a> Connection<'a> { .context("sqlx")?) } - /// Wrapper for `consensus_dal().batch_hash()`. - pub async fn batch_hash( - &mut self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - let n = L1BatchNumber(number.0.try_into().context("overflow")?); - let Some(meta) = ctx - .wait(self.0.blocks_dal().get_l1_batch_metadata(n)) - .await? - .context("get_l1_batch_metadata()")? - else { - return Ok(None); - }; - Ok(Some(attester::BatchHash(Keccak256::from_bytes( - StoredBatchInfo::from(&meta).hash().0, - )))) - } - - /// Wrapper for `blocks_dal().get_l1_batch_metadata()`. - pub async fn batch( - &mut self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.blocks_dal().get_l1_batch_metadata(number)) - .await? - .context("get_l1_batch_metadata()")?) - } - /// Wrapper for `FetcherCursor::new()`. pub async fn new_payload_queue( &mut self, @@ -249,10 +215,7 @@ impl<'a> Connection<'a> { } /// Wrapper for `consensus_dal().global_config()`. - pub async fn global_config( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { + pub async fn global_config(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { Ok(ctx.wait(self.0.consensus_dal().global_config()).await??) } @@ -260,7 +223,7 @@ impl<'a> Connection<'a> { pub async fn try_update_global_config( &mut self, ctx: &ctx::Ctx, - cfg: &consensus_dal::GlobalConfig, + cfg: &GlobalConfig, ) -> ctx::Result<()> { Ok(ctx .wait(self.0.consensus_dal().try_update_global_config(cfg)) @@ -273,14 +236,14 @@ impl<'a> Connection<'a> { Ok(ctx.wait(self.0.consensus_dal().next_block()).await??) } - /// Wrapper for `consensus_dal().block_certificates_range()`. + /// Wrapper for `consensus_dal().block_store_state()`. #[tracing::instrument(skip_all)] - pub(crate) async fn block_certificates_range( + pub(crate) async fn block_store_state( &mut self, ctx: &ctx::Ctx, ) -> ctx::Result { Ok(ctx - .wait(self.0.consensus_dal().block_certificates_range()) + .wait(self.0.consensus_dal().block_store_state()) .await??) } @@ -305,7 +268,7 @@ impl<'a> Connection<'a> { } tracing::info!("Performing a hard fork of consensus."); - let new = consensus_dal::GlobalConfig { + let new = GlobalConfig { genesis: validator::GenesisRaw { chain_id: spec.chain_id, fork_number: old.as_ref().map_or(validator::ForkNumber(0), |old| { @@ -334,38 +297,35 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, number: validator::BlockNumber, - ) -> ctx::Result> { - let Some(justification) = self - .block_certificate(ctx, number) - .await - .wrap("block_certificate()")? - else { + ) -> ctx::Result> { + let Some(payload) = self.payload(ctx, number).await.wrap("payload()")? else { return Ok(None); }; - let payload = self - .payload(ctx, number) + if let Some(justification) = self + .block_certificate(ctx, number) .await - .wrap("payload()")? 
- .context("L2 block disappeared from storage")?; - - Ok(Some(validator::FinalBlock { - payload: payload.encode(), - justification, - })) - } + .wrap("block_certificate()")? + { + return Ok(Some( + validator::FinalBlock { + payload: payload.encode(), + justification, + } + .into(), + )); + } - /// Wrapper for `blocks_dal().get_sealed_l1_batch_number()`. - #[tracing::instrument(skip_all)] - pub async fn get_last_batch_number( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.blocks_dal().get_sealed_l1_batch_number()) - .await? - .context("get_sealed_l1_batch_number()")? - .map(|nr| attester::BatchNumber(nr.0 as u64))) + Ok(Some( + validator::PreGenesisBlock { + number, + payload: payload.encode(), + // We won't use justification until it is possible to verify + // payload against the L1 batch commitment. + justification: validator::Justification(vec![]), + } + .into(), + )) } /// Wrapper for `blocks_dal().get_l2_block_range_of_l1_batch()`. @@ -388,83 +348,11 @@ impl<'a> Connection<'a> { })) } - /// Construct the [attester::SyncBatch] for a given batch number. - pub async fn get_batch( - &mut self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - let Some((min, max)) = self - .get_l2_block_range_of_l1_batch(ctx, number) - .await - .context("get_l2_block_range_of_l1_batch()")? - else { - return Ok(None); - }; - - let payloads = self.payloads(ctx, min..max).await.wrap("payloads()")?; - let payloads = payloads.into_iter().map(|p| p.encode()).collect(); - - // TODO: Fill out the proof when we have the stateless L1 batch validation story finished. - // It is supposed to be a Merkle proof that the rolling hash of the batch has been included - // in the L1 system contract state tree. It is *not* the Ethereum state root hash, so producing - // it can be done without an L1 client, which is only required for validation. - let batch = attester::SyncBatch { - number, - payloads, - proof: Vec::new(), - }; - - Ok(Some(batch)) - } - - /// Construct the [storage::BatchStoreState] which contains the earliest batch and the last available [attester::SyncBatch]. - #[tracing::instrument(skip_all)] - pub async fn batches_range(&mut self, ctx: &ctx::Ctx) -> ctx::Result { - let first = self - .0 - .blocks_dal() - .get_earliest_l1_batch_number() - .await - .context("get_earliest_l1_batch_number()")?; - - let first = if first.is_some() { - first - } else { - self.0 - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await - .context("get_earliest_l1_batch_number()")? - .map(|s| s.l1_batch_number) - }; - - // TODO: In the future when we start filling in the `SyncBatch::proof` field, - // we can only run `get_batch` expecting `Some` result on numbers where the - // L1 state root hash is already available, so that we can produce some - // Merkle proof that the rolling hash of the L2 blocks in the batch has - // been included in the L1 state tree. At that point we probably can't - // call `get_last_batch_number` here, but something that indicates that - // the hashes/commitments on the L1 batch are ready and the thing has - // been included in L1; that potentially requires an API client as well. - let last = self - .get_last_batch_number(ctx) - .await - .context("get_last_batch_number()")?; - - Ok(BatchStoreState { - first: first - .map(|n| attester::BatchNumber(n.0 as u64)) - .unwrap_or(attester::BatchNumber(0)), - last, - }) - } - /// Wrapper for `consensus_dal().attestation_status()`. 
pub async fn attestation_status( &mut self, ctx: &ctx::Ctx, - ) -> ctx::Result> { + ) -> ctx::Result> { Ok(ctx .wait(self.0.consensus_dal().attestation_status()) .await? diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index cb8e039d7d0..ed83758ba9f 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -1,15 +1,18 @@ use std::sync::Arc; use anyhow::Context as _; -use tokio::sync::watch::Sender; use tracing::Instrument; use zksync_concurrency::{ctx, error::Wrap as _, scope, sync, time}; use zksync_consensus_bft::PayloadManager; -use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; -use zksync_consensus_storage::{self as storage, BatchStoreState}; +use zksync_consensus_roles::validator; +use zksync_consensus_storage::{self as storage}; use zksync_dal::consensus_dal::{self, Payload}; use zksync_node_sync::fetcher::{FetchedBlock, FetchedTransaction}; use zksync_types::L2BlockNumber; +use zksync_web3_decl::{ + client::{DynClient, L2}, + namespaces::EnNamespaceClient as _, +}; use super::{Connection, PayloadQueue}; use crate::storage::{ConnectionPool, InsertCertificateError}; @@ -46,7 +49,7 @@ fn to_fetched_block( } /// Wrapper of `ConnectionPool` implementing `ReplicaStore`, `PayloadManager`, -/// `PersistentBlockStore` and `PersistentBatchStore`. +/// `PersistentBlockStore`. /// /// Contains queues to save Quorum Certificates received over gossip to the store /// as and when the payload they are over becomes available. @@ -59,8 +62,8 @@ pub(crate) struct Store { block_certificates: ctx::channel::UnboundedSender, /// Range of L2 blocks for which we have a QC persisted. blocks_persisted: sync::watch::Receiver, - /// Range of L1 batches we have persisted. - batches_persisted: sync::watch::Receiver, + /// Main node client. None if this node is the main node. + client: Option>>, } struct PersistedBlockState(sync::watch::Sender); @@ -69,7 +72,6 @@ struct PersistedBlockState(sync::watch::Sender); pub struct StoreRunner { pool: ConnectionPool, blocks_persisted: PersistedBlockState, - batches_persisted: sync::watch::Sender, block_certificates: ctx::channel::UnboundedReceiver, } @@ -78,22 +80,15 @@ impl Store { ctx: &ctx::Ctx, pool: ConnectionPool, payload_queue: Option, + client: Option>>, ) -> ctx::Result<(Store, StoreRunner)> { let mut conn = pool.connection(ctx).await.wrap("connection()")?; // Initial state of persisted blocks - let blocks_persisted = conn - .block_certificates_range(ctx) - .await - .wrap("block_certificates_range()")?; - - // Initial state of persisted batches - let batches_persisted = conn.batches_range(ctx).await.wrap("batches_range()")?; - + let blocks_persisted = conn.block_store_state(ctx).await.wrap("blocks_range()")?; drop(conn); let blocks_persisted = sync::watch::channel(blocks_persisted).0; - let batches_persisted = sync::watch::channel(batches_persisted).0; let (block_certs_send, block_certs_recv) = ctx::channel::unbounded(); Ok(( @@ -102,12 +97,11 @@ impl Store { block_certificates: block_certs_send, block_payloads: Arc::new(sync::Mutex::new(payload_queue)), blocks_persisted: blocks_persisted.subscribe(), - batches_persisted: batches_persisted.subscribe(), + client, }, StoreRunner { pool, blocks_persisted: PersistedBlockState(blocks_persisted), - batches_persisted, block_certificates: block_certs_recv, }, )) @@ -125,7 +119,7 @@ impl PersistedBlockState { /// If `persisted.first` is moved forward, it means that blocks have been pruned. 
/// If `persisted.last` is moved forward, it means that new blocks with certificates have been /// persisted. - #[tracing::instrument(skip_all, fields(first = %new.first, last = ?new.last.as_ref().map(|l| l.message.proposal.number)))] + #[tracing::instrument(skip_all, fields(first = %new.first, next = ?new.next()))] fn update(&self, new: storage::BlockStoreState) { self.0.send_if_modified(|p| { if &new == p { @@ -139,10 +133,11 @@ impl PersistedBlockState { }); } - /// Checks if the given certificate is exactly the next one that should - /// be persisted. + /// Checks if the given certificate should be eventually persisted. + /// Only certificates block store state is a range of blocks for which we already have + /// certificates and we need certs only for the later ones. fn should_be_persisted(&self, cert: &validator::CommitQC) -> bool { - self.0.borrow().next() == cert.header().number + self.0.borrow().next() <= cert.header().number } /// Appends the `cert` to `persisted` range. @@ -152,7 +147,7 @@ impl PersistedBlockState { if p.next() != cert.header().number { return false; } - p.last = Some(cert); + p.last = Some(storage::Last::Final(cert)); true }); } @@ -163,7 +158,6 @@ impl StoreRunner { let StoreRunner { pool, blocks_persisted, - batches_persisted, mut block_certificates, } = self; @@ -176,13 +170,13 @@ impl StoreRunner { ) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(1); - let range = pool + let state = pool .connection(ctx) .await? - .block_certificates_range(ctx) + .block_store_state(ctx) .await - .wrap("block_certificates_range()")?; - blocks_persisted.update(range); + .wrap("block_store_state()")?; + blocks_persisted.update(state); ctx.sleep(POLL_INTERVAL).await?; Ok(()) @@ -195,60 +189,6 @@ impl StoreRunner { } }); - #[tracing::instrument(skip_all, fields(l1_batch = %next_batch_number))] - async fn gossip_sync_batches_iteration( - ctx: &ctx::Ctx, - pool: &ConnectionPool, - next_batch_number: &mut BatchNumber, - batches_persisted: &Sender, - ) -> ctx::Result<()> { - const POLL_INTERVAL: time::Duration = time::Duration::seconds(1); - - let mut conn = pool.connection(ctx).await?; - if let Some(last_batch_number) = conn - .get_last_batch_number(ctx) - .await - .wrap("last_batch_number()")? - { - if last_batch_number >= *next_batch_number { - let range = conn.batches_range(ctx).await.wrap("batches_range()")?; - *next_batch_number = last_batch_number.next(); - tracing::info_span!("batches_persisted_send").in_scope(|| { - batches_persisted.send_replace(range); - }); - } - } - ctx.sleep(POLL_INTERVAL).await?; - - Ok(()) - } - - // NOTE: Running this update loop will trigger the gossip of `SyncBatches` which is currently - // pointless as there is no proof and we have to ignore them. We can disable it, but bear in - // mind that any node which gossips the availability will cause pushes and pulls in the consensus. - s.spawn::<()>(async { - // Loop updating `batches_persisted` whenever a new L1 batch is available in the database. - // We have to do this because the L1 batch is produced as L2 blocks are executed, - // which can happen on a different machine or in a different process, so we can't rely on some - // DAL method updating this memory construct. However I'm not sure that `BatchStoreState` - // really has to contain the full blown last batch, or whether it could have for example - // just the number of it. 
We can't just use the `attester::BatchQC`, which would make it - // analogous to the `BlockStoreState`, because the `SyncBatch` mechanism is for catching - // up with L1 batches from peers _without_ the QC, based on L1 inclusion proofs instead. - // Nevertheless since the `SyncBatch` contains all transactions for all L2 blocks, - // we can try to make it less frequent by querying just the last batch number first. - let mut next_batch_number = { batches_persisted.borrow().next() }; - loop { - gossip_sync_batches_iteration( - ctx, - &pool, - &mut next_batch_number, - &batches_persisted, - ) - .await?; - } - }); - #[tracing::instrument(skip_all)] async fn insert_block_certificates_iteration( ctx: &ctx::Ctx, @@ -339,7 +279,7 @@ impl storage::PersistentBlockStore for Store { &self, ctx: &ctx::Ctx, number: validator::BlockNumber, - ) -> ctx::Result { + ) -> ctx::Result { Ok(self .conn(ctx) .await? @@ -348,6 +288,41 @@ impl storage::PersistentBlockStore for Store { .context("not found")?) } + async fn verify_pregenesis_block( + &self, + ctx: &ctx::Ctx, + block: &validator::PreGenesisBlock, + ) -> ctx::Result<()> { + // We simply ask the main node for the payload hash and compare it against the received + // payload. + let meta = match &self.client { + None => self + .conn(ctx) + .await? + .block_metadata(ctx, block.number) + .await? + .context("metadata not in storage")?, + Some(client) => { + let meta = ctx + .wait(client.block_metadata(L2BlockNumber( + block.number.0.try_into().context("overflow")?, + ))) + .await? + .context("block_metadata()")? + .context("metadata not available")?; + zksync_protobuf::serde::Deserialize { + deny_unknown_fields: false, + } + .proto_fmt(&meta.0) + .context("deserialize()")? + } + }; + if meta.payload_hash != block.payload.hash() { + return Err(anyhow::format_err!("payload hash mismatch").into()); + } + Ok(()) + } + /// If actions queue is set (and the block has not been stored yet), /// the block will be translated into a sequence of actions. /// The received actions should be fed @@ -356,19 +331,21 @@ impl storage::PersistentBlockStore for Store { /// `store_next_block()` call will wait synchronously for the L2 block. /// Once the L2 block is observed in storage, `store_next_block()` will store a cert for this /// L2 block. - async fn queue_next_block( - &self, - ctx: &ctx::Ctx, - block: validator::FinalBlock, - ) -> ctx::Result<()> { + async fn queue_next_block(&self, ctx: &ctx::Ctx, block: validator::Block) -> ctx::Result<()> { let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async(); + let (p, j) = match &block { + validator::Block::Final(block) => (&block.payload, Some(&block.justification)), + validator::Block::PreGenesis(block) => (&block.payload, None), + }; if let Some(payloads) = &mut *payloads { payloads - .send(to_fetched_block(block.number(), &block.payload).context("to_fetched_block")?) + .send(to_fetched_block(block.number(), p).context("to_fetched_block")?) .await - .context("payload_queue.send()")?; + .context("payloads.send()")?; + } + if let Some(justification) = j { + self.block_certificates.send(justification.clone()); } - self.block_certificates.send(block.justification); Ok(()) } } @@ -455,43 +432,3 @@ impl PayloadManager for Store { Ok(()) } } - -#[async_trait::async_trait] -impl storage::PersistentBatchStore for Store { - /// Range of batches persisted in storage. - fn persisted(&self) -> sync::watch::Receiver { - self.batches_persisted.clone() - } - - /// Returns the batch with the given number. 
- async fn get_batch( - &self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - self.conn(ctx) - .await? - .get_batch(ctx, number) - .await - .wrap("get_batch") - } - - /// Queue the batch to be persisted in storage. - /// - /// The caller [BatchStore] ensures that this is only called when the batch is the next expected one. - async fn queue_next_batch( - &self, - _ctx: &ctx::Ctx, - _batch: attester::SyncBatch, - ) -> ctx::Result<()> { - // Currently the gossiping of `SyncBatch` and the `BatchStoreState` is unconditionally started by the `Network::run_stream` in consensus, - // and as long as any node reports new batches available by updating the `PersistentBatchStore::persisted` here, the other nodes - // will start pulling the corresponding batches, which will end up being passed to this method. - // If we return an error here or panic, it will stop the whole consensus task tree due to the way scopes work, so instead just return immediately. - // In the future we have to validate the proof agains the L1 state root hash, which IIUC we can't do just yet. - - // Err(anyhow::format_err!("unimplemented: queue_next_batch should not be called until we have the stateless L1 batch story completed.").into()) - - Ok(()) - } -} diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index 5817e766c6b..2aed011d23c 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -7,8 +7,8 @@ use zksync_dal::CoreDal as _; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{recover, snapshot, Snapshot}; use zksync_types::{ - commitment::L1BatchWithMetadata, protocol_version::ProtocolSemanticVersion, - system_contracts::get_system_smart_contracts, L1BatchNumber, L2BlockNumber, ProtocolVersionId, + protocol_version::ProtocolSemanticVersion, system_contracts::get_system_smart_contracts, + L1BatchNumber, L2BlockNumber, ProtocolVersionId, }; use super::{Connection, ConnectionPool}; @@ -102,28 +102,6 @@ impl ConnectionPool { Ok(()) } - /// Waits for the `number` L1 batch. - pub async fn wait_for_batch( - &self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); - loop { - if let Some(payload) = self - .connection(ctx) - .await - .wrap("connection()")? - .batch(ctx, number) - .await - .wrap("batch()")? - { - return Ok(payload); - } - ctx.sleep(POLL_INTERVAL).await?; - } - } - /// Takes a storage snapshot at the last sealed L1 batch. pub(crate) async fn snapshot(&self, ctx: &ctx::Ctx) -> ctx::Result { let mut conn = self.connection(ctx).await.wrap("connection()")?; @@ -152,21 +130,32 @@ impl ConnectionPool { Self(pool) } - /// Waits for `want_last` block to have certificate then fetches all L2 blocks with certificates. - pub async fn wait_for_block_certificates( + /// Waits for `want_last` block then fetches all L2 blocks with certificates. + pub async fn wait_for_blocks( &self, ctx: &ctx::Ctx, want_last: validator::BlockNumber, - ) -> ctx::Result> { - self.wait_for_block_certificate(ctx, want_last).await?; + ) -> ctx::Result> { + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); + let state = loop { + let state = self + .connection(ctx) + .await + .wrap("connection()")? 
+ .block_store_state(ctx) + .await + .wrap("block_store_state()")?; + tracing::info!("state.next() = {}", state.next()); + if state.next() > want_last { + break state; + } + ctx.sleep(POLL_INTERVAL).await?; + }; + + assert_eq!(want_last.next(), state.next()); let mut conn = self.connection(ctx).await.wrap("connection()")?; - let range = conn - .block_certificates_range(ctx) - .await - .wrap("certificates_range()")?; - assert_eq!(want_last.next(), range.next()); - let mut blocks: Vec = vec![]; - for i in range.first.0..range.next().0 { + let mut blocks: Vec = vec![]; + for i in state.first.0..state.next().0 { let i = validator::BlockNumber(i); let block = conn.block(ctx, i).await.context("block()")?.unwrap(); blocks.push(block); @@ -174,13 +163,13 @@ impl ConnectionPool { Ok(blocks) } - /// Same as `wait_for_certificates`, but additionally verifies all the blocks against genesis. - pub async fn wait_for_block_certificates_and_verify( + /// Same as `wait_for_blocks`, but additionally verifies all certificates. + pub async fn wait_for_blocks_and_verify_certs( &self, ctx: &ctx::Ctx, want_last: validator::BlockNumber, - ) -> ctx::Result> { - let blocks = self.wait_for_block_certificates(ctx, want_last).await?; + ) -> ctx::Result> { + let blocks = self.wait_for_blocks(ctx, want_last).await?; let cfg = self .connection(ctx) .await @@ -190,7 +179,9 @@ impl ConnectionPool { .wrap("genesis()")? .context("genesis is missing")?; for block in &blocks { - block.verify(&cfg.genesis).context(block.number())?; + if let validator::Block::Final(block) = block { + block.verify(&cfg.genesis).context(block.number())?; + } } Ok(blocks) } @@ -228,19 +219,11 @@ impl ConnectionPool { let registry = registry::Registry::new(cfg.genesis.clone(), self.clone()).await; for i in first.0..want_last.0 { let i = attester::BatchNumber(i); - let hash = conn - .batch_hash(ctx, i) - .await - .wrap("batch_hash()")? - .context("hash missing")?; let cert = conn .batch_certificate(ctx, i) .await .wrap("batch_certificate")? .context("cert missing")?; - if cert.message.hash != hash { - return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into()); - } let committee = registry .attester_committee_for(ctx, registry_addr, i) .await @@ -255,28 +238,30 @@ impl ConnectionPool { pub async fn prune_batches( &self, ctx: &ctx::Ctx, - last_batch: L1BatchNumber, + last_batch: attester::BatchNumber, ) -> ctx::Result<()> { let mut conn = self.connection(ctx).await.context("connection()")?; - let (_, last_block) = ctx - .wait( - conn.0 - .blocks_dal() - .get_l2_block_range_of_l1_batch(last_batch), - ) - .await? - .context("get_l2_block_range_of_l1_batch()")? - .context("batch not found")?; - conn.0 - .pruning_dal() - .soft_prune_batches_range(last_batch, last_block) - .await - .context("soft_prune_batches_range()")?; - conn.0 - .pruning_dal() - .hard_prune_batches_range(last_batch, last_block) + let (_, last_block) = conn + .get_l2_block_range_of_l1_batch(ctx, last_batch) .await - .context("hard_prune_batches_range()")?; + .wrap("get_l2_block_range_of_l1_batch()")? + .context("batch not found")?; + let last_batch = L1BatchNumber(last_batch.0.try_into().context("oveflow")?); + let last_block = L2BlockNumber(last_block.0.try_into().context("oveflow")?); + ctx.wait( + conn.0 + .pruning_dal() + .soft_prune_batches_range(last_batch, last_block), + ) + .await? + .context("soft_prune_batches_range()")?; + ctx.wait( + conn.0 + .pruning_dal() + .hard_prune_batches_range(last_batch, last_block), + ) + .await? 
+ .context("hard_prune_batches_range()")?; Ok(()) } } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 04a2dfbc083..4538337109a 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -16,10 +16,7 @@ use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_network as network; use zksync_consensus_roles::{attester, validator, validator::testonly::Setup}; use zksync_dal::{CoreDal, DalError}; -use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; -use zksync_metadata_calculator::{ - LazyAsyncTreeReader, MetadataCalculator, MetadataCalculatorConfig, -}; +use zksync_metadata_calculator::{MetadataCalculator, MetadataCalculatorConfig}; use zksync_node_api_server::web3::{state::InternalApiConfig, testonly::TestServerBuilder}; use zksync_node_genesis::GenesisParams; use zksync_node_sync::{ @@ -48,11 +45,7 @@ use zksync_types::{ }; use zksync_web3_decl::client::{Client, DynClient, L2}; -use crate::{ - batch::{L1BatchCommit, L1BatchWithWitness, LastBlockCommit}, - en, - storage::ConnectionPool, -}; +use crate::{en, storage::ConnectionPool}; /// Fake StateKeeper for tests. #[derive(Debug)] @@ -70,7 +63,6 @@ pub(super) struct StateKeeper { sync_state: SyncState, addr: sync::watch::Receiver>, pool: ConnectionPool, - tree_reader: LazyAsyncTreeReader, } #[derive(Clone)] @@ -78,6 +70,7 @@ pub(super) struct ConfigSet { net: network::Config, pub(super) config: config::ConsensusConfig, pub(super) secrets: config::ConsensusSecrets, + pub(super) enable_pregenesis: bool, } impl ConfigSet { @@ -87,11 +80,17 @@ impl ConfigSet { config: make_config(&net, None), secrets: make_secrets(&net, None), net, + enable_pregenesis: self.enable_pregenesis, } } } -pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) -> Vec { +pub(super) fn new_configs( + rng: &mut impl Rng, + setup: &Setup, + seed_peers: usize, + pregenesis: bool, +) -> Vec { let net_cfgs = network::testonly::new_configs(rng, setup, 0); let genesis_spec = config::GenesisSpec { chain_id: setup.genesis.chain_id.0.try_into().unwrap(), @@ -131,6 +130,7 @@ pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) config: make_config(&net, Some(genesis_spec.clone())), secrets: make_secrets(&net, setup.attester_keys.get(i).cloned()), net, + enable_pregenesis: pregenesis, }) .collect() } @@ -248,7 +248,6 @@ impl StateKeeper { let metadata_calculator = MetadataCalculator::new(config, None, pool.0.clone()) .await .context("MetadataCalculator::new()")?; - let tree_reader = metadata_calculator.tree_reader(); Ok(( Self { protocol_version, @@ -261,7 +260,6 @@ impl StateKeeper { sync_state: sync_state.clone(), addr: addr.subscribe(), pool: pool.clone(), - tree_reader, }, StateKeeperRunner { actions_queue, @@ -369,51 +367,14 @@ impl StateKeeper { } /// Batch of the `last_block`. - pub fn last_batch(&self) -> L1BatchNumber { - self.last_batch + pub fn last_batch(&self) -> attester::BatchNumber { + attester::BatchNumber(self.last_batch.0.into()) } /// Last L1 batch that has been sealed and will have /// metadata computed eventually. - pub fn last_sealed_batch(&self) -> L1BatchNumber { - self.last_batch - (!self.batch_sealed) as u32 - } - - /// Loads a commitment to L1 batch directly from the database. - // TODO: ideally, we should rather fake fetching it from Ethereum. - // We can use `zksync_eth_client::clients::MockEthereum` for that, - // which implements `EthInterface`. 
It should be enough to use - // `MockEthereum.with_call_handler()`. - pub async fn load_batch_commit( - &self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result { - // TODO: we should mock the `eth_sender` as well. - let mut conn = self.pool.connection(ctx).await?; - let this = conn.batch(ctx, number).await?.context("missing batch")?; - let prev = conn - .batch(ctx, number - 1) - .await? - .context("missing batch")?; - Ok(L1BatchCommit { - number, - this_batch: LastBlockCommit { - info: StoredBatchInfo::from(&this).hash(), - }, - prev_batch: LastBlockCommit { - info: StoredBatchInfo::from(&prev).hash(), - }, - }) - } - - /// Loads an `L1BatchWithWitness`. - pub async fn load_batch_with_witness( - &self, - ctx: &ctx::Ctx, - n: L1BatchNumber, - ) -> ctx::Result { - L1BatchWithWitness::load(ctx, n, &self.pool, &self.tree_reader).await + pub fn last_sealed_batch(&self) -> attester::BatchNumber { + attester::BatchNumber((self.last_batch.0 - (!self.batch_sealed) as u32).into()) } /// Connects to the json RPC endpoint exposed by the state keeper. @@ -473,6 +434,7 @@ impl StateKeeper { cfgs.config, cfgs.secrets, cfgs.net.build_version, + cfgs.enable_pregenesis, ) .await } diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index 35d849ae616..bd3886bd4c8 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use rand::Rng as _; -use test_casing::test_casing; +use test_casing::{test_casing, Product}; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_consensus_roles::{ @@ -9,10 +9,10 @@ use zksync_consensus_roles::{ }; use zksync_dal::consensus_dal; use zksync_test_account::Account; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; +use zksync_types::ProtocolVersionId; use zksync_web3_decl::namespaces::EnNamespaceClient as _; -use super::VERSIONS; +use super::{POLL_INTERVAL, PREGENESIS, VERSIONS}; use crate::{ mn::run_main_node, registry::{testonly, Registry}, @@ -34,13 +34,13 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); // Setup nontrivial genesis. - while sk.last_sealed_batch() < L1BatchNumber(3) { + while sk.last_sealed_batch() < attester::BatchNumber(3) { sk.push_random_blocks(rng, account, 10).await; } let mut setup = SetupSpec::new(rng, 3); setup.first_block = sk.last_block(); let first_batch = sk.last_batch(); - let setup = Setup::from(setup); + let setup = Setup::from_spec(rng, setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; conn.try_update_global_config( ctx, @@ -54,7 +54,9 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { .wrap("try_update_global_config()")?; // Make sure that the first_batch is actually sealed. sk.seal_batch().await; - pool.wait_for_batch(ctx, first_batch).await?; + pool.wait_for_batch_info(ctx, first_batch, POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // Connect to API endpoint. 
let api = sk.connect(ctx).await?; @@ -84,11 +86,11 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { { let mut conn = pool.connection(ctx).await?; let number = status.next_batch_to_attest; - let hash = conn.batch_hash(ctx, number).await?.unwrap(); + let info = conn.batch_info(ctx, number).await?.unwrap(); let gcfg = conn.global_config(ctx).await?.unwrap(); let m = attester::Batch { number, - hash, + hash: consensus_dal::batch_hash(&info), genesis: gcfg.genesis.hash(), }; let mut sigs = attester::MultiSig::default(); @@ -124,9 +126,9 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Test running a couple of attesters (which are also validators). // Main node is expected to collect all certificates. // External nodes are expected to just vote for the batch. -#[test_casing(2, VERSIONS)] +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_multiple_attesters(version: ProtocolVersionId) { +async fn test_multiple_attesters(version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 4; zksync_concurrency::testonly::abort_on_panic(); @@ -135,7 +137,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId) { let account = &mut Account::random(); let to_fund = &[account.address]; let setup = Setup::new(rng, 4); - let mut cfgs = new_configs(rng, &setup, NODES); + let mut cfgs = new_configs(rng, &setup, NODES, pregenesis); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?; diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs deleted file mode 100644 index f0cae7f2c02..00000000000 --- a/core/node/consensus/src/tests/batch.rs +++ /dev/null @@ -1,124 +0,0 @@ -use test_casing::{test_casing, Product}; -use zksync_concurrency::{ctx, scope}; -use zksync_consensus_roles::validator; -use zksync_test_account::Account; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; - -use super::{FROM_SNAPSHOT, VERSIONS}; -use crate::{storage::ConnectionPool, testonly}; - -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test] -async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let pool = ConnectionPool::test(from_snapshot, version).await; - let account = &mut Account::random(); - - // Fill storage with unsigned L2 blocks and L1 batches in a way that the - // last L1 batch is guaranteed to have some L2 blocks executed in it. - scope::run!(ctx, |ctx, s| async { - // Start state keeper. - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - - for _ in 0..3 { - for _ in 0..2 { - sk.push_random_block(rng, account).await; - } - sk.seal_batch().await; - } - sk.push_random_block(rng, account).await; - - pool.wait_for_payload(ctx, sk.last_block()).await?; - - Ok(()) - }) - .await - .unwrap(); - - // Now we can try to retrieve the batch. - scope::run!(ctx, |ctx, _s| async { - let mut conn = pool.connection(ctx).await?; - let batches = conn.batches_range(ctx).await?; - let last = batches.last.expect("last is set"); - let (min, max) = conn - .get_l2_block_range_of_l1_batch(ctx, last) - .await? - .unwrap(); - - let last_batch = conn - .get_batch(ctx, last) - .await? 
- .expect("last batch can be retrieved"); - - assert_eq!( - last_batch.payloads.len(), - (max.0 - min.0) as usize, - "all block payloads present" - ); - - let first_payload = last_batch - .payloads - .first() - .expect("last batch has payloads"); - - let want_payload = conn.payload(ctx, min).await?.expect("payload is in the DB"); - let want_payload = want_payload.encode(); - - assert_eq!( - first_payload, &want_payload, - "first payload is the right number" - ); - - anyhow::Ok(()) - }) - .await - .unwrap(); -} - -/// Tests that generated L1 batch witnesses can be verified successfully. -/// TODO: add tests for verification failures. -#[test_casing(2, VERSIONS)] -#[tokio::test] -async fn test_batch_witness(version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let account = &mut Account::random(); - let to_fund = &[account.address]; - - scope::run!(ctx, |ctx, s| async { - let pool = ConnectionPool::from_genesis(version).await; - let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run_real(ctx, to_fund)); - - tracing::info!("analyzing storage"); - { - let mut conn = pool.connection(ctx).await.unwrap(); - let mut n = validator::BlockNumber(0); - while let Some(p) = conn.payload(ctx, n).await? { - tracing::info!("block[{n}] = {p:?}"); - n = n + 1; - } - } - - // Seal a bunch of batches. - node.push_random_blocks(rng, account, 10).await; - node.seal_batch().await; - pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; - // We can verify only 2nd batch onward, because - // batch witness verifies parent of the last block of the - // previous batch (and 0th batch contains only 1 block). - for n in 2..=node.last_sealed_batch().0 { - let n = L1BatchNumber(n); - let batch_with_witness = node.load_batch_with_witness(ctx, n).await?; - let commit = node.load_batch_commit(ctx, n).await?; - batch_with_witness.verify(&commit)?; - } - Ok(()) - }) - .await - .unwrap(); -} diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 52abe3c810c..94fbcbb90d8 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -2,14 +2,14 @@ use anyhow::Context as _; use rand::Rng as _; use test_casing::{test_casing, Product}; use tracing::Instrument as _; -use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_config::configs::consensus as config; use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_roles::{ node, validator, validator::testonly::{Setup, SetupSpec}, }; -use zksync_consensus_storage::BlockStore; +use zksync_consensus_storage::{BlockStore, PersistentBlockStore}; use zksync_dal::consensus_dal; use zksync_test_account::Account; use zksync_types::ProtocolVersionId; @@ -21,10 +21,100 @@ use crate::{ }; mod attestation; -mod batch; const VERSIONS: [ProtocolVersionId; 2] = [ProtocolVersionId::latest(), ProtocolVersionId::next()]; const FROM_SNAPSHOT: [bool; 2] = [true, false]; +const PREGENESIS: [bool; 2] = [true, false]; +const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + +#[test_casing(2, VERSIONS)] +#[tokio::test] +async fn test_verify_pregenesis_block(version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let mut setup = 
SetupSpec::new(rng, 3); + setup.first_block = validator::BlockNumber(1000); + let setup = Setup::from_spec(rng, setup); + let cfg = consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + seed_peers: [].into(), + }; + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Start state keeper."); + let pool = ConnectionPool::test(/*from_snapshot=*/ false, version).await; + pool.connection(ctx) + .await + .wrap("connection()")? + .try_update_global_config(ctx, &cfg) + .await + .wrap("try_update_global_config()")?; + let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + + tracing::info!("Populate storage with a bunch of blocks."); + sk.push_random_blocks(rng, account, 5).await; + sk.seal_batch().await; + let blocks: Vec<_> = pool + .wait_for_blocks(ctx, sk.last_block()) + .await + .context("wait_for_blocks()")? + .into_iter() + .map(|b| match b { + validator::Block::PreGenesis(b) => b, + _ => panic!(), + }) + .collect(); + assert!(!blocks.is_empty()); + + tracing::info!("Create another store"); + let pool = ConnectionPool::test(/*from_snapshot=*/ false, version).await; + pool.connection(ctx) + .await + .wrap("connection()")? + .try_update_global_config(ctx, &cfg) + .await + .wrap("try_update_global_config()")?; + let (store, runner) = Store::new( + ctx, + pool.clone(), + None, + Some(sk.connect(ctx).await.unwrap()), + ) + .await + .unwrap(); + s.spawn_bg(runner.run(ctx)); + + tracing::info!("All the blocks from the main node should be valid."); + for b in &blocks { + store.verify_pregenesis_block(ctx, b).await.unwrap(); + } + tracing::info!("Malformed blocks should not be valid"); + for b in &blocks { + let mut p = consensus_dal::Payload::decode(&b.payload).unwrap(); + // Arbitrary small change. + p.timestamp = rng.gen(); + store + .verify_pregenesis_block( + ctx, + &validator::PreGenesisBlock { + number: b.number, + justification: b.justification.clone(), + payload: p.encode(), + }, + ) + .await + .unwrap_err(); + } + + Ok(()) + }) + .await + .unwrap(); +} #[test_casing(2, VERSIONS)] #[tokio::test] @@ -36,7 +126,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { let account = &mut Account::random(); // Fill storage with unsigned L2 blocks. - // Fetch a suffix of blocks that we will generate (fake) certs for. + // Fetch a suffix of blocks that we will generate certs for. let want = scope::run!(ctx, |ctx, s| async { // Start state keeper. let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; @@ -44,8 +134,9 @@ async fn test_validator_block_store(version: ProtocolVersionId) { sk.push_random_blocks(rng, account, 10).await; pool.wait_for_payload(ctx, sk.last_block()).await?; let mut setup = SetupSpec::new(rng, 3); - setup.first_block = validator::BlockNumber(4); - let mut setup = Setup::from(setup); + setup.first_block = validator::BlockNumber(0); + setup.first_pregenesis_block = setup.first_block; + let mut setup = Setup::from_spec(rng, setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; conn.try_update_global_config( ctx, @@ -75,7 +166,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // Insert blocks one by one and check the storage state. 
for (i, block) in want.iter().enumerate() { scope::run!(ctx, |ctx, s| async { - let (store, runner) = Store::new(ctx, pool.clone(), None).await.unwrap(); + let (store, runner) = Store::new(ctx, pool.clone(), None, None).await.unwrap(); s.spawn_bg(runner.run(ctx)); let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())).await.unwrap(); @@ -85,10 +176,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { .wait_until_persisted(ctx, block.number()) .await .unwrap(); - let got = pool - .wait_for_block_certificates(ctx, block.number()) - .await - .unwrap(); + let got = pool.wait_for_blocks(ctx, block.number()).await.unwrap(); assert_eq!(want[..=i], got); Ok(()) }) @@ -100,14 +188,14 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // In the current implementation, consensus certificates are created asynchronously // for the L2 blocks constructed by the StateKeeper. This means that consensus actor // is effectively just back filling the consensus certificates for the L2 blocks in storage. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_validator(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -149,9 +237,9 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Verify all certificates"); pool - .wait_for_block_certificates_and_verify(ctx, sk.last_block()) + .wait_for_blocks_and_verify_certs(ctx, sk.last_block()) .await - .context("wait_for_block_certificates_and_verify()")?; + .context("wait_for_blocks_and_verify_certs()")?; Ok(()) }) .await @@ -164,14 +252,14 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { } // Test running a validator node and 2 full nodes recovered from different snapshots. -#[test_casing(2, VERSIONS)] +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { +async fn test_nodes_from_various_snapshots(version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -226,15 +314,15 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { tracing::info!("produce more blocks and compare storages"); validator.push_random_blocks(rng, account, 5).await; let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; // node stores should be suffixes for validator store. 
for got in [ node_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?, node_pool2 - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?, ] { assert_eq!(want[want.len() - got.len()..], got[..]); @@ -245,14 +333,14 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { .unwrap(); } -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let mut validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let mut validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -304,12 +392,12 @@ async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; assert_eq!( want, node_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await? ); Ok(()) @@ -322,16 +410,16 @@ async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { // Test running a validator node and a couple of full nodes. // Validator is producing signed blocks and fetchers are expected to fetch // them directly or indirectly. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 2; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); // topology: @@ -391,13 +479,15 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { // Note that block from before and after genesis have to be fetched. validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); + tracing::info!("Waiting for the validator to produce block {want_last}."); let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; + tracing::info!("Waiting for the nodes to fetch block {want_last}."); for pool in &node_pools { assert_eq!( want, - pool.wait_for_block_certificates_and_verify(ctx, want_last) + pool.wait_for_blocks_and_verify_certs(ctx, want_last) .await? 
); } @@ -408,16 +498,16 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { } // Test running external node (non-leader) validators. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 3; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); - let cfgs = testonly::new_configs(rng, &setup, 1); + let cfgs = testonly::new_configs(rng, &setup, 1, pregenesis); let account = &mut Account::random(); // Run all nodes in parallel. @@ -475,12 +565,12 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { main_node.push_random_blocks(rng, account, 5).await; let want_last = main_node.last_block(); let want = main_node_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; for pool in &ext_node_pools { assert_eq!( want, - pool.wait_for_block_certificates_and_verify(ctx, want_last) + pool.wait_for_blocks_and_verify_certs(ctx, want_last) .await? ); } @@ -491,14 +581,18 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { } // Test fetcher back filling missing certs. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_p2p_fetcher_backfill_certs( + from_snapshot: bool, + version: ProtocolVersionId, + pregenesis: bool, +) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -555,10 +649,10 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg)); validator.push_random_blocks(rng, account, 3).await; let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; let got = node_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; assert_eq!(want, got); Ok(()) @@ -571,14 +665,14 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV .unwrap(); } -#[test_casing(2, VERSIONS)] +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_with_pruning(version: ProtocolVersionId) { +async fn test_with_pruning(version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let 
account = &mut Account::random(); @@ -642,27 +736,28 @@ async fn test_with_pruning(version: ProtocolVersionId) { validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool - .wait_for_batch(ctx, validator.last_sealed_batch()) - .await?; + .wait_for_batch_info(ctx, validator.last_sealed_batch(), POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // The main node is not supposed to be pruned. In particular `ConsensusDal::attestation_status` // does not look for where the last prune happened at, and thus if we prune the block genesis // points at, we might never be able to start the Executor. tracing::info!("Wait until the external node has all the batches we want to prune"); node_pool - .wait_for_batch(ctx, to_prune.next()) + .wait_for_batch_info(ctx, to_prune.next(), POLL_INTERVAL) .await - .context("wait_for_batch()")?; + .wrap("wait_for_batch_info()")?; tracing::info!("Prune some blocks and sync more"); node_pool .prune_batches(ctx, to_prune) .await - .context("prune_batches")?; + .wrap("prune_batches")?; validator.push_random_blocks(rng, account, 5).await; node_pool - .wait_for_block_certificates(ctx, validator.last_block()) + .wait_for_blocks(ctx, validator.last_block()) .await - .context("wait_for_block_certificates()")?; + .wrap("wait_for_blocks()")?; Ok(()) }) .await diff --git a/prover/Cargo.lock b/prover/Cargo.lock index c085c1b5455..1d584a473d9 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -2393,6 +2393,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "handlebars" +version = "3.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4498fc115fa7d34de968184e473529abb40eeb6be8bc5f7faba3d08c316cb3e3" +dependencies = [ + "log", + "pest", + "pest_derive", + "quick-error 2.0.1", + "serde", + "serde_json", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -4629,6 +4643,12 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + [[package]] name = "quick-protobuf" version = "0.8.1" @@ -5213,7 +5233,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error", + "quick-error 1.2.3", "tempfile", "wait-timeout", ] @@ -7796,9 +7816,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -7836,9 +7856,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -7857,9 +7877,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = "e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", @@ -7879,9 +7899,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" +checksum = "b2b2aab4ed18b13cd584f4edcc2546c8da82f89ac62e525063e12935ff28c9be" dependencies = [ "anyhow", "async-trait", @@ -7899,9 +7919,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand 0.8.5", @@ -7986,10 +8006,13 @@ dependencies = [ "tracing", "vise", "zksync_concurrency", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_consensus_storage", + "zksync_consensus_utils", "zksync_contracts", "zksync_db_connection", + "zksync_l1_contract_interface", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", @@ -8098,6 +8121,21 @@ dependencies = [ "zkevm_circuits 0.150.5", ] +[[package]] +name = "zksync_l1_contract_interface" +version = "0.1.0" +dependencies = [ + "anyhow", + "hex", + "once_cell", + "sha2 0.10.8", + "sha3 0.10.8", + "zksync_kzg", + "zksync_prover_interface", + "zksync_solidity_vk_codegen", + "zksync_types", +] + [[package]] name = "zksync_mini_merkle_tree" version = "0.1.0" @@ -8209,9 +8247,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -8230,9 +8268,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck 0.5.0", @@ -8472,6 +8510,23 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_solidity_vk_codegen" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b310ab8a21681270e73f177ddf7974cabb7a96f0624ab8b008fd6ee1f9b4f687" +dependencies = [ + "ethereum-types", + "franklin-crypto", + "handlebars", + "hex", + "paste", + "rescue_poseidon", + "serde", + "serde_derive", + "serde_json", +] + [[package]] name = "zksync_system_constants" version = "0.1.0" diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 97d4d181c52..f6f8087c699 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -6780,9 +6780,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -6818,9 +6818,9 @@ dependencies = [ [[package]] name = 
"zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -6839,9 +6839,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = "e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", @@ -6861,9 +6861,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand", @@ -6912,9 +6912,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -6933,9 +6933,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index d37c7e25677..cb442a6182e 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -32,11 +32,11 @@ git_version_macro = { path = "crates/git_version_macro" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_consensus_roles = "=0.3.0" -zksync_consensus_crypto = "=0.3.0" -zksync_consensus_utils = "=0.3.0" -zksync_protobuf = "=0.3.0" -zksync_protobuf_build = "=0.3.0" +zksync_consensus_roles = "=0.5.0" +zksync_consensus_crypto = "=0.5.0" +zksync_consensus_utils = "=0.5.0" +zksync_protobuf = "=0.5.0" +zksync_protobuf_build = "=0.5.0" # External dependencies anyhow = "1.0.82" diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs index 711a4bffae2..facd98850d4 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs @@ -30,7 +30,7 @@ mod wallet; pub enum TestCommands { #[clap(about = MSG_INTEGRATION_TESTS_ABOUT, alias = "i")] Integration(IntegrationArgs), - #[clap(about = "Run fees test", alias = "i")] + #[clap(about = "Run fees test", alias = "f")] Fees(FeesArgs), #[clap(about = MSG_REVERT_TEST_ABOUT, alias = "r")] Revert(RevertArgs), From 408d339a51cdda73c4eab9e6fa98e0f02d90c64e Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Thu, 10 Oct 2024 16:29:00 +1100 Subject: [PATCH 029/140] chore(zk_toolbox): use published sqruff-lib (#3053) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Makes zks 
use a published version of sqruff-lib ## Why ❔ zks is no longer installable (git version of sqruff-lib got bumped to 0.19.0 today) ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- zk_toolbox/Cargo.lock | 19 +++++++++++-------- zk_toolbox/crates/zk_supervisor/Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index f6f8087c699..279afaaf1b9 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -3827,7 +3827,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.10.5", "proc-macro2", "quote", "syn 2.0.79", @@ -5166,8 +5166,9 @@ dependencies = [ [[package]] name = "sqruff-lib" -version = "0.18.2" -source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "676775189e83a98fc603d59fc6d760a66895d511502a538081dac993fde1a09a" dependencies = [ "ahash", "anstyle", @@ -5200,8 +5201,9 @@ dependencies = [ [[package]] name = "sqruff-lib-core" -version = "0.18.2" -source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48ec5ba65376ae9ba3e3dda153668dcb6452a7212ee7b4c9d48e053eb4f0f3fa" dependencies = [ "ahash", "enum_dispatch", @@ -5220,8 +5222,9 @@ dependencies = [ [[package]] name = "sqruff-lib-dialects" -version = "0.18.2" -source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00fa1cd168dad593f8f6996d805acc1fd52c6d0ad0f6f5847a9cc22a6198cfc2" dependencies = [ "ahash", "itertools 0.13.0", @@ -6312,7 +6315,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml index 158abe4e2ec..d343e7af43e 100644 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml @@ -29,4 +29,4 @@ futures.workspace = true types.workspace = true serde_yaml.workspace = true zksync_basic_types.workspace = true -sqruff-lib = { git = "https://github.com/quarylabs/sqruff", version = "0.18.2" } +sqruff-lib = "0.19.0" From e51ec49d3822bc022d7f727344106aaeb1dc5c30 Mon Sep 17 00:00:00 2001 From: Dustin Brickwood Date: Thu, 10 Oct 2024 02:36:10 -0500 Subject: [PATCH 030/140] feat: adds support for vyper 0.4.0 verification as its now supported since zkvyper 1.5.1 (#3049) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I am not sure if any other changes are required to facilitate supporting vyper 0.4.0 verification so please advise accordingly. 
## What ❔ - Adds support for vyper 0.4.0 verification since zkvyper has supported 0.4.0 since [1.5.1](https://github.com/matter-labs/era-compiler-vyper/blob/main/CHANGELOG.md#151---2024-06-27). ## Why ❔ - Contract verification does not allow for 0.4.0, see the issue: https://github.com/matter-labs/block-explorer/issues/290 ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> --- docker/contract-verifier/Dockerfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7943dae835a..5688db2e3f5 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -91,6 +91,10 @@ RUN mkdir -p /etc/vyper-bin/0.3.10 \ && wget -O vyper0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10%2Bcommit.91361694.linux \ && mv vyper0.3.10 /etc/vyper-bin/0.3.10/vyper \ && chmod +x /etc/vyper-bin/0.3.10/vyper +RUN mkdir -p /etc/vyper-bin/0.4.0 \ + && wget -O vyper0.4.0 https://github.com/vyperlang/vyper/releases/download/v0.4.0/vyper.0.4.0+commit.e9db8d9f.linux \ + && mv vyper0.4.0 /etc/vyper-bin/0.4.0/vyper \ + && chmod +x /etc/vyper-bin/0.4.0/vyper COPY --from=builder /usr/src/zksync/target/release/zksync_contract_verifier /usr/bin/ COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ From 91d0595631cc5f5bffc42a4b04d5015d2be659b1 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:23:44 +0300 Subject: [PATCH 031/140] fix: bincode deserialize for WitnessInputData (#3055) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fix bincode deserialization for WitnessInputData ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
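The change below keeps previously stored blobs readable by first trying the current layout and, only on failure, retrying with the legacy layout and converting it. A minimal, self-contained sketch of that fallback pattern, assuming `serde` (with derive) and `bincode` 1.x as dependencies; `LegacyFormat`, `NewFormat`, and `deserialize_compat` are illustrative names, not the actual zksync types:

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct LegacyFormat {
    value: u64,
}

#[derive(Debug, Serialize, Deserialize)]
struct NewFormat {
    value: u64,
    // Field added after the legacy payloads were written.
    extra: Option<u32>,
}

impl From<LegacyFormat> for NewFormat {
    fn from(legacy: LegacyFormat) -> Self {
        Self { value: legacy.value, extra: None }
    }
}

/// Try the current layout first; fall back to the legacy layout and convert.
fn deserialize_compat(bytes: &[u8]) -> Result<NewFormat, bincode::Error> {
    bincode::deserialize::<NewFormat>(bytes)
        .or_else(|_| bincode::deserialize::<LegacyFormat>(bytes).map(NewFormat::from))
}

fn main() -> Result<(), bincode::Error> {
    // A payload written before the new field existed still deserializes.
    let old_bytes = bincode::serialize(&LegacyFormat { value: 42 })?;
    let restored = deserialize_compat(&old_bytes)?;
    println!("restored: {restored:?}");
    Ok(())
}
```

Order matters in this pattern: the current layout is tried first, so a payload that happens to parse under both schemas is interpreted with the newer one.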
---
 core/lib/prover_interface/src/inputs.rs | 31 ++++++++++++++++++++++++-
 1 file changed, 30 insertions(+), 1 deletion(-)

diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs
index f5f389362dd..28bc1998312 100644
--- a/core/lib/prover_interface/src/inputs.rs
+++ b/core/lib/prover_interface/src/inputs.rs
@@ -213,6 +213,25 @@ pub struct WitnessInputData {
     pub eip_4844_blobs: Eip4844Blobs,
 }
 
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct WitnessInputDataLegacy {
+    pub vm_run_data: VMRunWitnessInputDataLegacy,
+    pub merkle_paths: WitnessInputMerklePaths,
+    pub previous_batch_metadata: L1BatchMetadataHashes,
+    pub eip_4844_blobs: Eip4844Blobs,
+}
+
+impl From<WitnessInputDataLegacy> for WitnessInputData {
+    fn from(value: WitnessInputDataLegacy) -> Self {
+        Self {
+            vm_run_data: value.vm_run_data.into(),
+            merkle_paths: value.merkle_paths,
+            previous_batch_metadata: value.previous_batch_metadata,
+            eip_4844_blobs: value.eip_4844_blobs,
+        }
+    }
+}
+
 impl StoredObject for WitnessInputData {
     const BUCKET: Bucket = Bucket::WitnessInput;
 
@@ -222,7 +241,17 @@ impl StoredObject for WitnessInputData {
         format!("witness_inputs_{key}.bin")
     }
 
-    serialize_using_bincode!();
+    fn serialize(&self) -> Result<Vec<u8>, BoxedError> {
+        zksync_object_store::bincode::serialize(self).map_err(Into::into)
+    }
+
+    fn deserialize(bytes: Vec<u8>) -> Result<Self, BoxedError> {
+        zksync_object_store::bincode::deserialize::<Self>(&bytes).or_else(|_| {
+            zksync_object_store::bincode::deserialize::<WitnessInputDataLegacy>(&bytes)
+                .map(Into::into)
+                .map_err(Into::into)
+        })
+    }
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]

From e5b5a3b7b62e8d4035fe89c2a287bf3606d17bc5 Mon Sep 17 00:00:00 2001
From: Daniyar Itegulov
Date: Thu, 10 Oct 2024 21:00:07 +1100
Subject: [PATCH 032/140] feat(state-keeper): pre-insert unsealed L1 batches (#2846)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

`MempoolIO` and `ExternalIO` pre-insert unsealed batches into the DB as soon as they open them. I chose to populate non-null fields with default values to minimize the impact of this PR, but I am open to a discussion if anyone thinks we need to make those fields nullable (specifically `l1_tx_count`, `l2_tx_count`, `bloom`, `priority_ops_onchain_data`, `initial_bootloader_heap_content`, and `used_contract_hashes`).

## Why ❔

* Better visibility of what's going on for node operators
* This opens the road to fixing the issue with gas fluctuations on chains with low amounts of traffic, as `api_server` will have access to the most up-to-date fee input

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
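A minimal in-memory sketch of the open-then-seal flow described above, assuming a simplified batch row; `BatchStore`, `insert_unsealed`, and `seal` are illustrative names rather than the actual DAL API. It mirrors why the queries in this patch gain `WHERE is_sealed` filters: a row now exists as soon as the batch opens, before its final counters are known.

```rust
use std::collections::BTreeMap;

#[derive(Debug, Clone)]
struct BatchRow {
    timestamp: u64,
    fee_address: [u8; 20],
    l1_tx_count: u32,
    l2_tx_count: u32,
    is_sealed: bool,
}

#[derive(Default)]
struct BatchStore {
    rows: BTreeMap<u64, BatchRow>,
}

impl BatchStore {
    /// Called as soon as the state keeper opens a batch: the row exists immediately,
    /// with counters left at placeholder defaults until sealing.
    fn insert_unsealed(&mut self, number: u64, timestamp: u64, fee_address: [u8; 20]) {
        self.rows.insert(
            number,
            BatchRow { timestamp, fee_address, l1_tx_count: 0, l2_tx_count: 0, is_sealed: false },
        );
    }

    /// Called when the batch closes: fill in the real values and flip the flag.
    fn seal(&mut self, number: u64, l1_tx_count: u32, l2_tx_count: u32) {
        let row = self.rows.get_mut(&number).expect("batch was pre-inserted");
        row.l1_tx_count = l1_tx_count;
        row.l2_tx_count = l2_tx_count;
        row.is_sealed = true;
    }

    /// Readers that previously assumed "row exists == batch sealed" must now filter,
    /// mirroring the added `WHERE is_sealed` conditions in the queries below.
    fn last_sealed(&self) -> Option<u64> {
        self.rows.iter().rev().find(|(_, r)| r.is_sealed).map(|(n, _)| *n)
    }
}

fn main() {
    let mut store = BatchStore::default();
    store.insert_unsealed(1, 1_696_940_000, [0u8; 20]);
    assert_eq!(store.last_sealed(), None); // visible, but not yet sealed
    store.seal(1, 3, 120);
    assert_eq!(store.last_sealed(), Some(1));
    println!("batch 1 after sealing: {:?}", store.rows.get(&1));
}
```

Any consumer that previously treated "the row exists" as "the batch is sealed" now has to check the flag, which is exactly what the updated `.sqlx` queries below do.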
--- ...0fa76a03907957b7a0d87ea55a7873f3312e.json} | 12 +- ...98869d490ea0a96aa9d5b9a22b34ab0f8f47.json} | 4 +- ...ed820f8869acb6f59aa6dd704c0f5b4e45ec.json} | 4 +- ...7791290f3bfff4de742f2a918a3fd4e5608c.json} | 4 +- ...f7cd9c8486d1613319e1f6bc038ddff539f8.json} | 4 +- ...00a91254ec6c8a68a359d22b02df5a40911f.json} | 4 +- ...5598f5d22a3aebd893afddded0e3c6b94a3b.json} | 12 +- ...fbde6eb6634d7a63005081ffc1eb6c28e9ec.json} | 12 +- ...3010bea7cee09a9538a4e275ea89f67704966.json | 23 ++ ...bf3ffb03b3791c0e9a9f39fb85cfffc65db2.json} | 12 +- ...5e94ad6bdd84c31e4b2e0c629e51857533974.json | 23 -- ...f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json | 23 ++ ...3ba84478b6e56c95dfae6d8cc84e938e80c6.json} | 4 +- ...e0294e53eb37c1a2dbcc3044b8311200d549a.json | 33 ++ ...0371bdc118b25d64fcf526bd6575e4d675c8.json} | 12 +- ...4ba52c9a0e64d1badc39cc2fef29b1468621a.json | 56 ++++ ...d3b4c514a18d8a33ec978d3e8007af8d0c20.json} | 4 +- ...526c586708c812dc00b10bf3cd8aa871d9c2.json} | 4 +- ...56e43137ac0cf45312d70dec0c407cadc1bf.json} | 12 +- ...019baa12323fd3ef381fdacf290a3db3ec77.json} | 4 +- ...ac6f0b00c05229a0bd40902d5fcb1c1bf026.json} | 4 +- ...0a5b8081edf28fa1b67f71101d2e3621be798.json | 20 ++ ...6b563ff2f0f3a818e8c8a02c2ef632d0b960.json} | 12 +- ...99bf19b587a16ad70a671b0de48fd608bf31c.json | 23 -- ...c058f9ad703461a1f55c534bf3d9f48eb61b.json} | 4 +- ...926df634ebf0d8286181fa04884fb747cee8.json} | 4 +- ...ebfcd3b9c287fecde4376afa84c0566a55ef.json} | 4 +- ...1cd8aa22592f0808f3c2f0555ca321459815e.json | 22 ++ ...ddb30ca0d9ea0190786b8e8472c622e98b9c.json} | 4 +- ...2baec4b2531ecaa8da234863e2eb810761c7.json} | 12 +- ...3cab780f7ed1d91199b4d34011cdc9376c005.json | 22 -- ...8c8132d0958e4e25f4954e93d2095b4f11e8.json} | 12 +- ...914f15fd7a5fa3d7f7bc56906817c70b04950.json | 34 -- ...0910112120_unsealed_batches_in_db.down.sql | 5 + ...240910112120_unsealed_batches_in_db.up.sql | 5 + core/lib/dal/src/blocks_dal.rs | 292 +++++++++++++----- core/lib/dal/src/blocks_web3_dal.rs | 2 + core/lib/dal/src/consensus_dal/mod.rs | 2 + core/lib/dal/src/models/storage_block.rs | 38 ++- core/lib/dal/src/protocol_versions_dal.rs | 2 + core/lib/dal/src/storage_web3_dal.rs | 2 + core/lib/dal/src/sync_dal.rs | 2 + core/lib/dal/src/vm_runner_dal.rs | 4 + core/lib/types/src/block.rs | 11 + core/node/block_reverter/src/tests.rs | 1 + core/node/genesis/src/lib.rs | 13 +- core/node/node_sync/src/external_io.rs | 13 + core/node/state_keeper/src/io/mempool.rs | 37 +++ core/node/state_keeper/src/io/mod.rs | 4 +- core/node/state_keeper/src/io/persistence.rs | 22 +- .../state_keeper/src/io/seal_logic/mod.rs | 3 +- core/node/state_keeper/src/io/tests/mod.rs | 50 +++ core/node/state_keeper/src/io/tests/tester.rs | 16 +- core/node/state_keeper/src/updates/mod.rs | 2 +- 54 files changed, 716 insertions(+), 253 deletions(-) rename core/lib/dal/.sqlx/{query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json => query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json} (87%) rename core/lib/dal/.sqlx/{query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json => query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json} (53%) rename core/lib/dal/.sqlx/{query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json => query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json} (73%) rename core/lib/dal/.sqlx/{query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json => query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json} 
(71%) rename core/lib/dal/.sqlx/{query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json => query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json} (62%) rename core/lib/dal/.sqlx/{query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json => query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json} (63%) rename core/lib/dal/.sqlx/{query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json => query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json} (81%) rename core/lib/dal/.sqlx/{query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json => query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json} (83%) create mode 100644 core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json rename core/lib/dal/.sqlx/{query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json => query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json} (86%) delete mode 100644 core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json create mode 100644 core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json rename core/lib/dal/.sqlx/{query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json => query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json} (57%) create mode 100644 core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json rename core/lib/dal/.sqlx/{query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json => query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json} (86%) create mode 100644 core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json rename core/lib/dal/.sqlx/{query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json => query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json} (72%) rename core/lib/dal/.sqlx/{query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json => query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json} (73%) rename core/lib/dal/.sqlx/{query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json => query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json} (75%) rename core/lib/dal/.sqlx/{query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json => query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json} (69%) rename core/lib/dal/.sqlx/{query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json => query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json} (73%) create mode 100644 core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json rename core/lib/dal/.sqlx/{query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json => query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json} (89%) delete mode 100644 core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json rename core/lib/dal/.sqlx/{query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json => query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json} (74%) rename core/lib/dal/.sqlx/{query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json 
=> query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json} (58%) rename core/lib/dal/.sqlx/{query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json => query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json} (57%) create mode 100644 core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json rename core/lib/dal/.sqlx/{query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json => query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json} (57%) rename core/lib/dal/.sqlx/{query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json => query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json} (88%) delete mode 100644 core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json rename core/lib/dal/.sqlx/{query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json => query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json} (79%) delete mode 100644 core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json create mode 100644 core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql create mode 100644 core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql diff --git a/core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json b/core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json similarity index 87% rename from core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json rename to core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json index 8f41bf3b491..84f677a36c8 100644 --- a/core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json +++ b/core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON 
commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -171,8 +176,9 @@ false, true, true, - true + true, + false ] }, - "hash": "860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192" + "hash": "0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e" } diff --git a/core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json b/core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json similarity index 53% rename from core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json rename to core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json index 9cf4cc1e68e..36879466039 100644 --- a/core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json +++ b/core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n WHERE\n is_sealed\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ true ] }, - "hash": "1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03" + "hash": "16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47" } diff --git a/core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json b/core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json similarity index 73% rename from core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json rename to core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json index 853acb9f71a..a101edbb9ea 100644 --- a/core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json +++ b/core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n storage_refunds,\n pubdata_costs\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n storage_refunds,\n pubdata_costs\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ true ] }, - "hash": "cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a" + "hash": "1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec" } diff --git a/core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json b/core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json similarity index 71% rename from core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json rename to core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json index fc36e47b54c..1078e0b57f6 100644 --- a/core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json +++ 
b/core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n initial_bootloader_heap_content\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n initial_bootloader_heap_content\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea" + "hash": "1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c" } diff --git a/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json b/core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json similarity index 62% rename from core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json rename to core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json index 433564c6ae0..aa657582690 100644 --- a/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json +++ b/core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n (\n SELECT\n l1_batch_number\n FROM\n miniblocks\n WHERE\n number = $1\n ) AS \"block_batch?\",\n COALESCE(\n (\n SELECT\n MAX(number) + 1\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n ),\n 0\n ) AS \"pending_batch!\"\n ", + "query": "\n SELECT\n (\n SELECT\n l1_batch_number\n FROM\n miniblocks\n WHERE\n number = $1\n ) AS \"block_batch?\",\n COALESCE(\n (\n SELECT\n MAX(number) + 1\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n ),\n 0\n ) AS \"pending_batch!\"\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ null ] }, - "hash": "c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5" + "hash": "1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8" } diff --git a/core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json b/core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json similarity index 63% rename from core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json rename to core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json index 6f77a656072..b8f8db874b6 100644 --- a/core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json +++ b/core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n 
miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", "describe": { "columns": [ { @@ -97,5 +97,5 @@ false ] }, - "hash": "51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6" + "hash": "2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f" } diff --git a/core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json b/core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json similarity index 81% rename from core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json rename to core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json index a3d356f4bea..afac14e6d5c 100644 --- a/core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json +++ b/core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n 
pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -174,8 +179,9 @@ false, true, true, - true + true, + false ] }, - "hash": "16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870" + "hash": "2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b" } diff --git a/core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json b/core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json similarity index 83% rename from core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json rename to core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json index 28fbea09998..804318120fc 100644 --- a/core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json +++ b/core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -72,6 +72,11 @@ "ordinal": 13, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -93,8 +98,9 @@ true, true, false, - true + true, + false ] }, - "hash": "7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e" + "hash": "4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec" } diff --git a/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json b/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json new file mode 100644 index 00000000000..2cd528a9f53 --- /dev/null +++ b/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS 
\"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966" +} diff --git a/core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json b/core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json similarity index 86% rename from core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json rename to core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json index 032cf987fc0..4eae4f778ce 100644 --- a/core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json +++ b/core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -171,8 +176,9 @@ false, true, true, - true + true, + false ] }, - "hash": "da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7" + "hash": "5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2" } diff --git a/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json b/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json deleted file mode 100644 index c2d9fe2e1ac..00000000000 --- a/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": 
"PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_ready_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - true - ] - }, - "hash": "5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974" -} diff --git a/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json b/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json new file mode 100644 index 00000000000..c95a5bc6bd4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a" +} diff --git a/core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json b/core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json similarity index 57% rename from core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json rename to core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json index 3052b3a04d1..95957160124 100644 --- a/core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json +++ b/core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_execute_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_execute_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b" + "hash": "5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6" } diff --git a/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json b/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json new file mode 100644 index 00000000000..306f193861f --- /dev/null +++ b/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE 
l1_batches\n SET\n l1_tx_count = $2,\n l2_tx_count = $3,\n l2_to_l1_messages = $4,\n bloom = $5,\n priority_ops_onchain_data = $6,\n predicted_commit_gas_cost = $7,\n predicted_prove_gas_cost = $8,\n predicted_execute_gas_cost = $9,\n initial_bootloader_heap_content = $10,\n used_contract_hashes = $11,\n bootloader_code_hash = $12,\n default_aa_code_hash = $13,\n evm_emulator_code_hash = $14,\n protocol_version = $15,\n system_logs = $16,\n storage_refunds = $17,\n pubdata_costs = $18,\n pubdata_input = $19,\n predicted_circuits_by_type = $20,\n updated_at = NOW(),\n is_sealed = TRUE\n WHERE\n number = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Bytea", + "Bytea", + "Bytea", + "Int4", + "ByteaArray", + "Int8Array", + "Int8Array", + "Bytea", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a" +} diff --git a/core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json b/core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json similarity index 86% rename from core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json rename to core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json index 9eb67bb8299..dffd3ed8f9d 100644 --- a/core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json +++ b/core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": 
"fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -169,8 +174,9 @@ false, true, true, - true + true, + false ] }, - "hash": "9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24" + "hash": "7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8" } diff --git a/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json b/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json new file mode 100644 index 00000000000..df856b97702 --- /dev/null +++ b/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json @@ -0,0 +1,56 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n timestamp,\n protocol_version,\n fee_address,\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n l1_batches\n WHERE\n NOT is_sealed\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 5, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 6, + "name": "fair_pubdata_price", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + true, + false, + false, + false, + false + ] + }, + "hash": "8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a" +} diff --git a/core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json b/core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json similarity index 72% rename from core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json rename to core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json index 100761f54b4..ea2b51d69d1 100644 --- a/core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json +++ b/core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ true ] }, - "hash": "0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41" + "hash": "8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20" } diff --git a/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json b/core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json similarity index 73% rename from core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json rename to core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json index 9b989a9ba25..82af00b5606 100644 --- a/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json +++ b/core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(number) AS \"number\"\n FROM\n 
l1_batches\n ", + "query": "\n SELECT\n MAX(number) AS \"number\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c" + "hash": "8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2" } diff --git a/core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json b/core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json similarity index 75% rename from core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json rename to core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json index 55d56cc4ab0..8c22b4f92c4 100644 --- a/core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json +++ b/core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n 
protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -175,8 +180,9 @@ false, true, true, - true + true, + false ] }, - "hash": "9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361" + "hash": "942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf" } diff --git a/core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json b/core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json similarity index 69% rename from core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json rename to core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json index f9799079442..08e3b4b17a9 100644 --- a/core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json +++ b/core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MIN(number) AS \"min?\"\n FROM\n l1_batches\n WHERE\n protocol_version = $1\n ", + "query": "\n SELECT\n MIN(number) AS \"min?\"\n FROM\n l1_batches\n WHERE\n is_sealed\n AND protocol_version = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ null ] }, - "hash": "86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d" + "hash": "9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77" } diff --git a/core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json b/core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json similarity index 73% rename from core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json rename to core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json index 56fcdb38943..9a1b043e573 100644 --- a/core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json +++ b/core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MIN(number) AS \"number\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n MIN(number) AS \"number\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1" + "hash": "a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026" } diff --git a/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json b/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json new file mode 100644 index 00000000000..78b913fcc36 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches (\n number,\n timestamp,\n protocol_version,\n fee_address,\n l1_gas_price,\n l2_fair_gas_price,\n 
fair_pubdata_price,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n initial_bootloader_heap_content,\n used_contract_hashes,\n created_at,\n updated_at,\n is_sealed\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n 0,\n 0,\n ''::bytea,\n '{}'::bytea [],\n '{}'::jsonb,\n '{}'::jsonb,\n NOW(),\n NOW(),\n FALSE\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Int4", + "Bytea", + "Int8", + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798" +} diff --git a/core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json b/core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json similarity index 89% rename from core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json rename to core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json index 752e171f58c..80a6946026b 100644 --- a/core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json +++ b/core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -171,8 +176,9 @@ true, true, true, - true + true, + false ] }, - "hash": "4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98" + "hash": "b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960" } diff --git a/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json b/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json deleted file mode 100644 index 120fac1021f..00000000000 --- 
a/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_ready_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - true - ] - }, - "hash": "b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c" -} diff --git a/core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json b/core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json similarity index 74% rename from core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json rename to core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json index c63ea98db44..8f6d1cf7a5f 100644 --- a/core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json +++ b/core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8" + "hash": "cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b" } diff --git a/core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json b/core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json similarity index 58% rename from core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json rename to core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json index b2f195c4e5c..ed3270de573 100644 --- a/core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json +++ b/core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON 
miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -132,5 +132,5 @@ false ] }, - "hash": "7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5" + "hash": "d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8" } diff --git a/core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json b/core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json similarity index 57% rename from core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json rename to core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json index 0370a63d65e..15d6096420f 100644 --- a/core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json +++ b/core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_commit_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99" + "hash": "d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef" } diff --git a/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json 
b/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json new file mode 100644 index 00000000000..0aac086f22a --- /dev/null +++ b/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\"\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e" +} diff --git a/core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json b/core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json similarity index 57% rename from core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json rename to core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json index 7c3a261d1f6..baabbdb4f24 100644 --- a/core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json +++ b/core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_prove_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_prove_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d" + "hash": "d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c" } diff --git a/core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json b/core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json similarity index 88% rename from core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json rename to core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json index 6588ee2f11e..e55d10d6f9a 100644 --- a/core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json +++ b/core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND 
$2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -173,8 +178,9 @@ false, true, true, - true + true, + false ] }, - "hash": "b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731" + "hash": "e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7" } diff --git a/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json b/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json deleted file mode 100644 index 2598be6267d..00000000000 --- a/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\"\n FROM\n miniblocks\n WHERE\n number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005" -} diff --git a/core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json b/core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json similarity index 79% rename from core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json rename to core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json index c93e6aef3e7..4f138822ad1 100644 --- a/core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json +++ b/core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n 
l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -172,8 +177,9 @@ true, true, true, - true + true, + false ] }, - "hash": "05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32" + "hash": "f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8" } diff --git a/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json b/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json deleted file mode 100644 index 4fe32531a3f..00000000000 --- a/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n $21,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Bytea", - "Bytea", - "Bytea", - "Int4", - "ByteaArray", - "Int8Array", - "Int8Array", - "Bytea", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950" -} diff --git a/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql new file mode 100644 index 00000000000..3706fc6630b --- /dev/null +++ 
b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches + RENAME COLUMN is_sealed TO is_finished; +ALTER table l1_batches + DROP COLUMN fair_pubdata_price, + DROP COLUMN fee_address; diff --git a/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql new file mode 100644 index 00000000000..6b08546ea1e --- /dev/null +++ b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches + RENAME COLUMN is_finished TO is_sealed; +ALTER table l1_batches + ADD COLUMN fair_pubdata_price bigint NOT NULL DEFAULT 0, + ADD COLUMN fee_address bytea NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 59cc557f36e..347152e3c38 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -17,9 +17,10 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, block::{ BlockGasCount, L1BatchHeader, L1BatchStatistics, L1BatchTreeData, L2BlockHeader, - StorageOracleInfo, + StorageOracleInfo, UnsealedL1BatchHeader, }, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, + fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, writes::TreeWrite, Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, @@ -30,7 +31,9 @@ pub use crate::models::storage_block::{L1BatchMetadataError, L1BatchWithOptional use crate::{ models::{ parse_protocol_version, - storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader}, + storage_block::{ + StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader, UnsealedStorageL1Batch, + }, storage_event::StorageL2ToL1Log, storage_oracle_info::DbStorageOracleInfo, }, @@ -89,6 +92,8 @@ impl BlocksDal<'_, '_> { COUNT(*) AS "count!" FROM l1_batches + WHERE + is_sealed "# ) .instrument("is_genesis_needed") @@ -105,6 +110,8 @@ impl BlocksDal<'_, '_> { MAX(number) AS "number" FROM l1_batches + WHERE + is_sealed "# ) .instrument("get_sealed_l1_batch_number") @@ -140,6 +147,8 @@ impl BlocksDal<'_, '_> { MIN(number) AS "number" FROM l1_batches + WHERE + is_sealed "# ) .instrument("get_earliest_l1_batch_number") @@ -334,12 +343,14 @@ impl BlocksDal<'_, '_> { compressed_state_diffs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -370,11 +381,13 @@ impl BlocksDal<'_, '_> { evm_emulator_code_hash, protocol_version, system_logs, - pubdata_input + pubdata_input, + fee_address FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -407,7 +420,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -438,7 +452,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -559,7 +574,76 @@ impl BlocksDal<'_, '_> { Ok(()) } + /// Inserts an unsealed L1 batch with some basic information (i.e. runtime related data is either + /// null or set to default value for the corresponding type). 
pub async fn insert_l1_batch( + &mut self, + number: L1BatchNumber, + timestamp: u64, + protocol_version: Option, + fee_address: Address, + batch_fee_input: BatchFeeInput, + ) -> DalResult<()> { + sqlx::query!( + r#" + INSERT INTO + l1_batches ( + number, + timestamp, + protocol_version, + fee_address, + l1_gas_price, + l2_fair_gas_price, + fair_pubdata_price, + l1_tx_count, + l2_tx_count, + bloom, + priority_ops_onchain_data, + initial_bootloader_heap_content, + used_contract_hashes, + created_at, + updated_at, + is_sealed + ) + VALUES + ( + $1, + $2, + $3, + $4, + $5, + $6, + $7, + 0, + 0, + ''::bytea, + '{}'::bytea [], + '{}'::jsonb, + '{}'::jsonb, + NOW(), + NOW(), + FALSE + ) + "#, + i64::from(number.0), + timestamp as i64, + protocol_version.map(|v| v as i32), + fee_address.as_bytes(), + batch_fee_input.l1_gas_price() as i64, + batch_fee_input.fair_l2_gas_price() as i64, + batch_fee_input.fair_pubdata_price() as i64, + ) + .instrument("insert_l1_batch") + .with_arg("number", &number) + .execute(self.storage) + .await?; + Ok(()) + } + + /// Marks provided L1 batch as sealed and populates it with all the runtime information. + /// + /// Errors if the batch does not exist. + pub async fn mark_l1_batch_as_sealed( &mut self, header: &L1BatchHeader, initial_bootloader_contents: &[(usize, U256)], @@ -567,9 +651,9 @@ impl BlocksDal<'_, '_> { storage_refunds: &[u32], pubdata_costs: &[i32], predicted_circuits_by_type: CircuitStatistic, // predicted number of circuits for each circuit type - ) -> DalResult<()> { + ) -> anyhow::Result<()> { let initial_bootloader_contents_len = initial_bootloader_contents.len(); - let instrumentation = Instrumented::new("insert_l1_batch") + let instrumentation = Instrumented::new("mark_l1_batch_as_sealed") .with_arg("number", &header.number) .with_arg( "initial_bootloader_contents.len", @@ -596,63 +680,35 @@ impl BlocksDal<'_, '_> { let query = sqlx::query!( r#" - INSERT INTO - l1_batches ( - number, - l1_tx_count, - l2_tx_count, - timestamp, - l2_to_l1_messages, - bloom, - priority_ops_onchain_data, - predicted_commit_gas_cost, - predicted_prove_gas_cost, - predicted_execute_gas_cost, - initial_bootloader_heap_content, - used_contract_hashes, - bootloader_code_hash, - default_aa_code_hash, - evm_emulator_code_hash, - protocol_version, - system_logs, - storage_refunds, - pubdata_costs, - pubdata_input, - predicted_circuits_by_type, - created_at, - updated_at - ) - VALUES - ( - $1, - $2, - $3, - $4, - $5, - $6, - $7, - $8, - $9, - $10, - $11, - $12, - $13, - $14, - $15, - $16, - $17, - $18, - $19, - $20, - $21, - NOW(), - NOW() - ) + UPDATE l1_batches + SET + l1_tx_count = $2, + l2_tx_count = $3, + l2_to_l1_messages = $4, + bloom = $5, + priority_ops_onchain_data = $6, + predicted_commit_gas_cost = $7, + predicted_prove_gas_cost = $8, + predicted_execute_gas_cost = $9, + initial_bootloader_heap_content = $10, + used_contract_hashes = $11, + bootloader_code_hash = $12, + default_aa_code_hash = $13, + evm_emulator_code_hash = $14, + protocol_version = $15, + system_logs = $16, + storage_refunds = $17, + pubdata_costs = $18, + pubdata_input = $19, + predicted_circuits_by_type = $20, + updated_at = NOW(), + is_sealed = TRUE + WHERE + number = $1 "#, i64::from(header.number.0), i32::from(header.l1_tx_count), i32::from(header.l2_tx_count), - header.timestamp as i64, &header.l2_to_l1_messages, header.bloom.as_bytes(), &priority_onchain_data, @@ -675,13 +731,41 @@ impl BlocksDal<'_, '_> { pubdata_input, serde_json::to_value(predicted_circuits_by_type).unwrap(), ); + let 
update_result = instrumentation.with(query).execute(self.storage).await?; - let mut transaction = self.storage.start_transaction().await?; - instrumentation - .with(query) - .execute(&mut transaction) - .await?; - transaction.commit().await + if update_result.rows_affected() == 0 { + anyhow::bail!( + "L1 batch sealing failed: batch #{} was not found", + header.number + ); + } + + Ok(()) + } + + pub async fn get_unsealed_l1_batch(&mut self) -> DalResult> { + let batch = sqlx::query_as!( + UnsealedStorageL1Batch, + r#" + SELECT + number, + timestamp, + protocol_version, + fee_address, + l1_gas_price, + l2_fair_gas_price, + fair_pubdata_price + FROM + l1_batches + WHERE + NOT is_sealed + "#, + ) + .instrument("get_last_committed_to_eth_l1_batch") + .fetch_optional(self.storage) + .await?; + + Ok(batch.map(|b| b.into())) } pub async fn insert_l2_block(&mut self, l2_block_header: &L2BlockHeader) -> DalResult<()> { @@ -1065,7 +1149,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1252,7 +1337,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1333,7 +1419,8 @@ impl BlocksDal<'_, '_> { protocol_version, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM ( SELECT @@ -1407,7 +1494,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1535,7 +1623,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1600,7 +1689,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1679,7 +1769,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -2078,7 +2169,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - eth_commit_tx_id IS NULL + is_sealed + AND eth_commit_tx_id IS NULL AND number > 0 ORDER BY number @@ -2100,7 +2192,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - eth_prove_tx_id IS NULL + is_sealed + AND eth_prove_tx_id IS NULL AND number > 0 ORDER BY number @@ -2122,7 +2215,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - eth_execute_tx_id IS NULL + is_sealed + AND eth_execute_tx_id IS NULL AND number > 0 ORDER BY number @@ -2147,7 +2241,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(l1_batch_number.0) ) @@ -2217,7 +2312,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - protocol_version = $1 + is_sealed + AND protocol_version = $1 "#, protocol_version as i32 ) @@ -2523,8 +2619,16 @@ impl BlocksDal<'_, '_> { Ok(()) } - 
pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> DalResult<()> { + pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> anyhow::Result<()> { self.insert_l1_batch( + header.number, + header.timestamp, + header.protocol_version, + header.fee_address, + BatchFeeInput::pubdata_independent(100, 100, 100), + ) + .await?; + self.mark_l1_batch_as_sealed( header, &[], Default::default(), @@ -2835,7 +2939,17 @@ mod tests { execute: 10, }; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) + .insert_l1_batch( + header.number, + header.timestamp, + header.protocol_version, + header.fee_address, + BatchFeeInput::pubdata_independent(100, 100, 100), + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l1_batch_as_sealed(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); @@ -2843,7 +2957,17 @@ mod tests { header.timestamp += 100; predicted_gas += predicted_gas; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) + .insert_l1_batch( + header.number, + header.timestamp, + header.protocol_version, + header.fee_address, + BatchFeeInput::pubdata_independent(100, 100, 100), + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l1_batch_as_sealed(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index c1a1e6765b6..829e15b5710 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -656,6 +656,8 @@ impl BlocksWeb3Dal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ) ) AS "l1_batch_number!", miniblocks.timestamp, diff --git a/core/lib/dal/src/consensus_dal/mod.rs b/core/lib/dal/src/consensus_dal/mod.rs index eb3385a992a..9515e93f2b3 100644 --- a/core/lib/dal/src/consensus_dal/mod.rs +++ b/core/lib/dal/src/consensus_dal/mod.rs @@ -707,6 +707,8 @@ impl ConsensusDal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ), ( SELECT diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 7e9a9eca9d4..3bb433a05cf 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -6,7 +6,7 @@ use thiserror::Error; use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ api, - block::{L1BatchHeader, L2BlockHeader}, + block::{L1BatchHeader, L2BlockHeader, UnsealedL1BatchHeader}, commitment::{L1BatchMetaParameters, L1BatchMetadata}, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput}, l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, @@ -53,6 +53,7 @@ pub(crate) struct StorageL1BatchHeader { // will be exactly 7 (or 8 in the event of a protocol upgrade) system logs. 
pub system_logs: Vec<Vec<u8>>, pub pubdata_input: Option<Vec<u8>>, + pub fee_address: Vec<u8>, } impl StorageL1BatchHeader { @@ -90,6 +91,7 @@ impl StorageL1BatchHeader { .protocol_version .map(|v| (v as u16).try_into().unwrap()), pubdata_input: self.pubdata_input, + fee_address: Address::from_slice(&self.fee_address), } } } @@ -152,6 +154,7 @@ pub(crate) struct StorageL1Batch { pub events_queue_commitment: Option<Vec<u8>>, pub bootloader_initial_content_commitment: Option<Vec<u8>>, pub pubdata_input: Option<Vec<u8>>, + pub fee_address: Vec<u8>, } impl StorageL1Batch { @@ -189,6 +192,7 @@ impl StorageL1Batch { .protocol_version .map(|v| (v as u16).try_into().unwrap()), pubdata_input: self.pubdata_input, + fee_address: Address::from_slice(&self.fee_address), } } } @@ -263,6 +267,38 @@ impl TryFrom<StorageL1Batch> for L1BatchMetadata { } } +/// Partial projection of the columns corresponding to an unsealed [`L1BatchHeader`]. +#[derive(Debug, Clone)] +pub(crate) struct UnsealedStorageL1Batch { + pub number: i64, + pub timestamp: i64, + pub protocol_version: Option<i32>, + pub fee_address: Vec<u8>, + pub l1_gas_price: i64, + pub l2_fair_gas_price: i64, + pub fair_pubdata_price: Option<i64>, +} + +impl From<UnsealedStorageL1Batch> for UnsealedL1BatchHeader { + fn from(batch: UnsealedStorageL1Batch) -> Self { + let protocol_version: Option<ProtocolVersionId> = batch + .protocol_version + .map(|v| (v as u16).try_into().unwrap()); + Self { + number: L1BatchNumber(batch.number as u32), + timestamp: batch.timestamp as u64, + protocol_version, + fee_address: Address::from_slice(&batch.fee_address), + fee_input: BatchFeeInput::for_protocol_version( + protocol_version.unwrap_or_else(ProtocolVersionId::last_potentially_undefined), + batch.l2_fair_gas_price as u64, + batch.fair_pubdata_price.map(|p| p as u64), + batch.l1_gas_price as u64, + ), + } + } +} + #[derive(Debug, Clone, sqlx::FromRow)] pub(crate) struct StorageBlockDetails { pub number: i64, diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 3b500e07a08..3382d8c836e 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -381,6 +381,8 @@ impl ProtocolVersionsDal<'_, '_> { protocol_version FROM l1_batches + WHERE + is_sealed ORDER BY number DESC LIMIT diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index f3a20ac39fa..78c1dc0c3d0 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -178,6 +178,8 @@ impl StorageWeb3Dal<'_, '_> { MAX(number) + 1 FROM l1_batches + WHERE + is_sealed ), ( SELECT diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index ab5684007d0..265c6135488 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -35,6 +35,8 @@ impl SyncDal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ), ( SELECT diff --git a/core/lib/dal/src/vm_runner_dal.rs b/core/lib/dal/src/vm_runner_dal.rs index b12b0218680..df0d3e86b88 100644 --- a/core/lib/dal/src/vm_runner_dal.rs +++ b/core/lib/dal/src/vm_runner_dal.rs @@ -42,6 +42,8 @@ impl VmRunnerDal<'_, '_> { MAX(number) AS "last_batch" FROM l1_batches + WHERE + is_sealed ), processed_batches AS ( @@ -205,6 +207,8 @@ impl VmRunnerDal<'_, '_> { MAX(number) AS "last_batch" FROM l1_batches + WHERE + is_sealed ), processed_batches AS ( diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 9c1609bf175..361e9ea56d2 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -65,6 +65,16 @@ pub struct L1BatchHeader { /// Version of protocol
used for the L1 batch. pub protocol_version: Option<ProtocolVersionId>, pub pubdata_input: Option<Vec<u8>>, + pub fee_address: Address, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct UnsealedL1BatchHeader { + pub number: L1BatchNumber, + pub timestamp: u64, + pub protocol_version: Option<ProtocolVersionId>, + pub fee_address: Address, + pub fee_input: BatchFeeInput, +} /// Holder for the L2 block metadata that is not available from transactions themselves. @@ -132,6 +142,7 @@ impl L1BatchHeader { system_logs: vec![], protocol_version: Some(protocol_version), pubdata_input: Some(vec![]), + fee_address: Default::default(), } } diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index b29d01af39a..85d894b7fd5 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -87,6 +87,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora system_logs: vec![], protocol_version: Some(ProtocolVersionId::latest()), pubdata_input: None, + fee_address: Default::default(), }; storage .blocks_dal() diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 5c17add2e98..ba5e10dfb22 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -392,6 +392,7 @@ pub async fn create_genesis_l1_batch( base_system_contracts.hashes(), protocol_version.minor, ); + let batch_fee_input = BatchFeeInput::pubdata_independent(0, 0, 0); let genesis_l2_block_header = L2BlockHeader { number: L2BlockNumber(0), @@ -402,7 +403,7 @@ pub async fn create_genesis_l1_batch( fee_account_address: Default::default(), base_fee_per_gas: 0, gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(protocol_version.minor.into()), - batch_fee_input: BatchFeeInput::l1_pegged(0, 0), + batch_fee_input, base_system_contracts_hashes: base_system_contracts.hashes(), protocol_version: Some(protocol_version.minor), virtual_blocks: 0, @@ -419,6 +420,16 @@ pub async fn create_genesis_l1_batch( transaction .blocks_dal() .insert_l1_batch( + genesis_l1_batch_header.number, + genesis_l1_batch_header.timestamp, + genesis_l1_batch_header.protocol_version, + genesis_l1_batch_header.fee_address, + batch_fee_input, + ) + .await?; + transaction + .blocks_dal() + .mark_l1_batch_as_sealed( &genesis_l1_batch_header, &[], BlockGasCount::default(), diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 7687595740a..d4e7240da34 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -236,6 +236,19 @@ impl StateKeeperIO for ExternalIO { "L2 block number mismatch: expected {}, got {first_l2_block_number}", cursor.next_l2_block ); + + self.pool + .connection() + .await? + .blocks_dal() + .insert_l1_batch( + cursor.l1_batch, + params.first_l2_block.timestamp, + None, + params.operator_address, + params.fee_input, + ) + .await?; return Ok(Some(params)); } other => { diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 108283122bc..f771a2dda4c 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -148,6 +148,30 @@ impl StateKeeperIO for MempoolIO { cursor: &IoCursor, max_wait: Duration, ) -> anyhow::Result<Option<L1BatchParams>> { + // Check if there is an existing unsealed batch + if let Some(unsealed_storage_batch) = self + .pool + .connection_tagged("state_keeper") + .await? + .blocks_dal() + .get_unsealed_l1_batch() + .await?
+ { + return Ok(Some(L1BatchParams { + protocol_version: unsealed_storage_batch + .protocol_version + .expect("unsealed batch is missing protocol version"), + validation_computational_gas_limit: self.validation_computational_gas_limit, + operator_address: unsealed_storage_batch.fee_address, + fee_input: unsealed_storage_batch.fee_input, + first_l2_block: L2BlockParams { + timestamp: unsealed_storage_batch.timestamp, + // This value is effectively ignored by the protocol. + virtual_blocks: 1, + }, + })); + } + let deadline = Instant::now() + max_wait; // Block until at least one transaction in the mempool can match the filter (or timeout happens). @@ -191,6 +215,19 @@ impl StateKeeperIO for MempoolIO { continue; } + self.pool + .connection() + .await? + .blocks_dal() + .insert_l1_batch( + cursor.l1_batch, + timestamp, + Some(protocol_version), + self.fee_account, + self.filter.fee_input, + ) + .await?; + return Ok(Some(L1BatchParams { protocol_version, validation_computational_gas_limit: self.validation_computational_gas_limit, diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index f8106fd2423..0fc5ebb6c08 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -42,7 +42,7 @@ pub struct PendingBatchData { pub(crate) pending_l2_blocks: Vec, } -#[derive(Debug, Copy, Clone, Default)] +#[derive(Debug, Copy, Clone, Default, PartialEq)] pub struct L2BlockParams { /// The timestamp of the L2 block. pub timestamp: u64, @@ -58,7 +58,7 @@ pub struct L2BlockParams { } /// Parameters for a new L1 batch returned by [`StateKeeperIO::wait_for_new_batch_params()`]. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct L1BatchParams { /// Protocol version for the new L1 batch. pub protocol_version: ProtocolVersionId, diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 97340d6496a..16275ec672d 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -397,7 +397,7 @@ mod tests { let mut output_handler = OutputHandler::new(Box::new(persistence)) .with_handler(Box::new(TreeWritesPersistence::new(pool.clone()))); tokio::spawn(l2_block_sealer.run()); - execute_mock_batch(&mut output_handler).await; + execute_mock_batch(&mut output_handler, &pool).await; // Check that L2 block #1 and L1 batch #1 are persisted. let mut storage = pool.connection().await.unwrap(); @@ -446,9 +446,25 @@ mod tests { assert_eq!(actual_index, expected_index); } - async fn execute_mock_batch(output_handler: &mut OutputHandler) -> H256 { + async fn execute_mock_batch( + output_handler: &mut OutputHandler, + pool: &ConnectionPool, + ) -> H256 { let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); let mut updates = UpdatesManager::new(&l1_batch_env, &default_system_env()); + pool.connection() + .await + .unwrap() + .blocks_dal() + .insert_l1_batch( + l1_batch_env.number, + l1_batch_env.timestamp, + None, + l1_batch_env.fee_account, + l1_batch_env.fee_input, + ) + .await + .unwrap(); let tx = create_transaction(10, 100); let tx_hash = tx.hash(); @@ -533,7 +549,7 @@ mod tests { let mut output_handler = OutputHandler::new(Box::new(persistence)); tokio::spawn(l2_block_sealer.run()); - let tx_hash = execute_mock_batch(&mut output_handler).await; + let tx_hash = execute_mock_batch(&mut output_handler, &pool).await; // Check that the transaction is persisted. 
let mut storage = pool.connection().await.unwrap(); diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index 0dae7fae908..5859d27786d 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -132,6 +132,7 @@ impl UpdatesManager { protocol_version: Some(self.protocol_version()), system_logs: finished_batch.final_execution_state.system_logs.clone(), pubdata_input: finished_batch.pubdata_input.clone(), + fee_address: self.fee_account_address, }; let final_bootloader_memory = finished_batch @@ -141,7 +142,7 @@ impl UpdatesManager { transaction .blocks_dal() - .insert_l1_batch( + .mark_l1_batch_as_sealed( &l1_batch, &final_bootloader_memory, self.pending_l1_gas_count(), diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index e2a90f30691..cd60bc68b36 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -556,3 +556,53 @@ async fn different_timestamp_for_l2_blocks_in_same_batch(commitment_mode: L1Batc .expect("no new L2 block params"); assert!(l2_block_params.timestamp > current_timestamp); } + +#[test_casing(2, COMMITMENT_MODES)] +#[tokio::test] +async fn continue_unsealed_batch_on_restart(commitment_mode: L1BatchCommitmentMode) { + let connection_pool = ConnectionPool::::test_pool().await; + let tester = Tester::new(commitment_mode); + tester.genesis(&connection_pool).await; + let mut storage = connection_pool.connection().await.unwrap(); + + let (mut mempool, mut mempool_guard) = + tester.create_test_mempool_io(connection_pool.clone()).await; + let (cursor, _) = mempool.initialize().await.unwrap(); + + // Insert a transaction into the mempool in order to open a new batch. 
+ let tx_filter = l2_tx_filter( + &tester.create_batch_fee_input_provider().await, + ProtocolVersionId::latest().into(), + ) + .await + .unwrap(); + let tx = tester.insert_tx( + &mut mempool_guard, + tx_filter.fee_per_gas, + tx_filter.gas_per_pubdata, + ); + storage + .transactions_dal() + .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .await + .unwrap(); + + let old_l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + + // Restart + drop((mempool, mempool_guard, cursor)); + let (mut mempool, _) = tester.create_test_mempool_io(connection_pool.clone()).await; + let (cursor, _) = mempool.initialize().await.unwrap(); + + let new_l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + + assert_eq!(old_l1_batch_params, new_l1_batch_params); +} diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 02170283e94..062fc426e8c 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -25,7 +25,7 @@ use zksync_node_test_utils::{ use zksync_types::{ block::L2BlockHeader, commitment::L1BatchCommitmentMode, - fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV1}, + fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV2}, l2::L2Tx, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, system_contracts::get_system_smart_contracts, @@ -97,8 +97,13 @@ impl Tester { MainNodeFeeInputProvider::new( gas_adjuster, Arc::new(NoOpRatioProvider::default()), - FeeModelConfig::V1(FeeModelConfigV1 { + FeeModelConfig::V2(FeeModelConfigV2 { minimal_l2_gas_price: self.minimal_l2_gas_price(), + compute_overhead_part: 1.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 10, + max_gas_per_batch: 500_000_000_000, + max_pubdata_per_batch: 100_000_000_000, }), ) } @@ -116,8 +121,13 @@ impl Tester { let batch_fee_input_provider = MainNodeFeeInputProvider::new( gas_adjuster, Arc::new(NoOpRatioProvider::default()), - FeeModelConfig::V1(FeeModelConfigV1 { + FeeModelConfig::V2(FeeModelConfigV2 { minimal_l2_gas_price: self.minimal_l2_gas_price(), + compute_overhead_part: 1.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 10, + max_gas_per_batch: 500_000_000_000, + max_pubdata_per_batch: 100_000_000_000, }), ); diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 0cebc5d8b47..6211755eb15 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -32,7 +32,7 @@ pub mod l2_block_updates; #[derive(Debug)] pub struct UpdatesManager { batch_timestamp: u64, - fee_account_address: Address, + pub fee_account_address: Address, batch_fee_input: BatchFeeInput, base_fee_per_gas: u64, base_system_contract_hashes: BaseSystemContractsHashes, From db133e6d93de400e6c1aeecfc9ed6d9f8aa046ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Thu, 10 Oct 2024 09:06:47 -0300 Subject: [PATCH 033/140] refactor(zkstack_cli): Replace curl and wget with reqwest (#3052) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Replace curl and wget with reqwest --- zk_toolbox/Cargo.lock | 11 +++--- zk_toolbox/crates/common/src/lib.rs | 2 +- zk_toolbox/crates/common/src/prerequisites.rs | 5 --- 
zk_toolbox/crates/zk_inception/Cargo.toml | 1 + .../contract_verifier/args/releases.rs | 37 +++++++++---------- .../src/commands/contract_verifier/init.rs | 3 +- .../src/commands/prover/compressor_keys.rs | 16 ++++---- 7 files changed, 35 insertions(+), 40 deletions(-) diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 279afaaf1b9..77316756c26 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -3278,7 +3278,7 @@ dependencies = [ "bytes", "http 1.1.0", "opentelemetry", - "reqwest 0.12.7", + "reqwest 0.12.8", ] [[package]] @@ -3295,7 +3295,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost 0.13.3", - "reqwest 0.12.7", + "reqwest 0.12.8", "thiserror", "tokio", "tonic", @@ -4078,9 +4078,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ "base64 0.22.1", "bytes", @@ -6700,6 +6700,7 @@ dependencies = [ "lazy_static", "prost 0.12.6", "rand", + "reqwest 0.12.8", "secrecy", "serde", "serde_json", @@ -7024,7 +7025,7 @@ dependencies = [ "hex", "num", "once_cell", - "reqwest 0.12.7", + "reqwest 0.12.8", "serde", "serde_json", "thiserror", diff --git a/zk_toolbox/crates/common/src/lib.rs b/zk_toolbox/crates/common/src/lib.rs index c23ef920226..b0fbdab0d1b 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zk_toolbox/crates/common/src/lib.rs @@ -18,7 +18,7 @@ pub mod yaml; pub use prerequisites::{ check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITE, GPU_PREREQUISITES, - PROVER_CLI_PREREQUISITE, WGET_PREREQUISITE, + PROVER_CLI_PREREQUISITE, }; pub use prompt::{init_prompt_theme, Prompt, PromptConfirm, PromptSelect}; pub use term::{error, logger, spinner}; diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zk_toolbox/crates/common/src/prerequisites.rs index 665096d8486..7845249a1ed 100644 --- a/zk_toolbox/crates/common/src/prerequisites.rs +++ b/zk_toolbox/crates/common/src/prerequisites.rs @@ -45,11 +45,6 @@ pub const GPU_PREREQUISITES: [Prerequisite; 3] = [ }, // CUDA GPU driver ]; -pub const WGET_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { - name: "wget", - download_link: "https://www.gnu.org/software/wget/", -}]; - pub const GCLOUD_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { name: "gcloud", download_link: "https://cloud.google.com/sdk/docs/install", diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zk_toolbox/crates/zk_inception/Cargo.toml index 5d42dadaed1..e6687bdd981 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zk_toolbox/crates/zk_inception/Cargo.toml @@ -39,6 +39,7 @@ zksync_protobuf.workspace = true zksync_protobuf_config.workspace = true prost.workspace = true secrecy.workspace = true +reqwest = "0.12.8" [dev-dependencies] rand.workspace = true diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs index 2b2b4cf97b1..ab169220f29 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use common::{cmd::Cmd, spinner::Spinner}; +use common::spinner::Spinner; use serde::Deserialize; -use xshell::{cmd, Shell}; +use xshell::Shell; use 
crate::messages::{MSG_INVALID_ARCH_ERR, MSG_NO_RELEASES_FOUND_ERR}; @@ -76,22 +76,19 @@ fn get_compatible_archs(asset_name: &str) -> anyhow::Result> { fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result> { if repo == "ethereum/solc-bin" { - return get_solc_releases(shell, arch); + return get_solc_releases(arch); } - let mut cmd = cmd!( - shell, - "curl -f https://api.github.com/repos/{repo}/releases" - ); + let client = reqwest::blocking::Client::new(); + let mut request = client + .get(format!("https://api.github.com/repos/{repo}/releases")) + .header("User-Agent", "zkstack"); if let Ok(token) = shell.var("GITHUB_TOKEN") { - cmd = cmd.args(vec![ - "-H".to_string(), - format!("Authorization: Bearer {}", token), - ]); + request = request.header("Authorization", format!("Bearer {}", token)); } - let response = String::from_utf8(Cmd::new(cmd).run_with_output()?.stdout)?; + let response = request.send()?.text()?; let releases: Vec = serde_json::from_str(&response)?; let mut versions = vec![]; @@ -115,7 +112,7 @@ fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result anyhow::Result> { +fn get_solc_releases(arch: Arch) -> anyhow::Result> { let (arch_str, compatible_archs) = match arch { Arch::LinuxAmd => ("linux-amd64", vec![Arch::LinuxAmd, Arch::LinuxArm]), Arch::LinuxArm => ("linux-amd64", vec![Arch::LinuxAmd, Arch::LinuxArm]), @@ -123,13 +120,15 @@ fn get_solc_releases(shell: &Shell, arch: Arch) -> anyhow::Result> Arch::MacosArm => ("macosx-amd64", vec![Arch::MacosAmd, Arch::MacosArm]), }; - let response: std::process::Output = Cmd::new(cmd!( - shell, - "curl https://raw.githubusercontent.com/ethereum/solc-bin/gh-pages/{arch_str}/list.json" - )) - .run_with_output()?; + let client = reqwest::blocking::Client::new(); + let response = client + .get(format!( + "https://raw.githubusercontent.com/ethereum/solc-bin/gh-pages/{arch_str}/list.json" + )) + .header("User-Agent", "zkstack") + .send()? 
+ .text()?; - let response = String::from_utf8(response.stdout)?; let solc_list: SolcList = serde_json::from_str(&response)?; let mut versions = vec![]; diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs index f376a0d36ec..b173ad9bbb7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs @@ -89,7 +89,8 @@ fn download_binary( let spinner = Spinner::new(&msg_downloading_binary_spinner(name, version)); Cmd::new(cmd!(shell, "mkdir -p {path}")).run()?; - Cmd::new(cmd!(shell, "wget {url} -O {binary_path}")).run()?; + let response = reqwest::blocking::get(url)?.bytes()?; + shell.write_file(binary_path.clone(), &response)?; Cmd::new(cmd!(shell, "chmod +x {binary_path}")).run()?; spinner.finish(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs index 703ecc18c4c..a3d40c95728 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs @@ -1,7 +1,7 @@ use anyhow::Context; -use common::{check_prerequisites, cmd::Cmd, spinner::Spinner, WGET_PREREQUISITE}; +use common::spinner::Spinner; use config::{get_link_to_prover, EcosystemConfig, GeneralConfig}; -use xshell::{cmd, Shell}; +use xshell::Shell; use super::args::compressor_keys::CompressorKeysArgs; use crate::messages::{ @@ -35,7 +35,6 @@ pub(crate) fn download_compressor_key( general_config: &mut GeneralConfig, path: &str, ) -> anyhow::Result<()> { - check_prerequisites(shell, &WGET_PREREQUISITE, false); let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER); let mut compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config .proof_compressor_config @@ -47,14 +46,13 @@ pub(crate) fn download_compressor_key( let url = compressor_config.universal_setup_download_url; let path = std::path::Path::new(path); - let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR); - let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR); - Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?; + let client = reqwest::blocking::Client::builder() + .timeout(std::time::Duration::from_secs(600)) + .build()?; - if file_name != "setup_2^24.key" { - Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?; - } + let response = client.get(url).send()?.bytes()?; + shell.write_file(path, &response)?; spinner.finish(); Ok(()) From 2bf74b64fe16aba61d7b103d0b867cab30748f4b Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Thu, 10 Oct 2024 14:08:06 +0200 Subject: [PATCH 034/140] feat(zkstack_cli): Add more folders to containers' PATH (#3057) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
--- docker/Makefile | 1 + docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile | 4 +++- docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile | 4 +++- docker/zk-environment/Dockerfile | 4 +++- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/docker/Makefile b/docker/Makefile index c469587c8ff..d4b94680408 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -66,6 +66,7 @@ prepare-contracts: check-tools check-contracts @cd ../ && \ export ZKSYNC_HOME=$$(pwd) && \ export PATH=$$PATH:$${ZKSYNC_HOME}/bin && \ + export PATH=$$PATH:$${ZKSYNC_HOME}/zkstack_cli/zkstackup && \ zkt || true && \ zk_supervisor contracts && \ mkdir -p contracts/l1-contracts/artifacts diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile index 0c0fd7a9bb3..79d3bb3d6af 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile @@ -104,7 +104,9 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" \ + PATH="${ZKSYNC_HOME}/zkstack_cli/zkstackup:${PATH}" \ + PATH="${HOME}/.local/bin:${PATH}" ENV CI=1 RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile index 5bd569b7d20..48bd4469639 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile @@ -93,7 +93,9 @@ RUN wget -c https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksol # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" \ + PATH="${ZKSYNC_HOME}/zkstack_cli/zkstackup:${PATH}" \ + PATH="${HOME}/.local/bin:${PATH}" ENV CI=1 RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 53e53265311..04b2cb947a0 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -164,7 +164,9 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" \ + PATH="${ZKSYNC_HOME}/zkstack_cli/zkstackup:${PATH}" \ + PATH="${HOME}/.local/bin:${PATH}" ENV CI=1 ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache From 40353616f278800dc80fcbe5f2a6483019033b20 Mon Sep 17 00:00:00 2001 From: Vladislav Volosnikov Date: Thu, 10 Oct 2024 15:38:21 +0300 Subject: [PATCH 035/140] feat: Handle new yul compilation flow (#3038) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ New versions of `zksolc` have several breaking changes. The following changes require updates to the zksync-era: 1. New output path for yul and zasm contracts (`.yul/.yul.zbin` instead of `.yul.zbin`) 2. `.zbin` files now contain utf8 encoded bytecode instead of binary This pull request adds support for these changes while maintaining backwards compatibility. ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
- [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- core/lib/contracts/src/lib.rs | 54 +++++++++++++++---- .../multivm/src/versions/vm_1_3_2/utils.rs | 9 +--- .../src/versions/vm_fast/tests/utils.rs | 8 ++- .../src/versions/vm_latest/tests/utils.rs | 9 ++-- core/lib/multivm/src/versions/vm_m5/utils.rs | 9 +--- core/lib/multivm/src/versions/vm_m6/utils.rs | 9 +--- core/tests/upgrade-test/tests/upgrade.test.ts | 35 +++++++++--- 7 files changed, 84 insertions(+), 49 deletions(-) diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index fb28693887a..7e133f8dee3 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -259,19 +259,19 @@ impl SystemContractsRepo { "artifacts-zk/contracts-preprocessed/{0}{1}.sol/{1}.json", directory, name ))), - ContractLanguage::Yul => read_zbin_bytecode_from_path(self.root.join(format!( - "contracts-preprocessed/{0}artifacts/{1}.yul.zbin", - directory, name - ))), + ContractLanguage::Yul => { + let artifacts_path = self + .root + .join(format!("contracts-preprocessed/{}artifacts/", directory)); + read_yul_bytecode_by_path(artifacts_path, name) + } } } } pub fn read_bootloader_code(bootloader_type: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/build/artifacts/{}.yul.zbin", - bootloader_type - )) + let artifacts_path = "contracts/system-contracts/bootloader/build/artifacts/"; + read_yul_bytecode(artifacts_path, bootloader_type) } fn read_proved_batch_bootloader_bytecode() -> Vec { @@ -288,10 +288,46 @@ pub fn read_zbin_bytecode(relative_zbin_path: impl AsRef) -> Vec { read_zbin_bytecode_from_path(bytecode_path) } +pub fn read_yul_bytecode(relative_artifacts_path: &str, name: &str) -> Vec { + let artifacts_path = Path::new(&home_path()).join(relative_artifacts_path); + read_yul_bytecode_by_path(artifacts_path, name) +} + +pub fn read_yul_bytecode_by_path(artifacts_path: PathBuf, name: &str) -> Vec { + let bytecode_path = artifacts_path.join(format!("{name}.yul/{name}.yul.zbin")); + + // Legacy versions of zksolc use the following path for output data if a yul file is being compiled: .yul.zbin + // New zksolc versions use .yul/.yul.zbin, for consistency with solidity files compilation. + // In addition, the output of the legacy zksolc in this case is a binary file, while in new versions it is hex encoded. + if fs::exists(&bytecode_path) + .unwrap_or_else(|err| panic!("Invalid path: {bytecode_path:?}, {err}")) + { + read_zbin_bytecode_from_hex_file(bytecode_path) + } else { + let bytecode_path_legacy = artifacts_path.join(format!("{name}.yul.zbin")); + + if fs::exists(&bytecode_path_legacy) + .unwrap_or_else(|err| panic!("Invalid path: {bytecode_path_legacy:?}, {err}")) + { + read_zbin_bytecode_from_path(bytecode_path_legacy) + } else { + panic!("Can't find bytecode for '{name}' yul contract at {artifacts_path:?}") + } + } +} + /// Reads zbin bytecode from a given path. fn read_zbin_bytecode_from_path(bytecode_path: PathBuf) -> Vec { fs::read(&bytecode_path) - .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err)) + .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {bytecode_path:?}: {err}")) +} + +/// Reads zbin bytecode from a given path as utf8 text file. 
+fn read_zbin_bytecode_from_hex_file(bytecode_path: PathBuf) -> Vec { + let bytes = fs::read(&bytecode_path) + .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {bytecode_path:?}: {err}")); + + hex::decode(bytes).unwrap_or_else(|err| panic!("Invalid input file: {bytecode_path:?}, {err}")) } /// Hash of code and code which consists of 32 bytes words diff --git a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs index da4e2f5350f..7870b1ff744 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs @@ -5,7 +5,7 @@ use zk_evm_1_3_3::{ vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; @@ -221,13 +221,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { ) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - pub(crate) fn calculate_computational_gas_used< S: WriteStorage, T: PubdataSpentTracer, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs index 5ab5aa0dec9..76ca9bc5dd3 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use ethabi::Contract; use once_cell::sync::Lazy; use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, + load_contract, read_bytecode, read_yul_bytecode, BaseSystemContracts, SystemContractCode, }; use zksync_types::{ utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H160, H256, @@ -64,10 +64,8 @@ pub(crate) fn read_test_contract() -> Vec { } pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); + let artifacts_path = "contracts/system-contracts/bootloader/tests/artifacts/"; + let bootloader_code = read_yul_bytecode(artifacts_path, test); let bootloader_hash = hash_bytecode(&bootloader_code); SystemContractCode { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs index c5487379ce3..4d728962feb 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs @@ -1,7 +1,8 @@ use ethabi::Contract; use once_cell::sync::Lazy; use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, + load_contract, read_bytecode, read_yul_bytecode, read_zbin_bytecode, BaseSystemContracts, + SystemContractCode, }; use zksync_types::{ utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, @@ -59,10 +60,8 @@ pub(crate) fn read_test_contract() -> Vec { } pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); + let artifacts_path = 
"contracts/system-contracts/bootloader/tests/artifacts/"; + let bootloader_code = read_yul_bytecode(artifacts_path, test); let bootloader_hash = hash_bytecode(&bootloader_code); SystemContractCode { diff --git a/core/lib/multivm/src/versions/vm_m5/utils.rs b/core/lib/multivm/src/versions/vm_m5/utils.rs index 8c5bca674c6..a38618395b1 100644 --- a/core/lib/multivm/src/versions/vm_m5/utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/utils.rs @@ -5,7 +5,7 @@ use zk_evm_1_3_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; @@ -253,13 +253,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { ) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - /// Log query, which handle initial and repeated writes to the storage #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct StorageLogQuery { diff --git a/core/lib/multivm/src/versions/vm_m6/utils.rs b/core/lib/multivm/src/versions/vm_m6/utils.rs index d9709022fe3..912a30a4eaf 100644 --- a/core/lib/multivm/src/versions/vm_m6/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/utils.rs @@ -5,7 +5,7 @@ use zk_evm_1_3_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; @@ -256,13 +256,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { ) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - pub(crate) fn calculate_computational_gas_used< S: Storage, T: PubdataSpentTracer, diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 2e223b9d744..665b570ede7 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -64,9 +64,21 @@ describe('Upgrade test', function () { complexUpgraderAddress = '0x000000000000000000000000000000000000800f'; if (fileConfig.loadFromFile) { - const generalConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); - const contractsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'contracts.yaml' }); - const secretsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'secrets.yaml' }); + const generalConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'general.yaml' + }); + const contractsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'contracts.yaml' + }); + const secretsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'secrets.yaml' + }); ethProviderAddress = secretsConfig.l1.l1_rpc_url; web3JsonRpc = generalConfig.api.web3_json_rpc.http_url; @@ -89,7 +101,11 @@ describe('Upgrade test', function () { alice = tester.emptyWallet(); if (fileConfig.loadFromFile) { - const chainWalletConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 
'wallets.yaml' }); + const chainWalletConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'wallets.yaml' + }); adminGovWallet = new ethers.Wallet(chainWalletConfig.governor.private_key, alice._providerL1()); @@ -220,8 +236,15 @@ describe('Upgrade test', function () { }); step('Send l1 tx for saving new bootloader', async () => { - const path = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin`; - const bootloaderCode = ethers.hexlify(fs.readFileSync(path)); + const path = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul/playground_batch.yul.zbin`; + let bootloaderCode; + if (fs.existsSync(path)) { + bootloaderCode = '0x'.concat(fs.readFileSync(path).toString()); + } else { + const legacyPath = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin`; + bootloaderCode = ethers.hexlify(fs.readFileSync(legacyPath)); + } + bootloaderHash = ethers.hexlify(zksync.utils.hashBytecode(bootloaderCode)); const txHandle = await tester.syncWallet.requestExecute({ contractAddress: ethers.ZeroAddress, From da9f645a41d5413999658794169185574b802931 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 10 Oct 2024 15:20:58 +0200 Subject: [PATCH 036/140] feat(zk_toolbox): improved wallet handling (#3058) Made the tool fail with an error when wallets.yaml has parsing errors, rather than displaying "not found". Made the tool fail when the wallets.yaml is unparseable instead of blindly overwriting it (in Localhost mode). Made the tool check that the address actually matches the private key when parsing the config. Made the tool check that the private key is actually a valid private key when parsing the config. Made the code pass around Wallet, rather than weakly typed H256. 
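For illustration only, here is a minimal standalone sketch of the address/private-key consistency check described above (the helper name `checked_wallet` is invented for this example; the actual change in the diff below performs the equivalent check inside `Deserialize for Wallet` when `wallets.yaml` is parsed):

```rust
use anyhow::bail;
use ethers::{
    signers::{LocalWallet, Signer},
    types::{Address, H256},
};

/// Parses a raw private key and verifies that it actually controls `address`.
fn checked_wallet(address: Address, private_key: H256) -> anyhow::Result<LocalWallet> {
    // An invalid scalar (e.g. all-zero bytes) is rejected here instead of panicking later.
    let wallet = LocalWallet::from_bytes(private_key.as_bytes())?;
    if wallet.address() != address {
        bail!(
            "address does not match private key: got address {:#x}, want {:#x}",
            address,
            wallet.address()
        );
    }
    Ok(wallet)
}
```

With this shape, a malformed or inconsistent `wallets.yaml` surfaces as an explicit parse/validation error instead of a misleading "not found" or a silent overwrite.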
--- zk_toolbox/crates/common/src/ethereum.rs | 5 +- zk_toolbox/crates/common/src/forge.rs | 12 ++-- zk_toolbox/crates/common/src/wallets.rs | 70 ++++++++++++++----- zk_toolbox/crates/config/src/chain.rs | 4 +- zk_toolbox/crates/config/src/ecosystem.rs | 4 +- zk_toolbox/crates/config/src/wallets.rs | 12 +--- zk_toolbox/crates/types/src/lib.rs | 2 +- .../zk_inception/src/accept_ownership.rs | 15 ++-- .../commands/chain/accept_chain_ownership.rs | 2 +- .../src/commands/chain/deploy_l2_contracts.rs | 5 +- .../src/commands/chain/deploy_paymaster.rs | 5 +- .../src/commands/chain/init/mod.rs | 4 +- .../src/commands/chain/register_chain.rs | 2 +- .../chain/set_token_multiplier_setter.rs | 11 +-- .../src/commands/chain/setup_legacy_bridge.rs | 5 +- .../src/commands/consensus/mod.rs | 6 +- .../src/commands/ecosystem/common.rs | 2 +- .../src/commands/ecosystem/init.rs | 17 ++--- .../crates/zk_inception/src/utils/forge.rs | 14 ++-- .../zk_supervisor/src/commands/test/utils.rs | 9 +-- 20 files changed, 111 insertions(+), 95 deletions(-) diff --git a/zk_toolbox/crates/common/src/ethereum.rs b/zk_toolbox/crates/common/src/ethereum.rs index 33caaad9789..2100746fecf 100644 --- a/zk_toolbox/crates/common/src/ethereum.rs +++ b/zk_toolbox/crates/common/src/ethereum.rs @@ -6,18 +6,17 @@ use ethers::{ middleware::MiddlewareBuilder, prelude::{Http, LocalWallet, Provider, Signer, SignerMiddleware}, providers::Middleware, - types::{Address, TransactionRequest, H256}, + types::{Address, TransactionRequest}, }; use types::TokenInfo; use crate::{logger, wallets::Wallet}; pub fn create_ethers_client( - private_key: H256, + mut wallet: LocalWallet, l1_rpc: String, chain_id: Option, ) -> anyhow::Result, ethers::prelude::Wallet>> { - let mut wallet = LocalWallet::from_bytes(private_key.as_bytes())?; if let Some(chain_id) = chain_id { wallet = wallet.with_chain_id(chain_id); } diff --git a/zk_toolbox/crates/common/src/forge.rs b/zk_toolbox/crates/common/src/forge.rs index 7fd5399cc66..e573e492aa4 100644 --- a/zk_toolbox/crates/common/src/forge.rs +++ b/zk_toolbox/crates/common/src/forge.rs @@ -143,10 +143,12 @@ impl ForgeScript { } // Do not start the script if balance is not enough - pub fn private_key(&self) -> Option { + pub fn private_key(&self) -> Option { self.args.args.iter().find_map(|a| { if let ForgeScriptArg::PrivateKey { private_key } = a { - Some(H256::from_str(private_key).unwrap()) + let key = H256::from_str(private_key).unwrap(); + let key = LocalWallet::from_bytes(key.as_bytes()).unwrap(); + Some(key) } else { None } @@ -164,11 +166,7 @@ impl ForgeScript { } pub fn address(&self) -> Option
<Address>
{ - self.private_key().and_then(|a| { - LocalWallet::from_bytes(a.as_bytes()) - .ok() - .map(|a| Address::from_slice(a.address().as_bytes())) - }) + self.private_key().map(|k| k.address()) } pub async fn get_the_balance(&self) -> anyhow::Result> { diff --git a/zk_toolbox/crates/common/src/wallets.rs b/zk_toolbox/crates/common/src/wallets.rs index ed5e11b3261..43a9864474c 100644 --- a/zk_toolbox/crates/common/src/wallets.rs +++ b/zk_toolbox/crates/common/src/wallets.rs @@ -1,31 +1,70 @@ use ethers::{ - core::rand::Rng, + core::rand::{CryptoRng, Rng}, signers::{coins_bip39::English, LocalWallet, MnemonicBuilder, Signer}, types::{Address, H256}, }; use serde::{Deserialize, Serialize}; +use types::parse_h256; -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Wallet { +#[derive(Serialize, Deserialize)] +struct WalletSerde { pub address: Address, pub private_key: Option, } -impl Wallet { - pub fn random(rng: &mut impl Rng) -> Self { - let private_key = H256::random_using(rng); - let local_wallet = LocalWallet::from_bytes(private_key.as_bytes()).unwrap(); +#[derive(Debug, Clone)] +pub struct Wallet { + pub address: Address, + pub private_key: Option, +} - Self { - address: Address::from_slice(local_wallet.address().as_bytes()), - private_key: Some(private_key), +impl<'de> Deserialize<'de> for Wallet { + fn deserialize>(d: D) -> Result { + let x = WalletSerde::deserialize(d)?; + Ok(match x.private_key { + None => Self { + address: x.address, + private_key: None, + }, + Some(k) => { + let k = LocalWallet::from_bytes(k.as_bytes()).map_err(serde::de::Error::custom)?; + if k.address() != x.address { + return Err(serde::de::Error::custom(format!( + "address does not match private key: got address {:#x}, want {:#x}", + x.address, + k.address(), + ))); + } + Self::new(k) + } + }) + } +} + +impl Serialize for Wallet { + fn serialize(&self, s: S) -> Result { + WalletSerde { + address: self.address, + private_key: self.private_key_h256(), } + .serialize(s) + } +} + +impl Wallet { + pub fn private_key_h256(&self) -> Option { + self.private_key + .as_ref() + .map(|k| parse_h256(k.signer().to_bytes().as_slice()).unwrap()) + } + + pub fn random(rng: &mut (impl Rng + CryptoRng)) -> Self { + Self::new(LocalWallet::new(rng)) } - pub fn new_with_key(private_key: H256) -> Self { - let local_wallet = LocalWallet::from_bytes(private_key.as_bytes()).unwrap(); + pub fn new(private_key: LocalWallet) -> Self { Self { - address: Address::from_slice(local_wallet.address().as_bytes()), + address: private_key.address(), private_key: Some(private_key), } } @@ -35,14 +74,13 @@ impl Wallet { .phrase(mnemonic) .derivation_path(&format!("{}/{}", base_path, index))? 
.build()?; - let private_key = H256::from_slice(&wallet.signer().to_bytes()); - Ok(Self::new_with_key(private_key)) + Ok(Self::new(wallet)) } pub fn empty() -> Self { Self { address: Address::zero(), - private_key: Some(H256::zero()), + private_key: None, } } } diff --git a/zk_toolbox/crates/config/src/chain.rs b/zk_toolbox/crates/config/src/chain.rs index affc8ccc770..d6b6e2b866b 100644 --- a/zk_toolbox/crates/config/src/chain.rs +++ b/zk_toolbox/crates/config/src/chain.rs @@ -87,8 +87,8 @@ impl ChainConfig { pub fn get_wallets_config(&self) -> anyhow::Result { let path = self.configs.join(WALLETS_FILE); - if let Ok(wallets) = WalletsConfig::read(self.get_shell(), &path) { - return Ok(wallets); + if self.get_shell().path_exists(&path) { + return WalletsConfig::read(self.get_shell(), &path); } if self.wallet_creation == WalletCreation::Localhost { let wallets = create_localhost_wallets(self.get_shell(), &self.link_to_code, self.id)?; diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs index a5fcd8b7219..7ac81cd5394 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zk_toolbox/crates/config/src/ecosystem.rs @@ -196,8 +196,8 @@ impl EcosystemConfig { pub fn get_wallets(&self) -> anyhow::Result { let path = self.config.join(WALLETS_FILE); - if let Ok(wallets) = WalletsConfig::read(self.get_shell(), &path) { - return Ok(wallets); + if self.get_shell().path_exists(&path) { + return WalletsConfig::read(self.get_shell(), &path); } if self.wallet_creation == WalletCreation::Localhost { // Use 0 id for ecosystem wallets diff --git a/zk_toolbox/crates/config/src/wallets.rs b/zk_toolbox/crates/config/src/wallets.rs index 9c87453954e..c650781bff5 100644 --- a/zk_toolbox/crates/config/src/wallets.rs +++ b/zk_toolbox/crates/config/src/wallets.rs @@ -1,6 +1,5 @@ use common::wallets::Wallet; -use ethers::types::H256; -use rand::Rng; +use rand::{CryptoRng, Rng}; use serde::{Deserialize, Serialize}; use crate::{ @@ -20,7 +19,7 @@ pub struct WalletsConfig { impl WalletsConfig { /// Generate random wallets - pub fn random(rng: &mut impl Rng) -> Self { + pub fn random(rng: &mut (impl CryptoRng + Rng)) -> Self { Self { deployer: Some(Wallet::random(rng)), operator: Wallet::random(rng), @@ -42,13 +41,6 @@ impl WalletsConfig { token_multiplier_setter: Some(Wallet::empty()), } } - pub fn deployer_private_key(&self) -> Option { - self.deployer.as_ref().and_then(|wallet| wallet.private_key) - } - - pub fn governor_private_key(&self) -> Option { - self.governor.private_key - } } impl FileConfigWithDefaultName for WalletsConfig { diff --git a/zk_toolbox/crates/types/src/lib.rs b/zk_toolbox/crates/types/src/lib.rs index 8b647057105..075e39345bc 100644 --- a/zk_toolbox/crates/types/src/lib.rs +++ b/zk_toolbox/crates/types/src/lib.rs @@ -10,5 +10,5 @@ pub use prover_mode::*; pub use token_info::*; pub use wallet_creation::*; pub use zksync_basic_types::{ - commitment::L1BatchCommitmentMode, protocol_version::ProtocolSemanticVersion, + commitment::L1BatchCommitmentMode, parse_h256, protocol_version::ProtocolSemanticVersion, }; diff --git a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs b/zk_toolbox/crates/zk_inception/src/accept_ownership.rs index d2bab928374..474e76e599a 100644 --- a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs +++ b/zk_toolbox/crates/zk_inception/src/accept_ownership.rs @@ -1,13 +1,10 @@ use common::{ forge::{Forge, ForgeScript, ForgeScriptArgs}, spinner::Spinner, + wallets::Wallet, }; use 
config::{forge_interface::script_params::ACCEPT_GOVERNANCE_SCRIPT_PARAMS, EcosystemConfig}; -use ethers::{ - abi::parse_abi, - contract::BaseContract, - types::{Address, H256}, -}; +use ethers::{abi::parse_abi, contract::BaseContract, types::Address}; use lazy_static::lazy_static; use xshell::Shell; @@ -31,7 +28,7 @@ pub async fn accept_admin( shell: &Shell, ecosystem_config: &EcosystemConfig, admin: Address, - governor: Option, + governor: &Wallet, target_address: Address, forge_args: &ForgeScriptArgs, l1_rpc_url: String, @@ -62,7 +59,7 @@ pub async fn accept_owner( shell: &Shell, ecosystem_config: &EcosystemConfig, governor_contract: Address, - governor: Option, + governor: &Wallet, target_address: Address, forge_args: &ForgeScriptArgs, l1_rpc_url: String, @@ -89,10 +86,10 @@ pub async fn accept_owner( async fn accept_ownership( shell: &Shell, - governor: Option, + governor: &Wallet, mut forge: ForgeScript, ) -> anyhow::Result<()> { - forge = fill_forge_private_key(forge, governor)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; let spinner = Spinner::new(MSG_ACCEPTING_GOVERNANCE_SPINNER); forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs index 37d69fcf5bc..cf3e2981b3c 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs @@ -30,7 +30,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { shell, &ecosystem_config, contracts.l1.chain_admin_addr, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts.l1.diamond_proxy_addr, &args, l1_rpc_url.clone(), diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs index 8f0e04b5338..5a4f1f86f35 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs @@ -277,10 +277,7 @@ async fn call_forge( forge = forge.with_signature(signature); } - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&ecosystem_config.get_wallets()?.governor))?; check_the_balance(&forge).await?; forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs index 0da56f0c962..4a93fcc089f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs @@ -56,10 +56,7 @@ pub async fn deploy_paymaster( if let Some(address) = sender { forge = forge.with_sender(address); } else { - forge = fill_forge_private_key( - forge, - chain_config.get_wallets_config()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&chain_config.get_wallets_config()?.governor))?; } if broadcast { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs index ac80a5b98f7..d92c56d2eb1 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs +++ 
b/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs @@ -107,7 +107,7 @@ pub async fn init( shell, ecosystem_config, contracts_config.l1.chain_admin_addr, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.diamond_proxy_addr, &init_args.forge_args.clone(), init_args.l1_rpc_url.clone(), @@ -121,7 +121,7 @@ pub async fn init( set_token_multiplier_setter( shell, ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.chain_admin_addr, chain_config .get_wallets_config() diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs index 9f2ff41f897..65ee05a1ea5 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs @@ -81,7 +81,7 @@ pub async fn register_chain( if let Some(address) = sender { forge = forge.with_sender(address); } else { - forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?; + forge = fill_forge_private_key(forge, Some(&config.get_wallets()?.governor))?; check_the_balance(&forge).await?; } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs index 475725cd14e..4a6cd31b2c0 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs @@ -3,12 +3,13 @@ use common::{ forge::{Forge, ForgeScript, ForgeScriptArgs}, logger, spinner::Spinner, + wallets::Wallet, }; use config::{forge_interface::script_params::ACCEPT_GOVERNANCE_SCRIPT_PARAMS, EcosystemConfig}; use ethers::{abi::parse_abi, contract::BaseContract, utils::hex}; use lazy_static::lazy_static; use xshell::Shell; -use zksync_basic_types::{Address, H256}; +use zksync_basic_types::Address; use crate::{ messages::{ @@ -52,7 +53,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { set_token_multiplier_setter( shell, &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.chain_admin_addr, token_multiplier_setter_address, &args.clone(), @@ -72,7 +73,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { pub async fn set_token_multiplier_setter( shell: &Shell, ecosystem_config: &EcosystemConfig, - governor: Option, + governor: &Wallet, chain_admin_address: Address, target_address: Address, forge_args: &ForgeScriptArgs, @@ -105,10 +106,10 @@ pub async fn set_token_multiplier_setter( async fn update_token_multiplier_setter( shell: &Shell, - governor: Option, + governor: &Wallet, mut forge: ForgeScript, ) -> anyhow::Result<()> { - forge = fill_forge_private_key(forge, governor)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; forge.run(shell)?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs index 925014fe4e6..f61c640ffb6 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs @@ -59,10 
+59,7 @@ pub async fn setup_legacy_bridge( ) .with_broadcast(); - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&ecosystem_config.get_wallets()?.governor))?; let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); check_the_balance(&forge).await?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs index f30e37af4bc..a21ba2d62cf 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs @@ -11,7 +11,7 @@ use ethers::{ contract::{FunctionCall, Multicall}, middleware::{Middleware, NonceManagerMiddleware, SignerMiddleware}, providers::{Http, JsonRpcClient, PendingTransaction, Provider, RawCall as _}, - signers::{LocalWallet, Signer as _}, + signers::Signer as _, types::{Address, BlockId, H256}, }; use xshell::Shell; @@ -182,9 +182,7 @@ impl Setup { .governor .private_key .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?; - let governor = LocalWallet::from_bytes(governor.as_bytes()) - .context("LocalWallet::from_bytes()")? - .with_chain_id(self.genesis.l2_chain_id.as_u64()); + let governor = governor.with_chain_id(self.genesis.l2_chain_id.as_u64()); let provider = self.provider().context("provider()")?; let signer = SignerMiddleware::new(provider, governor.clone()); // Allows us to send next transaction without waiting for the previous to complete. diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs index 950d39876b0..42b8f79b97e 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs @@ -54,7 +54,7 @@ pub async fn deploy_l1( if let Some(address) = sender { forge = forge.with_sender(address); } else { - forge = fill_forge_private_key(forge, wallets_config.deployer_private_key())?; + forge = fill_forge_private_key(forge, wallets_config.deployer.as_ref())?; } if broadcast { diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 6b64b740aed..bf5a4605c09 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -151,10 +151,7 @@ async fn deploy_erc20( .with_rpc_url(l1_rpc_url) .with_broadcast(); - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.deployer_private_key(), - )?; + forge = fill_forge_private_key(forge, ecosystem_config.get_wallets()?.deployer.as_ref())?; let spinner = Spinner::new(MSG_DEPLOYING_ERC20_SPINNER); check_the_balance(&forge).await?; @@ -262,7 +259,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.ecosystem_contracts.bridgehub_proxy_addr, &forge_args, l1_rpc_url.clone(), @@ -273,7 +270,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.chain_admin_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.ecosystem_contracts.bridgehub_proxy_addr, &forge_args, l1_rpc_url.clone(), @@ -284,7 +281,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - 
config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.bridges.shared.l1_address, &forge_args, l1_rpc_url.clone(), @@ -295,7 +292,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.chain_admin_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.bridges.shared.l1_address, &forge_args, l1_rpc_url.clone(), @@ -306,7 +303,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config .ecosystem_contracts .state_transition_proxy_addr, @@ -319,7 +316,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.chain_admin_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config .ecosystem_contracts .state_transition_proxy_addr, diff --git a/zk_toolbox/crates/zk_inception/src/utils/forge.rs b/zk_toolbox/crates/zk_inception/src/utils/forge.rs index cabc8ff7566..355cf7b5f93 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/forge.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/forge.rs @@ -1,6 +1,6 @@ -use anyhow::anyhow; -use common::forge::ForgeScript; -use ethers::types::{H256, U256}; +use anyhow::Context as _; +use common::{forge::ForgeScript, wallets::Wallet}; +use ethers::types::U256; use crate::{ consts::MINIMUM_BALANCE_FOR_WALLET, @@ -9,10 +9,14 @@ use crate::{ pub fn fill_forge_private_key( mut forge: ForgeScript, - private_key: Option, + wallet: Option<&Wallet>, ) -> anyhow::Result { if !forge.wallet_args_passed() { - forge = forge.with_private_key(private_key.ok_or(anyhow!(MSG_DEPLOYER_PK_NOT_SET_ERR))?); + forge = forge.with_private_key( + wallet + .and_then(|w| w.private_key_h256()) + .context(MSG_DEPLOYER_PK_NOT_SET_ERR)?, + ); } Ok(forge) } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs index 8656ff44d31..d980490c3d5 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs @@ -43,10 +43,11 @@ impl TestWallets { } pub fn get_test_pk(&self, chain_config: &ChainConfig) -> anyhow::Result { - self.get_test_wallet(chain_config)? - .private_key - .ok_or(anyhow::Error::msg("Private key not found")) - .map(|pk| pk.encode_hex::()) + Ok(self + .get_test_wallet(chain_config)? + .private_key_h256() + .context("Private key not found")? 
+ .encode_hex()) } pub async fn init_test_wallet( From 6f5b5acc86fa7a0adb4bcf63e1e26fc2c7310ed2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Fran=C3=A7a?= Date: Thu, 10 Oct 2024 16:20:47 +0100 Subject: [PATCH 037/140] fix: Small fixes to EN Grafana dashboard (#3062) --- .../provisioning/dashboards/Consensus.json | 12 +- .../provisioning/dashboards/General.json | 213 ++++++++++++++---- .../provisioning/dashboards/default.yml | 1 + 3 files changed, 181 insertions(+), 45 deletions(-) diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json index be869ead40b..74b4b822801 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json @@ -24,7 +24,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 3, + "id": 2, "links": [], "liveNow": false, "panels": [ @@ -1005,7 +1005,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "Rate of RPC client requests, in packets per second.", + "description": "Rate of RPC client requests, in requests per second.", "fieldConfig": { "defaults": { "color": { @@ -1054,7 +1054,7 @@ } ] }, - "unit": "pps" + "unit": "reqps" }, "overrides": [] }, @@ -1098,7 +1098,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "Rate of RPC server responses, in packets per second.", + "description": "Rate of RPC server responses, in requests per second.", "fieldConfig": { "defaults": { "color": { @@ -1147,7 +1147,7 @@ } ] }, - "unit": "pps" + "unit": "reqps" }, "overrides": [] }, @@ -1202,6 +1202,6 @@ "timezone": "", "title": "Consensus", "uid": "STAAEORNk", - "version": 4, + "version": 2, "weekStart": "" } \ No newline at end of file diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json index d7177ae802e..0b3cb681e3b 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json @@ -24,7 +24,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 2, + "id": 1, "links": [], "liveNow": false, "panels": [ @@ -103,13 +103,49 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 0, + "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" } }, "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, "unit": "bytes" }, "overrides": [] @@ -123,18 +159,11 @@ "id": 2, "options": { "legend": { + "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - 
"values": false - }, "tooltip": { "mode": "single", "sort": "none" @@ -167,7 +196,7 @@ } ], "title": "Total disk space usage", - "type": "piechart" + "type": "timeseries" }, { "datasource": { @@ -409,6 +438,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "Shows the batch numbers on the local node and the server node.", "fieldConfig": { "defaults": { "color": { @@ -421,7 +451,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 33, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -470,13 +500,13 @@ "x": 12, "y": 16 }, - "id": 4, + "id": 39, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": false + "showLegend": true }, "tooltip": { "mode": "single", @@ -489,14 +519,28 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "builder", "exemplar": true, - "expr": "sum by (stage) (external_node_sync_lag)", + "expr": "sum by(stage) (external_node_fetcher_l1_batch{stage=\"open\"})", "interval": "", - "legendFormat": "", + "legendFormat": "Server", + "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "zksync_consensus_storage_batch_store_next_persisted_batch", + "hide": false, + "legendFormat": "Local", + "range": true, + "refId": "B" } ], - "title": "Sync lag (blocks)", + "title": "L1 batch sync lag", "transformations": [], "type": "timeseries" }, @@ -546,8 +590,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -598,7 +641,6 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "The percentage of transactions that are being reverted or that are succeeding.", "fieldConfig": { "defaults": { "color": { @@ -610,8 +652,8 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "drawStyle": "bars", - "fillOpacity": 0, + "drawStyle": "line", + "fillOpacity": 33, "gradientMode": "none", "hideFrom": { "legend": false, @@ -619,16 +661,19 @@ "viz": false }, "lineInterpolation": "linear", - "lineWidth": 2, + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", - "spanNulls": false, + "spanNulls": true, "stacking": { "group": "A", - "mode": "percent" + "mode": "none" }, "thresholdsStyle": { "mode": "off" @@ -639,8 +684,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -657,13 +701,13 @@ "x": 12, "y": 24 }, - "id": 38, + "id": 4, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": true + "showLegend": false }, "tooltip": { "mode": "single", @@ -676,14 +720,15 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "builder", - "expr": "sum by(status) (increase(server_state_keeper_tx_execution_result[1h]))", - "legendFormat": "__auto", - "range": true, + "exemplar": true, + "expr": "sum by (stage) (external_node_sync_lag)", + "interval": "", + "legendFormat": "", "refId": "A" } ], - "title": "Transactions execution status (%)", + "title": "L2 blocks sync lag", + "transformations": [], "type": "timeseries" }, { @@ -731,8 +776,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -778,6 +822,98 @@ "title": "Avg number of transactions in L2 block", "type": "timeseries" }, + { + "datasource": { + 
"type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "The percentage of transactions that are being reverted or that are succeeding.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "percent" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 38, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "sum by(status) (increase(server_state_keeper_tx_execution_result[1h]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Transactions execution status (%)", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -823,8 +959,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -839,7 +974,7 @@ "h": 8, "w": 12, "x": 12, - "y": 32 + "y": 40 }, "id": 34, "options": { @@ -886,6 +1021,6 @@ "timezone": "", "title": "General", "uid": "1", - "version": 9, + "version": 3, "weekStart": "" } \ No newline at end of file diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml index 65f33c78b0e..fac65298bbc 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml @@ -5,6 +5,7 @@ providers: orgId: 1 folder: '' type: file + allowUiUpdates: true disableDeletion: false updateIntervalSeconds: 10 # How often Grafana will scan for changed dashboards options: From 4b96b89c6afc0b50705cc8e2fd634c4e1c469855 Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Thu, 10 Oct 2024 17:33:56 +0200 Subject: [PATCH 038/140] feat(zkstack_cli): Update PATH extension command in containers (#3063) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Update PATH extension command in containers ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
--- docker/Makefile | 3 +-- docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile | 4 +--- docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile | 4 +--- docker/zk-environment/Dockerfile | 4 +--- 4 files changed, 4 insertions(+), 11 deletions(-) diff --git a/docker/Makefile b/docker/Makefile index d4b94680408..0229f32993a 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -65,8 +65,7 @@ check-contracts: prepare-contracts: check-tools check-contracts @cd ../ && \ export ZKSYNC_HOME=$$(pwd) && \ - export PATH=$$PATH:$${ZKSYNC_HOME}/bin && \ - export PATH=$$PATH:$${ZKSYNC_HOME}/zkstack_cli/zkstackup && \ + export PATH=$$PATH:$${ZKSYNC_HOME}/bin:$${ZKSYNC_HOME}/zkstack_cli/zkstackup && \ zkt || true && \ zk_supervisor contracts && \ mkdir -p contracts/l1-contracts/artifacts diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile index 79d3bb3d6af..90f089ba8bd 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile @@ -104,9 +104,7 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" \ - PATH="${ZKSYNC_HOME}/zkstack_cli/zkstackup:${PATH}" \ - PATH="${HOME}/.local/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile index 48bd4469639..b6b023a5b7f 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile @@ -93,9 +93,7 @@ RUN wget -c https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksol # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" \ - PATH="${ZKSYNC_HOME}/zkstack_cli/zkstackup:${PATH}" \ - PATH="${HOME}/.local/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 04b2cb947a0..c04e5720e4d 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -164,9 +164,7 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" \ - PATH="${ZKSYNC_HOME}/zkstack_cli/zkstackup:${PATH}" \ - PATH="${HOME}/.local/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache From 8ef3be34d87f735c70f965b9e0dcd47cb101e44c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Fran=C3=A7a?= Date: Thu, 10 Oct 2024 16:28:57 +0100 Subject: [PATCH 039/140] docs: Update docs to refer to debug page (#3060) --- docs/guides/external-node/00_quick_start.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index 67a1b89eef5..b24571d342f 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -6,6 +6,8 @@ Install `docker compose` and `Docker` ## Running 
ZKsync node locally +These commands start ZKsync node locally inside docker. + To start a mainnet instance, run: ```sh @@ -34,9 +36,10 @@ cd docker-compose-examples sudo docker compose --file testnet-external-node-docker-compose.yml down --volumes ``` -You can see the status of the node (after recovery) in [local grafana dashboard](http://localhost:3000/dashboards). +### Observability -Those commands start ZKsync node locally inside docker. +You can see the status of the node (after recovery) in [local grafana dashboard](http://localhost:3000/dashboards). +You can also access a debug page with more information about the node [here](http://localhost:5000). The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be accessed on port `3061`. From 0f45d24fc793f0c26f3c6d2e2f7bac2b5ada8c66 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Thu, 10 Oct 2024 22:36:02 +0700 Subject: [PATCH 040/140] fix(ci): Fix pseudo-check for contacts submodule in Makefile (#3065) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fix pseudo-check for contacts submodule in Makefile ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- docker/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Makefile b/docker/Makefile index 0229f32993a..d7dc80c6f34 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -55,7 +55,7 @@ check-tools: check-nodejs check-yarn check-rust check-sqlx-cli check-docker chec # Check that contracts are checkout properly check-contracts: - @if [ ! -d ../contracts/l1-contracts/lib/forge-std/foundry.toml ] || [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \ + @if [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \ echo "l1-contracts git submodule is missing. Please re-download repo with 'git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git'"; \ exit 1; \ fi From 748ee1b831c5fc81f6df2442dab3c862f5e19462 Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Fri, 11 Oct 2024 18:18:43 +1100 Subject: [PATCH 041/140] chore(docs): run fmt on docs (#3072) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Stuff was left misformatted after https://github.com/matter-labs/zksync-era/pull/3060. Seems like we don't run `zks fmt` on PRs that only touch MD files ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
--- .github/workflows/ci-docs-reusable.yml | 1 + docs/guides/external-node/00_quick_start.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-docs-reusable.yml b/.github/workflows/ci-docs-reusable.yml index 5b1d5a9bcdf..4138a7eb7a3 100644 --- a/.github/workflows/ci-docs-reusable.yml +++ b/.github/workflows/ci-docs-reusable.yml @@ -35,4 +35,5 @@ jobs: - name: Lints run: | + ci_run zk_supervisor fmt --check ci_run zk_supervisor lint -t md --check diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index b24571d342f..07e52085cf4 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -38,8 +38,8 @@ sudo docker compose --file testnet-external-node-docker-compose.yml down --volum ### Observability -You can see the status of the node (after recovery) in [local grafana dashboard](http://localhost:3000/dashboards). -You can also access a debug page with more information about the node [here](http://localhost:5000). +You can see the status of the node (after recovery) in [local grafana dashboard](http://localhost:3000/dashboards). You +can also access a debug page with more information about the node [here](http://localhost:5000). The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be accessed on port `3061`. From 0841f1ecbf2b3e4840276fb252d24bc586f29c0e Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Fri, 11 Oct 2024 10:47:01 +0200 Subject: [PATCH 042/140] ci: Add docker and build rules for prover-autoscaler (#3069) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add Dockerfile and build rules for prover-autoscaler. ## Why ❔ To build prover-autoscaler image. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
--- .../workflows/new-build-prover-template.yml | 7 +++--- docker/prover-autoscaler/Dockerfile | 25 +++++++++++++++++++ 2 files changed, 29 insertions(+), 3 deletions(-) create mode 100644 docker/prover-autoscaler/Dockerfile diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml index 60c152213e6..944551c9871 100644 --- a/.github/workflows/new-build-prover-template.yml +++ b/.github/workflows/new-build-prover-template.yml @@ -40,7 +40,7 @@ on: jobs: get-protocol-version: name: Get protocol version - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} steps: @@ -86,7 +86,7 @@ jobs: needs: get-protocol-version env: PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] strategy: matrix: components: @@ -96,6 +96,7 @@ jobs: - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor + - prover-autoscaler steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: @@ -166,7 +167,7 @@ jobs: copy-images: name: Copy images between docker registries - needs: [ build-images, get-protocol-version ] + needs: [build-images, get-protocol-version] env: PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} runs-on: matterlabs-ci-runner diff --git a/docker/prover-autoscaler/Dockerfile b/docker/prover-autoscaler/Dockerfile new file mode 100644 index 00000000000..246e8099ffd --- /dev/null +++ b/docker/prover-autoscaler/Dockerfile @@ -0,0 +1,25 @@ +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +ARG DEBIAN_FRONTEND=noninteractive + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + +WORKDIR /usr/src/zksync +COPY . . + +RUN cd prover && cargo build --release --bin zksync_prover_autoscaler + +FROM ghcr.io/matter-labs/zksync-runtime-base:latest + +COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_autoscaler /usr/bin/ + +ENTRYPOINT ["/usr/bin/zksync_prover_autoscaler"] From bdeb411c593ac3d5e16158e64c4210bb00edcb0c Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Fri, 11 Oct 2024 19:48:44 +1100 Subject: [PATCH 043/140] fix(state-keeper): ensure unsealed batch is present during IO init (#3071) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Ensures unsealed L1 batch is present in the DB even if we start with re-execution. ## Why ❔ Leftover bug after #2846 ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
--- core/lib/dal/src/blocks_dal.rs | 89 ++++++++++++------- core/lib/types/src/block.rs | 12 +++ .../src/types/inputs/l1_batch_env.rs | 20 ++++- core/node/genesis/src/lib.rs | 8 +- core/node/node_sync/src/external_io.rs | 23 +++-- core/node/state_keeper/src/io/mempool.rs | 25 ++++-- core/node/state_keeper/src/io/persistence.rs | 8 +- core/node/state_keeper/src/io/tests/mod.rs | 34 +++++++ 8 files changed, 158 insertions(+), 61 deletions(-) diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 347152e3c38..d59f95192c6 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -578,11 +578,14 @@ impl BlocksDal<'_, '_> { /// null or set to default value for the corresponding type). pub async fn insert_l1_batch( &mut self, - number: L1BatchNumber, - timestamp: u64, - protocol_version: Option, - fee_address: Address, - batch_fee_input: BatchFeeInput, + unsealed_batch_header: UnsealedL1BatchHeader, + ) -> DalResult<()> { + Self::insert_l1_batch_inner(unsealed_batch_header, self.storage).await + } + + async fn insert_l1_batch_inner( + unsealed_batch_header: UnsealedL1BatchHeader, + conn: &mut Connection<'_, Core>, ) -> DalResult<()> { sqlx::query!( r#" @@ -625,21 +628,51 @@ impl BlocksDal<'_, '_> { FALSE ) "#, - i64::from(number.0), - timestamp as i64, - protocol_version.map(|v| v as i32), - fee_address.as_bytes(), - batch_fee_input.l1_gas_price() as i64, - batch_fee_input.fair_l2_gas_price() as i64, - batch_fee_input.fair_pubdata_price() as i64, + i64::from(unsealed_batch_header.number.0), + unsealed_batch_header.timestamp as i64, + unsealed_batch_header.protocol_version.map(|v| v as i32), + unsealed_batch_header.fee_address.as_bytes(), + unsealed_batch_header.fee_input.l1_gas_price() as i64, + unsealed_batch_header.fee_input.fair_l2_gas_price() as i64, + unsealed_batch_header.fee_input.fair_pubdata_price() as i64, ) .instrument("insert_l1_batch") - .with_arg("number", &number) - .execute(self.storage) + .with_arg("number", &unsealed_batch_header.number) + .execute(conn) .await?; Ok(()) } + pub async fn ensure_unsealed_l1_batch_exists( + &mut self, + unsealed_batch: UnsealedL1BatchHeader, + ) -> anyhow::Result<()> { + let mut transaction = self.storage.start_transaction().await?; + let unsealed_batch_fetched = Self::get_unsealed_l1_batch_inner(&mut transaction).await?; + + match unsealed_batch_fetched { + None => { + tracing::info!( + "Unsealed batch #{} could not be found; inserting", + unsealed_batch.number + ); + Self::insert_l1_batch_inner(unsealed_batch, &mut transaction).await?; + } + Some(unsealed_batch_fetched) => { + if unsealed_batch_fetched.number != unsealed_batch.number { + anyhow::bail!( + "fetched unsealed L1 batch #{} does not conform to expected L1 batch #{}", + unsealed_batch_fetched.number, + unsealed_batch.number + ) + } + } + } + + transaction.commit().await?; + Ok(()) + } + /// Marks provided L1 batch as sealed and populates it with all the runtime information. /// /// Errors if the batch does not exist. 
@@ -744,6 +777,12 @@ impl BlocksDal<'_, '_> { } pub async fn get_unsealed_l1_batch(&mut self) -> DalResult> { + Self::get_unsealed_l1_batch_inner(self.storage).await + } + + async fn get_unsealed_l1_batch_inner( + conn: &mut Connection<'_, Core>, + ) -> DalResult> { let batch = sqlx::query_as!( UnsealedStorageL1Batch, r#" @@ -761,8 +800,8 @@ impl BlocksDal<'_, '_> { NOT is_sealed "#, ) - .instrument("get_last_committed_to_eth_l1_batch") - .fetch_optional(self.storage) + .instrument("get_unsealed_l1_batch") + .fetch_optional(conn) .await?; Ok(batch.map(|b| b.into())) @@ -2621,11 +2660,7 @@ impl BlocksDal<'_, '_> { pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> anyhow::Result<()> { self.insert_l1_batch( - header.number, - header.timestamp, - header.protocol_version, - header.fee_address, - BatchFeeInput::pubdata_independent(100, 100, 100), + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), ) .await?; self.mark_l1_batch_as_sealed( @@ -2940,11 +2975,7 @@ mod tests { }; conn.blocks_dal() .insert_l1_batch( - header.number, - header.timestamp, - header.protocol_version, - header.fee_address, - BatchFeeInput::pubdata_independent(100, 100, 100), + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), ) .await .unwrap(); @@ -2958,11 +2989,7 @@ mod tests { predicted_gas += predicted_gas; conn.blocks_dal() .insert_l1_batch( - header.number, - header.timestamp, - header.protocol_version, - header.fee_address, - BatchFeeInput::pubdata_independent(100, 100, 100), + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), ) .await .unwrap(); diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 361e9ea56d2..9211a6f1d8c 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -68,6 +68,18 @@ pub struct L1BatchHeader { pub fee_address: Address, } +impl L1BatchHeader { + pub fn to_unsealed_header(&self, fee_input: BatchFeeInput) -> UnsealedL1BatchHeader { + UnsealedL1BatchHeader { + number: self.number, + timestamp: self.timestamp, + protocol_version: self.protocol_version, + fee_address: self.fee_address, + fee_input, + } + } +} + #[derive(Debug, Clone, PartialEq)] pub struct UnsealedL1BatchHeader { pub number: L1BatchNumber, diff --git a/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs b/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs index dbc94247617..0011f0b138b 100644 --- a/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs +++ b/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs @@ -1,5 +1,8 @@ use serde::{Deserialize, Serialize}; -use zksync_types::{fee_model::BatchFeeInput, Address, L1BatchNumber, H256}; +use zksync_types::{ + block::UnsealedL1BatchHeader, fee_model::BatchFeeInput, Address, L1BatchNumber, + ProtocolVersionId, H256, +}; use super::L2BlockEnv; @@ -21,3 +24,18 @@ pub struct L1BatchEnv { pub enforced_base_fee: Option, pub first_l2_block: L2BlockEnv, } + +impl L1BatchEnv { + pub fn into_unsealed_header( + self, + protocol_version: Option, + ) -> UnsealedL1BatchHeader { + UnsealedL1BatchHeader { + number: self.number, + timestamp: self.timestamp, + protocol_version, + fee_address: self.fee_account, + fee_input: self.fee_input, + } + } +} diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index ba5e10dfb22..3e4c0ee30b9 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -419,13 +419,7 @@ pub async fn create_genesis_l1_batch( .await?; transaction .blocks_dal() - .insert_l1_batch( 
- genesis_l1_batch_header.number, - genesis_l1_batch_header.timestamp, - genesis_l1_batch_header.protocol_version, - genesis_l1_batch_header.fee_address, - batch_fee_input, - ) + .insert_l1_batch(genesis_l1_batch_header.to_unsealed_header(batch_fee_input)) .await?; transaction .blocks_dal() diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index d4e7240da34..9148f963868 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -15,6 +15,7 @@ use zksync_state_keeper::{ updates::UpdatesManager, }; use zksync_types::{ + block::UnsealedL1BatchHeader, protocol_upgrade::ProtocolUpgradeTx, protocol_version::{ProtocolSemanticVersion, VersionPatch}, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, @@ -200,6 +201,14 @@ impl StateKeeperIO for ExternalIO { cursor.l1_batch ) })?; + storage + .blocks_dal() + .ensure_unsealed_l1_batch_exists( + l1_batch_env + .clone() + .into_unsealed_header(Some(system_env.version)), + ) + .await?; let data = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .with_context(|| { @@ -241,13 +250,13 @@ impl StateKeeperIO for ExternalIO { .connection() .await? .blocks_dal() - .insert_l1_batch( - cursor.l1_batch, - params.first_l2_block.timestamp, - None, - params.operator_address, - params.fee_input, - ) + .insert_l1_batch(UnsealedL1BatchHeader { + number: cursor.l1_batch, + timestamp: params.first_l2_block.timestamp, + protocol_version: None, + fee_address: params.operator_address, + fee_input: params.fee_input, + }) .await?; return Ok(Some(params)); } diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index f771a2dda4c..5a3fb8e4c4f 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -14,8 +14,8 @@ use zksync_mempool::L2TxFilter; use zksync_multivm::{interface::Halt, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_types::{ - protocol_upgrade::ProtocolUpgradeTx, utils::display_timestamp, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, U256, + block::UnsealedL1BatchHeader, protocol_upgrade::ProtocolUpgradeTx, utils::display_timestamp, + Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, U256, }; // TODO (SMA-1206): use seconds instead of milliseconds. use zksync_utils::time::millis_since_epoch; @@ -133,6 +133,15 @@ impl StateKeeperIO for MempoolIO { gas_per_pubdata: gas_per_pubdata as u32, }; + storage + .blocks_dal() + .ensure_unsealed_l1_batch_exists( + l1_batch_env + .clone() + .into_unsealed_header(Some(system_env.version)), + ) + .await?; + Ok(( cursor, Some(PendingBatchData { @@ -219,13 +228,13 @@ impl StateKeeperIO for MempoolIO { .connection() .await? 
.blocks_dal() - .insert_l1_batch( - cursor.l1_batch, + .insert_l1_batch(UnsealedL1BatchHeader { + number: cursor.l1_batch, timestamp, - Some(protocol_version), - self.fee_account, - self.filter.fee_input, - ) + protocol_version: Some(protocol_version), + fee_address: self.fee_account, + fee_input: self.filter.fee_input, + }) .await?; return Ok(Some(L1BatchParams { diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 16275ec672d..3e11285e11f 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -456,13 +456,7 @@ mod tests { .await .unwrap() .blocks_dal() - .insert_l1_batch( - l1_batch_env.number, - l1_batch_env.timestamp, - None, - l1_batch_env.fee_account, - l1_batch_env.fee_input, - ) + .insert_l1_batch(l1_batch_env.into_unsealed_header(None)) .await .unwrap(); diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index cd60bc68b36..566eebf7ab7 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -606,3 +606,37 @@ async fn continue_unsealed_batch_on_restart(commitment_mode: L1BatchCommitmentMo assert_eq!(old_l1_batch_params, new_l1_batch_params); } + +#[test_casing(2, COMMITMENT_MODES)] +#[tokio::test] +async fn insert_unsealed_batch_on_init(commitment_mode: L1BatchCommitmentMode) { + let connection_pool = ConnectionPool::::test_pool().await; + let mut tester = Tester::new(commitment_mode); + tester.genesis(&connection_pool).await; + let fee_input = BatchFeeInput::pubdata_independent(55, 555, 5555); + let tx_result = tester + .insert_l2_block(&connection_pool, 1, 5, fee_input) + .await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; + // Pre-insert L2 block without its unsealed L1 batch counterpart + tester.set_timestamp(2); + tester + .insert_l2_block(&connection_pool, 2, 5, fee_input) + .await; + + let (mut mempool, _) = tester.create_test_mempool_io(connection_pool.clone()).await; + // Initialization is supposed to recognize that the current L1 batch is not present in the DB and + // insert it itself. + let (cursor, _) = mempool.initialize().await.unwrap(); + + // Make sure we are able to fetch the newly inserted batch's params + let l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + assert_eq!(l1_batch_params.fee_input, fee_input); + assert_eq!(l1_batch_params.first_l2_block.timestamp, 2); +} From c1ab99d8d72509ccb9a537025cc6df9df141dd96 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Fri, 11 Oct 2024 12:04:57 +0200 Subject: [PATCH 044/140] ci: Switch `Build and release Stage` to new build workflows. (#3074) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
--- .github/workflows/release-test-stage.yml | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 11a844fdd2b..9d78c11a183 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -39,7 +39,7 @@ jobs: - '!prover/**' setup: name: Setup - runs-on: [ matterlabs-deployer-stage ] + runs-on: [matterlabs-deployer-stage] outputs: image_tag_suffix: ${{ steps.generate-tag-suffix.outputs.image_tag_suffix }} prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }} @@ -58,11 +58,10 @@ jobs: run: | ./prover/extract-setup-data-keys.sh >> $GITHUB_OUTPUT - build-push-core-images: name: Build and push images - needs: [ setup, changed_files ] - uses: ./.github/workflows/build-core-template.yml + needs: [setup, changed_files] + uses: ./.github/workflows/new-build-core-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} @@ -72,7 +71,7 @@ jobs: build-push-tee-prover-images: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-tee-prover-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -84,8 +83,8 @@ jobs: build-push-contract-verifier: name: Build and push images - needs: [ setup, changed_files ] - uses: ./.github/workflows/build-contract-verifier-template.yml + needs: [setup, changed_files] + uses: ./.github/workflows/new-build-contract-verifier-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} @@ -95,8 +94,8 @@ jobs: build-push-prover-images: name: Build and push images - needs: [ setup, changed_files ] - uses: ./.github/workflows/build-prover-template.yml + needs: [setup, changed_files] + uses: ./.github/workflows/new-build-prover-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} @@ -108,8 +107,8 @@ jobs: build-push-witness-generator-image-avx512: name: Build and push prover images with avx512 instructions - needs: [ setup, changed_files ] - uses: ./.github/workflows/build-witness-generator-template.yml + needs: [setup, changed_files] + uses: ./.github/workflows/new-build-witness-generator-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 @@ -122,7 +121,7 @@ jobs: build-gar-prover-fri-gpu-and-circuit-prover-gpu-gar: name: Build GAR prover FRI GPU - needs: [ setup, build-push-prover-images ] + needs: [setup, build-push-prover-images] uses: ./.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: From 2b94509927a0957731c858d508ab891ccbb216f0 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Fri, 11 Oct 2024 12:39:02 +0200 Subject: [PATCH 045/140] fix(nix): separate `tee_prover` and `zksync` deps (#3059) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ While `zksync` deps might not compile with nix, 
`tee_prover` deps must. To enable nix incompatible changes in the workspace, separate the build process completely. ## Why ❔ To enable nix incompatible changes in the workspace, which are unrelated to `tee_prover`. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. Signed-off-by: Harald Hoyer --- etc/nix/tee_prover.nix | 17 ++++++++++++----- etc/nix/zksync.nix | 8 +++++--- flake.nix | 9 ++------- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/etc/nix/tee_prover.nix b/etc/nix/tee_prover.nix index 0b424522dff..55545d1bb8e 100644 --- a/etc/nix/tee_prover.nix +++ b/etc/nix/tee_prover.nix @@ -1,12 +1,19 @@ -{ cargoArtifacts -, craneLib +{ craneLib , commonArgs }: -craneLib.buildPackage (commonArgs // { +let pname = "zksync_tee_prover"; + cargoExtraArgs = "--locked -p zksync_tee_prover"; +in +craneLib.buildPackage (commonArgs // { + inherit pname; version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; - cargoExtraArgs = "-p zksync_tee_prover --bin zksync_tee_prover"; - inherit cargoArtifacts; + inherit cargoExtraArgs; + + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + inherit pname; + inherit cargoExtraArgs; + }); postInstall = '' strip $out/bin/zksync_tee_prover diff --git a/etc/nix/zksync.nix b/etc/nix/zksync.nix index c5fffc48b09..1ecac58b5d9 100644 --- a/etc/nix/zksync.nix +++ b/etc/nix/zksync.nix @@ -1,12 +1,14 @@ -{ cargoArtifacts -, craneLib +{ craneLib , commonArgs }: craneLib.buildPackage (commonArgs // { pname = "zksync"; version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; cargoExtraArgs = "--all"; - inherit cargoArtifacts; + + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + pname = "zksync-era-workspace"; + }); outputs = [ "out" diff --git a/flake.nix b/flake.nix index ef618816f9c..3321e67e27b 100644 --- a/flake.nix +++ b/flake.nix @@ -47,7 +47,7 @@ packages = { # to ease potential cross-compilation, the overlay is used inherit (appliedOverlay.zksync-era) zksync tee_prover container-tee-prover-azure container-tee-prover-dcap; - default = appliedOverlay.zksync-era.zksync; + default = appliedOverlay.zksync-era.tee_prover; }; devShells.default = appliedOverlay.zksync-era.devShell; @@ -107,10 +107,6 @@ strictDeps = true; inherit hardeningEnable; }; - - cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { - pname = "zksync-era-workspace"; - }); in { zksync-era = rec { @@ -120,12 +116,11 @@ }; zksync = pkgs.callPackage ./etc/nix/zksync.nix { - inherit cargoArtifacts; inherit craneLib; inherit commonArgs; }; + tee_prover = pkgs.callPackage ./etc/nix/tee_prover.nix { - inherit cargoArtifacts; inherit craneLib; inherit commonArgs; }; From a5d57f44451f0ed6eefa7e8ec5bbfdeb0cde02a0 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Fri, 11 Oct 2024 12:39:36 +0200 Subject: [PATCH 046/140] ci: Remove unneeded ERA_BELLMAN_CUDA_RELEASE (#3076) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
- [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- .github/workflows/release-test-stage.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 9d78c11a183..5767584d5e1 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -112,7 +112,6 @@ jobs: if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 - ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: "60;70;75;80;89" WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl " secrets: From a3ea4ef7b462e99160797059fa1d7201308c2a63 Mon Sep 17 00:00:00 2001 From: Danil Date: Fri, 11 Oct 2024 12:59:34 +0200 Subject: [PATCH 047/140] ci: Fix folder for fee tests (#3056) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --------- Signed-off-by: Danil --- .github/workflows/ci-core-reusable.yml | 2 +- core/tests/ts-integration/tests/fees.test.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index b2044d025c4..7c7695ce56e 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -397,7 +397,7 @@ jobs: - name: Fee projection tests run: | ci_run killall -INT zksync_server || true - ci_run ./bin/run_on_all_chains.sh "zk_supervisor test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.FEES_LOGS_DIR }} - name: Run revert tests run: | diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index c41f5943efb..e99d3b67911 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -278,7 +278,6 @@ testFees('Test fees', function () { }); afterAll(async () => { - await testMaster.deinitialize(); await mainNode.killAndWaitForShutdown(); // Returning the pubdata price to the default one @@ -287,6 +286,7 @@ testFees('Test fees', function () { deleteInternalEnforcedL1GasPrice(pathToHome, fileConfig); deleteInternalEnforcedPubdataPrice(pathToHome, fileConfig); mainNode = await mainNodeSpawner.spawnMainNode(); + await testMaster.deinitialize(); __ZKSYNC_TEST_CONTEXT_OWNER__.setL2NodePid(mainNode.proc.pid!); }); }); From 038c397ce842601da5109c460b09dbf9d51cf2fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Fri, 11 Oct 2024 08:08:36 -0300 Subject: [PATCH 048/140] feat(configs): Add port parameter to ConsensusConfig (#3051) Add port parameter to ConsensusConfig --- core/lib/config/src/configs/consensus.rs | 1 + core/lib/config/src/testonly.rs | 1 + core/lib/protobuf_config/src/consensus.rs | 2 + .../src/proto/core/consensus.proto | 3 ++ core/node/consensus/src/testonly.rs | 1 + etc/env/consensus_config.yaml | 1 + etc/env/en_consensus_config.yaml | 1 + 
etc/env/file_based/general.yaml | 7 ++++ zk_toolbox/crates/config/src/consts.rs | 2 - .../src/commands/chain/init/configs.rs | 25 ++++------- .../commands/external_node/prepare_configs.rs | 38 ++++++++--------- zk_toolbox/crates/zk_inception/src/consts.rs | 23 ---------- .../zk_inception/src/utils/consensus.rs | 42 ++----------------- .../crates/zk_inception/src/utils/ports.rs | 8 +++- 14 files changed, 52 insertions(+), 103 deletions(-) diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 918d8f4adab..7f5a0f56aa1 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -115,6 +115,7 @@ impl RpcConfig { /// Config (shared between main node and external node). #[derive(Clone, Debug, PartialEq)] pub struct ConsensusConfig { + pub port: Option, /// Local socket address to listen for the incoming connections. pub server_addr: std::net::SocketAddr, /// Public address of this node (should forward to `server_addr`) diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index a6ff30e04a9..960808aa6a6 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -802,6 +802,7 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::consensus::ConsensusConfig { use configs::consensus::{ConsensusConfig, Host, NodePublicKey}; ConsensusConfig { + port: self.sample(rng), server_addr: self.sample(rng), public_addr: Host(self.sample(rng)), max_payload_size: self.sample(rng), diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index 81cad437fe4..2219b6a82ea 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -148,6 +148,7 @@ impl ProtoRepr for proto::Config { }; Ok(Self::Type { + port: self.port.and_then(|x| x.try_into().ok()), server_addr: required(&self.server_addr) .and_then(|x| Ok(x.parse()?)) .context("server_addr")?, @@ -182,6 +183,7 @@ impl ProtoRepr for proto::Config { fn build(this: &Self::Type) -> Self { Self { + port: this.port.map(|x| x.into()), server_addr: Some(this.server_addr.to_string()), public_addr: Some(this.public_addr.0.clone()), max_payload_size: Some(this.max_payload_size.try_into().unwrap()), diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index 92527df739a..9b0d69e7270 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -70,6 +70,9 @@ message Config { reserved 3; reserved "validators"; + // Port to listen on, for incoming TCP connections. + optional uint32 port = 12; + // IP:port to listen on, for incoming TCP connections. // Use `0.0.0.0:` to listen on all network interfaces (i.e. on all IPs exposed by this VM). 
optional string server_addr = 1; // required; IpAddr diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 4538337109a..2ba961dacc3 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -154,6 +154,7 @@ fn make_config( genesis_spec: Option, ) -> config::ConsensusConfig { config::ConsensusConfig { + port: Some(cfg.server_addr.port()), server_addr: *cfg.server_addr, public_addr: config::Host(cfg.public_addr.0.clone()), max_payload_size: usize::MAX, diff --git a/etc/env/consensus_config.yaml b/etc/env/consensus_config.yaml index 304ea31fac9..2564865eeb3 100644 --- a/etc/env/consensus_config.yaml +++ b/etc/env/consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3054 server_addr: "127.0.0.1:3054" public_addr: "127.0.0.1:3054" max_payload_size: 2500000 diff --git a/etc/env/en_consensus_config.yaml b/etc/env/en_consensus_config.yaml index f759e72e891..5c428866cb6 100644 --- a/etc/env/en_consensus_config.yaml +++ b/etc/env/en_consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3055 server_addr: '127.0.0.1:3055' public_addr: '127.0.0.1:3055' max_payload_size: 2500000 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index a4ba8c0201a..017d79dbe73 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -375,3 +375,10 @@ da_dispatcher: external_proof_integration_api: http_port: 3073 + +consensus: + port: 3054 + server_addr: "127.0.0.1:3054" + public_addr: "127.0.0.1:3054" + max_payload_size: 2500000 + gossip_dynamic_inbound_limit: 100 diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs index 80b204cc619..f462ce33b8f 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zk_toolbox/crates/config/src/consts.rs @@ -62,8 +62,6 @@ pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001; pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002; /// Default port for the explorer data fetcher service pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040; -/// Default port for consensus service -pub const DEFAULT_CONSENSUS_PORT: u16 = 3054; pub const EXPLORER_API_DOCKER_IMAGE: &str = "matterlabs/block-explorer-api"; pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher"; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs index d0897473b83..37ee2e076ab 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs @@ -2,7 +2,7 @@ use anyhow::Context; use common::logger; use config::{ copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, - ChainConfig, ContractsConfig, EcosystemConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, ContractsConfig, EcosystemConfig, }; use ethers::types::Address; use xshell::Shell; @@ -15,13 +15,12 @@ use crate::{ }, portal::update_portal_config, }, - defaults::PORT_RANGE_END, messages::{ - MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, + MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, }, utils::{ - consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, + consensus::{generate_consensus_keys, get_consensus_secrets, get_genesis_specs}, ports::EcosystemPortsScanner, }, }; @@ -57,22 +56,14 @@ pub async fn init_configs( )?; } - // Initialize general config let mut 
general_config = chain_config.get_general_config()?; - - // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - let offset = ((chain_config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = - ecosystem_ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut consensus_config = general_config + .consensus_config + .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; let consensus_keys = generate_consensus_keys(); - let consensus_config = get_consensus_config( - chain_config, - consensus_port, - Some(consensus_keys.clone()), - None, - )?; + consensus_config.genesis_spec = Some(get_genesis_specs(chain_config, &consensus_keys)); + general_config.consensus_config = Some(consensus_config); general_config.save_with_base_path(shell, &chain_config.configs)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs index 5ab859d17f0..d714a0f8e84 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs @@ -6,12 +6,12 @@ use config::{ external_node::ENConfig, set_rocks_db_config, traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, }; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; use zksync_config::configs::{ - consensus::{ConsensusSecrets, NodeSecretKey, Secret}, + consensus::{ConsensusConfig, ConsensusSecrets, NodeSecretKey, Secret}, DatabaseSecrets, L1Secrets, }; use zksync_consensus_crypto::TextFmt; @@ -19,14 +19,13 @@ use zksync_consensus_roles as roles; use crate::{ commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal}, - defaults::PORT_RANGE_END, messages::{ msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_CONSENSUS_SECRETS_MISSING_ERR, MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PREPARING_EN_CONFIGS, }, utils::{ - consensus::{get_consensus_config, node_public_key}, + consensus::node_public_key, ports::EcosystemPortsScanner, rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }, @@ -79,19 +78,12 @@ fn prepare_configs( bridge_addresses_refresh_interval_sec: None, }; let mut general_en = general.clone(); + general_en.consensus_config = None; let main_node_consensus_config = general .consensus_config .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; - - // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - ports.add_port_info( - main_node_consensus_config.server_addr.port(), - "Main node consensus".to_string(), - ); - let offset = ((config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut en_consensus_config = main_node_consensus_config.clone(); let mut gossip_static_outbound = BTreeMap::new(); let main_node_public_key = node_public_key( @@ -101,13 +93,8 @@ fn prepare_configs( .context(MSG_CONSENSUS_SECRETS_MISSING_ERR)?, )? 
.context(MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR)?; - gossip_static_outbound.insert(main_node_public_key, main_node_consensus_config.public_addr); - - let en_consensus_config = - get_consensus_config(config, consensus_port, None, Some(gossip_static_outbound))?; - general_en.consensus_config = Some(en_consensus_config.clone()); - en_consensus_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.gossip_static_outbound = gossip_static_outbound; // Set secrets config let node_key = roles::node::SecretKey::generate().encode(); @@ -128,16 +115,25 @@ fn prepare_configs( }), data_availability: None, }; - secrets.save_with_base_path(shell, en_configs_path)?; + let dirs = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::ExternalNode)?; set_rocks_db_config(&mut general_en, dirs)?; + general_en.save_with_base_path(shell, en_configs_path)?; en_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.save_with_base_path(shell, en_configs_path)?; + secrets.save_with_base_path(shell, en_configs_path)?; + let offset = 0; // This is zero because general_en ports already have a chain offset ports.allocate_ports_in_yaml( shell, &GeneralConfig::get_path_with_base_path(en_configs_path), - 0, // This is zero because general_en ports already have a chain offset + offset, + )?; + ports.allocate_ports_in_yaml( + shell, + &ConsensusConfig::get_path_with_base_path(en_configs_path), + offset, )?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index 9f81847e333..df27d2f02d2 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -1,5 +1,3 @@ -use std::net::{IpAddr, Ipv4Addr}; - pub const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; pub const MINIMUM_BALANCE_FOR_WALLET: u128 = 5000000000000000000; @@ -12,27 +10,6 @@ pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; -#[allow(non_upper_case_globals)] -const kB: usize = 1024; - -/// Max payload size for consensus in bytes -pub const MAX_PAYLOAD_SIZE: usize = 2_500_000; -/// Max batch size for consensus in bytes -/// Compute a default batch size, so operators are not caught out by the missing setting -/// while we're still working on batch syncing. The batch interval is ~1 minute, -/// so there will be ~60 blocks, and an Ethereum Merkle proof is ~1kB, but under high -/// traffic there can be thousands of huge transactions that quickly fill up blocks -/// and there could be more blocks in a batch then expected. We chose a generous -/// limit so as not to prevent any legitimate batch from being transmitted. 
-pub const MAX_BATCH_SIZE: usize = MAX_PAYLOAD_SIZE * 5000 + kB; -/// Gossip dynamic inbound limit for consensus -pub const GOSSIP_DYNAMIC_INBOUND_LIMIT: usize = 100; - -/// Public address for consensus -pub const CONSENSUS_PUBLIC_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); -/// Server address for consensus -pub const CONSENSUS_SERVER_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); - /// Path to the JS runtime config for the block-explorer-app docker container to be mounted to pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js"; pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs index 2979b4df0c1..946d28a33fb 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs @@ -1,24 +1,14 @@ -use std::{ - collections::{BTreeMap, BTreeSet}, - net::SocketAddr, -}; - use anyhow::Context as _; use config::ChainConfig; use secrecy::{ExposeSecret, Secret}; use zksync_config::configs::consensus::{ - AttesterPublicKey, AttesterSecretKey, ConsensusConfig, ConsensusSecrets, GenesisSpec, Host, - NodePublicKey, NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, - WeightedAttester, WeightedValidator, + AttesterPublicKey, AttesterSecretKey, ConsensusSecrets, GenesisSpec, NodePublicKey, + NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, WeightedAttester, + WeightedValidator, }; use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::{attester, node, validator}; -use crate::consts::{ - CONSENSUS_PUBLIC_ADDRESS_HOST, CONSENSUS_SERVER_ADDRESS_HOST, GOSSIP_DYNAMIC_INBOUND_LIMIT, - MAX_BATCH_SIZE, MAX_PAYLOAD_SIZE, -}; - pub(crate) fn parse_attester_committee( attesters: &[WeightedAttester], ) -> anyhow::Result { @@ -48,32 +38,6 @@ pub struct ConsensusPublicKeys { attester_key: attester::PublicKey, } -pub fn get_consensus_config( - chain_config: &ChainConfig, - consensus_port: u16, - consensus_keys: Option, - gossip_static_outbound: Option>, -) -> anyhow::Result { - let genesis_spec = - consensus_keys.map(|consensus_keys| get_genesis_specs(chain_config, &consensus_keys)); - - let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, consensus_port); - let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, consensus_port); - - Ok(ConsensusConfig { - server_addr, - public_addr: Host(public_addr.encode()), - genesis_spec, - max_payload_size: MAX_PAYLOAD_SIZE, - gossip_dynamic_inbound_limit: GOSSIP_DYNAMIC_INBOUND_LIMIT, - max_batch_size: MAX_BATCH_SIZE, - gossip_static_inbound: BTreeSet::new(), - gossip_static_outbound: gossip_static_outbound.unwrap_or_default(), - rpc: None, - debug_page_addr: None, - }) -} - pub fn generate_consensus_keys() -> ConsensusSecretKeys { ConsensusSecretKeys { validator_key: validator::SecretKey::generate(), diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zk_toolbox/crates/zk_inception/src/utils/ports.rs index 018fb79f345..04c8cef5ff5 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/ports.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/ports.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt, ops::Range, path::Path}; +use std::{collections::HashMap, fmt, net::SocketAddr, ops::Range, path::Path}; use anyhow::{bail, Context, Result}; use config::{ @@ -109,6 +109,12 @@ impl EcosystemPorts { } } } + } else if 
key.as_str().map(|s| s.ends_with("addr")).unwrap_or(false) { + let socket_addr = val.as_str().unwrap().parse::()?; + if let Some(new_port) = updated_ports.get(&socket_addr.port()) { + let new_socket_addr = SocketAddr::new(socket_addr.ip(), *new_port); + *val = Value::String(new_socket_addr.to_string()); + } } } // Continue traversing From 5d339b46fee66bc3a45493586626d318380680dd Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Fri, 11 Oct 2024 13:46:35 +0200 Subject: [PATCH 049/140] feat(consensus): smooth transition to p2p syncing (BFT-515) (#3075) https://linear.app/matterlabs/issue/BFT-515/smooth-transition-from-json-rpc-to-p2p-syncing-for-ens --- core/node/consensus/src/en.rs | 58 +++++++++- core/node/consensus/src/storage/store.rs | 24 +++++ core/node/consensus/src/testonly.rs | 39 ++++++- core/node/consensus/src/tests/mod.rs | 132 +++++++++++++++++++++++ 4 files changed, 249 insertions(+), 4 deletions(-) diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index c358974fb0c..518a7ebb29a 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -4,7 +4,7 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::BlockStore; +use zksync_consensus_storage::{BlockStore, PersistentBlockStore as _}; use zksync_dal::consensus_dal; use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; @@ -21,6 +21,10 @@ use crate::{ storage::{self, ConnectionPool}, }; +/// If less than TEMPORARY_FETCHER_THRESHOLD certificates are missing, +/// the temporary fetcher will stop fetching blocks. +pub(crate) const TEMPORARY_FETCHER_THRESHOLD: u64 = 10; + /// External node. pub(super) struct EN { pub(super) pool: ConnectionPool, @@ -120,6 +124,20 @@ impl EN { .wrap("Store::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + // Run the temporary fetcher until the certificates are backfilled. + // Temporary fetcher should be removed once json RPC syncing is fully deprecated. + s.spawn_bg({ + let store = store.clone(); + async { + let store = store; + self.temporary_block_fetcher(ctx, &store).await?; + tracing::info!( + "temporary block fetcher finished, switching to p2p fetching only" + ); + Ok(()) + } + }); + let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; @@ -358,8 +376,42 @@ impl EN { } } + /// Fetches blocks from the main node directly, until the certificates + /// are backfilled. This allows for smooth transition from json RPC to p2p block syncing. + pub(crate) async fn temporary_block_fetcher( + &self, + ctx: &ctx::Ctx, + store: &Store, + ) -> ctx::Result<()> { + const MAX_CONCURRENT_REQUESTS: usize = 30; + scope::run!(ctx, |ctx, s| async { + let (send, mut recv) = ctx::channel::bounded(MAX_CONCURRENT_REQUESTS); + s.spawn(async { + let Some(mut next) = store.next_block(ctx).await? else { + return Ok(()); + }; + while store.persisted().borrow().next().0 + TEMPORARY_FETCHER_THRESHOLD < next.0 { + let n = L2BlockNumber(next.0.try_into().context("overflow")?); + self.sync_state.wait_for_main_node_block(ctx, n).await?; + send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; + next = next.next(); + } + drop(send); + Ok(()) + }); + while let Ok(block) = recv.recv_or_disconnected(ctx).await? 
{ + store + .queue_next_fetched_block(ctx, block.join(ctx).await?) + .await + .wrap("queue_next_fetched_block()")?; + } + Ok(()) + }) + .await + } + /// Fetches blocks from the main node in range `[cursor.next()..end)`. - pub(super) async fn fetch_blocks( + async fn fetch_blocks( &self, ctx: &ctx::Ctx, queue: &mut storage::PayloadQueue, @@ -373,7 +425,7 @@ impl EN { s.spawn(async { let send = send; while end.map_or(true, |end| next < end) { - let n = L2BlockNumber(next.0.try_into().unwrap()); + let n = L2BlockNumber(next.0.try_into().context("overflow")?); self.sync_state.wait_for_main_node_block(ctx, n).await?; send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; next = next.next(); diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index ed83758ba9f..7267d7e1c82 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -111,6 +111,30 @@ impl Store { async fn conn(&self, ctx: &ctx::Ctx) -> ctx::Result { self.pool.connection(ctx).await.wrap("connection") } + + /// Number of the next block to queue. + pub(crate) async fn next_block( + &self, + ctx: &ctx::Ctx, + ) -> ctx::OrCanceled> { + Ok(sync::lock(ctx, &self.block_payloads) + .await? + .as_ref() + .map(|p| p.next())) + } + + /// Queues the next block. + pub(crate) async fn queue_next_fetched_block( + &self, + ctx: &ctx::Ctx, + block: FetchedBlock, + ) -> ctx::Result<()> { + let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async(); + if let Some(payloads) = &mut *payloads { + payloads.send(block).await.context("payloads.send()")?; + } + Ok(()) + } } impl PersistedBlockState { diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 2ba961dacc3..4ebcf5c9a61 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -45,7 +45,10 @@ use zksync_types::{ }; use zksync_web3_decl::client::{Client, DynClient, L2}; -use crate::{en, storage::ConnectionPool}; +use crate::{ + en, + storage::{ConnectionPool, Store}, +}; /// Fake StateKeeper for tests. #[derive(Debug)] @@ -417,6 +420,40 @@ impl StateKeeper { .await } + pub async fn run_temporary_fetcher( + self, + ctx: &ctx::Ctx, + client: Box>, + ) -> ctx::Result<()> { + scope::run!(ctx, |ctx, s| async { + let payload_queue = self + .pool + .connection(ctx) + .await + .wrap("connection()")? + .new_payload_queue(ctx, self.actions_sender, self.sync_state.clone()) + .await + .wrap("new_payload_queue()")?; + let (store, runner) = Store::new( + ctx, + self.pool.clone(), + Some(payload_queue), + Some(client.clone()), + ) + .await + .wrap("Store::new()")?; + s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + en::EN { + pool: self.pool.clone(), + client, + sync_state: self.sync_state.clone(), + } + .temporary_block_fetcher(ctx, &store) + .await + }) + .await + } + /// Runs consensus node for the external node. 
pub async fn run_consensus( self, diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 94fbcbb90d8..8da17cfba8a 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -13,8 +13,10 @@ use zksync_consensus_storage::{BlockStore, PersistentBlockStore}; use zksync_dal::consensus_dal; use zksync_test_account::Account; use zksync_types::ProtocolVersionId; +use zksync_web3_decl::namespaces::EnNamespaceClient as _; use crate::{ + en::TEMPORARY_FETCHER_THRESHOLD, mn::run_main_node, storage::{ConnectionPool, Store}, testonly, @@ -665,6 +667,136 @@ async fn test_p2p_fetcher_backfill_certs( .unwrap(); } +// Test temporary fetcher fetching blocks if a lot of certs are missing. +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[tokio::test] +async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + // We force certs to be missing on EN by having 1 of the validators permanently offline. + // This way no blocks will be finalized at all, so no one will have certs. + let setup = Setup::new(rng, 2); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Spawn validator."); + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + // API server needs at least 1 L1 batch to start. + validator.seal_batch().await; + let client = validator.connect(ctx).await?; + + // Wait for the consensus to be initialized. + while ctx.wait(client.consensus_global_config()).await??.is_none() { + ctx.sleep(time::Duration::milliseconds(100)).await?; + } + + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + tracing::info!("Run centralized fetcher, so that there is a lot of certs missing."); + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_fetcher(ctx, client.clone())); + validator + .push_random_blocks(rng, account, TEMPORARY_FETCHER_THRESHOLD as usize + 1) + .await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + + tracing::info!( + "Run p2p fetcher. Blocks should be fetched by the temporary fetcher anyway." + ); + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); + validator.push_random_blocks(rng, account, 5).await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + Ok(()) + }) + .await + .unwrap(); +} + +// Test that temporary fetcher terminates once enough blocks have certs. 
+#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[tokio::test] +async fn test_temporary_fetcher_termination(from_snapshot: bool, version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 1); + let pregenesis = true; + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Spawn validator."); + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + // API server needs at least 1 L1 batch to start. + validator.seal_batch().await; + let client = validator.connect(ctx).await?; + + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + // Run the EN so the consensus is initialized on EN and wait for it to sync. + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); + validator.push_random_blocks(rng, account, 5).await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + + // Run the temporary fetcher. It should terminate immediately, since EN is synced. + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + node.run_temporary_fetcher(ctx, client).await?; + + Ok(()) + }) + .await + .unwrap(); +} + #[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] async fn test_with_pruning(version: ProtocolVersionId, pregenesis: bool) { From 9b0a6067923c5276f560f3abccedc4e6a5167dda Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Fri, 11 Oct 2024 15:00:59 +0200 Subject: [PATCH 050/140] feat(zktoolbox): added checking the contract owner in set-attester-committee command (#3061) This way we obtain a human readable error in case wrong account is used to update the attester committee. --- core/lib/protobuf_config/src/lib.rs | 2 +- .../src/commands/consensus/mod.rs | 40 +++++++++++++------ 2 files changed, 29 insertions(+), 13 deletions(-) diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index 48bc5f1ce13..68f7f699de2 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -64,7 +64,7 @@ pub fn read_optional_repr(field: &Option
<P>
) -> Option { .transpose() // This error will printed, only if the config partially filled, allows to debug config issues easier .map_err(|err| { - tracing::error!("Failed to serialize config: {err}"); + tracing::error!("Failed to parse config: {err:#}"); err }) .ok() diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs index a21ba2d62cf..1855a5943dc 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs @@ -3,7 +3,7 @@ use std::{borrow::Borrow, collections::HashMap, path::PathBuf, sync::Arc}; /// Consensus registry contract operations. /// Includes code duplicated from `zksync_node_consensus::registry::abi`. use anyhow::Context as _; -use common::logger; +use common::{logger, wallets::Wallet}; use config::EcosystemConfig; use conv::*; use ethers::{ @@ -11,7 +11,7 @@ use ethers::{ contract::{FunctionCall, Multicall}, middleware::{Middleware, NonceManagerMiddleware, SignerMiddleware}, providers::{Http, JsonRpcClient, PendingTransaction, Provider, RawCall as _}, - signers::Signer as _, + signers::{LocalWallet, Signer as _}, types::{Address, BlockId, H256}, }; use xshell::Shell; @@ -174,19 +174,20 @@ impl Setup { )?) } - fn governor(&self) -> anyhow::Result> { - let governor = self + fn governor(&self) -> anyhow::Result { + Ok(self .chain .get_wallets_config() .context("get_wallets_config()")? - .governor - .private_key - .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?; - let governor = governor.with_chain_id(self.genesis.l2_chain_id.as_u64()); + .governor) + } + + fn signer(&self, wallet: LocalWallet) -> anyhow::Result> { + let wallet = wallet.with_chain_id(self.genesis.l2_chain_id.as_u64()); let provider = self.provider().context("provider()")?; - let signer = SignerMiddleware::new(provider, governor.clone()); + let signer = SignerMiddleware::new(provider, wallet.clone()); // Allows us to send next transaction without waiting for the previous to complete. - let signer = NonceManagerMiddleware::new(signer, governor.address()); + let signer = NonceManagerMiddleware::new(signer, wallet.address()); Ok(Arc::new(signer)) } @@ -279,10 +280,25 @@ impl Setup { let provider = self.provider().context("provider()")?; let block_id = self.last_block(&provider).await.context("last_block()")?; let governor = self.governor().context("governor()")?; + let signer = self.signer( + governor + .private_key + .clone() + .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?, + )?; let consensus_registry = self - .consensus_registry(governor.clone()) + .consensus_registry(signer.clone()) .context("consensus_registry()")?; - let mut multicall = self.multicall(governor.clone()).context("multicall()")?; + let mut multicall = self.multicall(signer).context("multicall()")?; + + let owner = consensus_registry.owner().call().await.context("owner()")?; + if owner != governor.address { + anyhow::bail!( + "governor ({:#x}) is different than the consensus registry owner ({:#x})", + governor.address, + owner + ); + } // Fetch contract state. 
let n: usize = consensus_registry From 87654dc229248e246daf0ec4281887306ee43d4e Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 11 Oct 2024 16:43:26 +0300 Subject: [PATCH 051/140] =?UTF-8?q?test(vm):=20Add=20tests=20for=20EVM=20e?= =?UTF-8?q?mulator=20=E2=80=93=20multivm=20(#3045)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds unit tests for the `multivm` crate testing a mock EVM emulator. ## Why ❔ Ensures that EVM emulation will work as expected. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- Cargo.lock | 1 + core/lib/multivm/Cargo.toml | 1 + .../versions/vm_latest/tests/evm_emulator.rs | 453 +++++++++++++++++- core/node/consensus/src/registry/tests.rs | 2 +- core/node/consensus/src/tests/attestation.rs | 4 +- .../contracts/mock-evm/mock-evm.sol | 159 +++++- 6 files changed, 602 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bd9f2d5ef28..7b1a268afef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10488,6 +10488,7 @@ dependencies = [ "itertools 0.10.5", "once_cell", "pretty_assertions", + "test-casing", "thiserror", "tracing", "vise", diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index ab418d24cd1..e49086a6b8b 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -42,5 +42,6 @@ ethabi.workspace = true [dev-dependencies] assert_matches.workspace = true pretty_assertions.workspace = true +test-casing.workspace = true zksync_test_account.workspace = true zksync_eth_signer.workspace = true diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs index ca8157b170d..4316558eda2 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -1,28 +1,44 @@ +use std::collections::HashMap; + use ethabi::Token; -use zksync_contracts::read_bytecode; -use zksync_system_constants::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; -use zksync_types::{get_code_key, get_known_code_key, Execute, H256}; -use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256}; -use zksync_vm_interface::VmInterfaceExt; +use test_casing::{test_casing, Product}; +use zksync_contracts::{load_contract, read_bytecode, SystemContractCode}; +use zksync_system_constants::{ + CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, +}; +use zksync_test_account::TxType; +use zksync_types::{ + get_code_key, get_known_code_key, + utils::{key_for_eth_balance, storage_key_for_eth_balance}, + AccountTreeId, Address, Execute, StorageKey, H256, U256, +}; +use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; use crate::{ - interface::{storage::InMemoryStorage, TxExecutionMode}, + interface::{ + storage::InMemoryStorage, TxExecutionMode, VmExecutionResultAndLogs, VmInterfaceExt, + }, versions::testonly::default_system_env, - vm_latest::{tests::tester::VmTesterBuilder, utils::hash_evm_bytecode, HistoryEnabled}, + vm_latest::{ + tests::tester::{VmTester, VmTesterBuilder}, + utils::hash_evm_bytecode, + HistoryEnabled, + }, }; const MOCK_DEPLOYER_PATH: &str = 
"etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockContractDeployer.json"; const MOCK_KNOWN_CODE_STORAGE_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockKnownCodeStorage.json"; +const MOCK_EMULATOR_PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockEvmEmulator.json"; +const RECURSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/NativeRecursiveContract.json"; +const INCREMENTING_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/IncrementingContract.json"; -#[test] -fn tracing_evm_contract_deployment() { +fn override_system_contracts(storage: &mut InMemoryStorage) { let mock_deployer = read_bytecode(MOCK_DEPLOYER_PATH); let mock_deployer_hash = hash_bytecode(&mock_deployer); let mock_known_code_storage = read_bytecode(MOCK_KNOWN_CODE_STORAGE_PATH); let mock_known_code_storage_hash = hash_bytecode(&mock_known_code_storage); - // Override - let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); storage.set_value(get_code_key(&CONTRACT_DEPLOYER_ADDRESS), mock_deployer_hash); storage.set_value( get_known_code_key(&mock_deployer_hash), @@ -38,6 +54,81 @@ fn tracing_evm_contract_deployment() { ); storage.store_factory_dep(mock_deployer_hash, mock_deployer); storage.store_factory_dep(mock_known_code_storage_hash, mock_known_code_storage); +} + +#[derive(Debug)] +struct EvmTestBuilder { + deploy_emulator: bool, + storage: InMemoryStorage, + evm_contract_addresses: Vec
<Address>
, +} + +impl EvmTestBuilder { + fn new(deploy_emulator: bool, evm_contract_address: Address) -> Self { + Self { + deploy_emulator, + storage: InMemoryStorage::with_system_contracts(hash_bytecode), + evm_contract_addresses: vec![evm_contract_address], + } + } + + fn with_mock_deployer(mut self) -> Self { + override_system_contracts(&mut self.storage); + self + } + + fn with_evm_address(mut self, address: Address) -> Self { + self.evm_contract_addresses.push(address); + self + } + + fn build(self) -> VmTester { + let mock_emulator = read_bytecode(MOCK_EMULATOR_PATH); + let mut storage = self.storage; + let mut system_env = default_system_env(); + if self.deploy_emulator { + let evm_bytecode: Vec<_> = (0..=u8::MAX).collect(); + let evm_bytecode_hash = hash_evm_bytecode(&evm_bytecode); + storage.set_value( + get_known_code_key(&evm_bytecode_hash), + H256::from_low_u64_be(1), + ); + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), evm_bytecode_hash); + } + + system_env.base_system_smart_contracts.evm_emulator = Some(SystemContractCode { + hash: hash_bytecode(&mock_emulator), + code: bytes_to_be_words(mock_emulator), + }); + } else { + let emulator_hash = hash_bytecode(&mock_emulator); + storage.set_value(get_known_code_key(&emulator_hash), H256::from_low_u64_be(1)); + storage.store_factory_dep(emulator_hash, mock_emulator); + + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), emulator_hash); + // Set `isUserSpace` in the emulator storage to `true`, so that it skips emulator-specific checks + storage.set_value( + StorageKey::new(AccountTreeId::new(evm_address), H256::zero()), + H256::from_low_u64_be(1), + ); + } + } + + VmTesterBuilder::new(HistoryEnabled) + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build() + } +} + +#[test] +fn tracing_evm_contract_deployment() { + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + override_system_contracts(&mut storage); let mut system_env = default_system_env(); // The EVM emulator will not be accessed, so we set it to a dummy value. @@ -74,3 +165,343 @@ fn tracing_evm_contract_deployment() { evm_bytecode ); } + +#[test] +fn mock_emulator_basics() { + let called_address = Address::repeat_byte(0x23); + let mut vm = EvmTestBuilder::new(true, called_address).build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(called_address), + calldata: vec![], + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +const RECIPIENT_ADDRESS: Address = Address::repeat_byte(0x12); + +/// `deploy_emulator = false` here and below tests the mock emulator as an ordinary contract (i.e., sanity-checks its logic). 
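+// `#[test_casing(2, [false, true])]` below (from the `test-casing` dev-dependency added in this patch)
+// expands into one generated test per listed argument, so each annotated test runs both with and
+// without the mock emulator deployed as the called account's code.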
+#[test_casing(2, [false, true])] +#[test] +fn mock_emulator_with_payment(deploy_emulator: bool) { + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let mut vm = EvmTestBuilder::new(deploy_emulator, RECIPIENT_ADDRESS).build(); + + let mut current_balance = U256::zero(); + for i in 1_u64..=5 { + let transferred_value = (1_000_000_000 * i).into(); + let vm_result = test_payment( + &mut vm, + &mock_emulator_abi, + &mut current_balance, + transferred_value, + ); + + let balance_storage_logs = vm_result.logs.storage_logs.iter().filter_map(|log| { + (*log.log.key.address() == L2_BASE_TOKEN_ADDRESS) + .then_some((*log.log.key.key(), h256_to_u256(log.log.value))) + }); + let balances: HashMap<_, _> = balance_storage_logs.collect(); + assert_eq!( + balances[&key_for_eth_balance(&RECIPIENT_ADDRESS)], + current_balance + ); + } +} + +fn test_payment( + vm: &mut VmTester, + mock_emulator_abi: ðabi::Contract, + balance: &mut U256, + transferred_value: U256, +) -> VmExecutionResultAndLogs { + *balance += transferred_value; + let test_payment_fn = mock_emulator_abi.function("testPayment").unwrap(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(RECIPIENT_ADDRESS), + calldata: test_payment_fn + .encode_input(&[Token::Uint(transferred_value), Token::Uint(*balance)]) + .unwrap(), + value: transferred_value, + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + vm_result +} + +#[test_casing(4, Product(([false, true], [false, true])))] +#[test] +fn mock_emulator_with_recursion(deploy_emulator: bool, is_external: bool) { + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(deploy_emulator, recipient_address).build(); + let account = &mut vm.rich_accounts[0]; + + let test_recursion_fn = mock_emulator_abi + .function(if is_external { + "testExternalRecursion" + } else { + "testRecursion" + }) + .unwrap(); + let mut expected_value = U256::one(); + let depth = 50_u32; + for i in 2..=depth { + expected_value *= i; + } + + let factory_deps = if is_external { + vec![read_bytecode(RECURSIVE_CONTRACT_PATH)] + } else { + vec![] + }; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(recipient_address), + calldata: test_recursion_fn + .encode_input(&[Token::Uint(depth.into()), Token::Uint(expected_value)]) + .unwrap(), + value: 0.into(), + factory_deps, + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +#[test] +fn calling_to_mock_emulator_from_native_contract() { + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(true, recipient_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(RECURSIVE_CONTRACT_PATH); + let native_contract_abi = load_contract(RECURSIVE_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx( + &native_contract, + Some(&[Token::Address(recipient_address)]), + TxType::L2, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Call from the native contract to the EVM emulator. 
+ let test_fn = native_contract_abi.function("recurse").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(deploy_tx.address), + calldata: test_fn.encode_input(&[Token::Uint(50.into())]).unwrap(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[test] +fn mock_emulator_with_deployment() { + let contract_address = Address::repeat_byte(0xaa); + let mut vm = EvmTestBuilder::new(true, contract_address) + .with_mock_deployer() + .build(); + let account = &mut vm.rich_accounts[0]; + + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let new_evm_bytecode = vec![0xfe; 96]; + let new_evm_bytecode_hash = hash_evm_bytecode(&new_evm_bytecode); + + let test_fn = mock_emulator_abi.function("testDeploymentAndCall").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(contract_address), + calldata: test_fn + .encode_input(&[ + Token::FixedBytes(new_evm_bytecode_hash.0.into()), + Token::Bytes(new_evm_bytecode.clone()), + ]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + + let factory_deps = vm_result.new_known_factory_deps.unwrap(); + assert_eq!( + factory_deps, + HashMap::from([(new_evm_bytecode_hash, new_evm_bytecode)]) + ); +} + +#[test] +fn mock_emulator_with_delegate_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + let mut vm = builder.with_evm_address(other_evm_contract_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); + let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = native_contract_abi.function("testDelegateCall").unwrap(); + // Delegate to the native contract from EVM. + test_delegate_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address); + // Delegate to EVM from the native contract. + test_delegate_call(&mut vm, test_fn, deploy_tx.address, evm_contract_address); + // Delegate to EVM from EVM. 
+ test_delegate_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + ); +} + +fn test_delegate_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn.encode_input(&[Token::Address(to)]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +#[test] +fn mock_emulator_with_static_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + // Set differing read values for tested contracts. The slot index is defined in the contract. + let value_slot = H256::from_low_u64_be(0x123); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(evm_contract_address), value_slot), + H256::from_low_u64_be(100), + ); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(other_evm_contract_address), value_slot), + H256::from_low_u64_be(200), + ); + let mut vm = builder.with_evm_address(other_evm_contract_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); + let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = native_contract_abi.function("testStaticCall").unwrap(); + // Call to the native contract from EVM. + test_static_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address, 0); + // Call to EVM from the native contract. + test_static_call( + &mut vm, + test_fn, + deploy_tx.address, + evm_contract_address, + 100, + ); + // Call to EVM from EVM. + test_static_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + 200, + ); +} + +fn test_static_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, + expected_value: u64, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn + .encode_input(&[Token::Address(to), Token::Uint(expected_value.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs index 33392a7f206..89afc20e1d5 100644 --- a/core/node/consensus/src/registry/tests.rs +++ b/core/node/consensus/src/registry/tests.rs @@ -80,7 +80,7 @@ async fn test_attester_committee() { .wrap("wait_for_batch_info()")?; // Read the attester committee using the vm. 
- let batch = attester::BatchNumber(node.last_batch().0.into()); + let batch = attester::BatchNumber(node.last_batch().0); assert_eq!( Some(committee), registry diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index bd3886bd4c8..2701a986e9e 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -79,7 +79,7 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { let status = fetch_status().await?; assert_eq!( status.next_batch_to_attest, - attester::BatchNumber(first_batch.0.into()) + attester::BatchNumber(first_batch.0) ); tracing::info!("Insert a cert"); @@ -237,7 +237,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId, pregenesis: bool) { } tracing::info!("Wait for the batches to be attested"); - let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); + let want_last = attester::BatchNumber(validator.last_sealed_batch().0); validator_pool .wait_for_batch_certificates_and_verify(ctx, want_last, Some(registry_addr)) .await?; diff --git a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol index 5f4de59681f..baa0d37b753 100644 --- a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol +++ b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol @@ -68,7 +68,7 @@ contract MockContractDeployer { Version1 } - address constant CODE_ORACLE_ADDR = address(0x8012); + IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002)); MockKnownCodeStorage constant KNOWN_CODE_STORAGE_CONTRACT = MockKnownCodeStorage(address(0x8004)); /// The returned value is obviously incorrect in the general case, but works well enough when called by the bootloader. @@ -78,15 +78,166 @@ contract MockContractDeployer { /// Replaces real deployment with publishing a surrogate EVM "bytecode". /// @param _salt bytecode hash - /// @param _bytecodeHash ignored, since it's not possible to set arbitrarily /// @param _input bytecode to publish function create( bytes32 _salt, - bytes32 _bytecodeHash, + bytes32, // ignored, since it's not possible to set arbitrarily bytes calldata _input ) external payable returns (address) { KNOWN_CODE_STORAGE_CONTRACT.setEVMBytecodeHash(_salt); KNOWN_CODE_STORAGE_CONTRACT.publishEVMBytecode(_input); - return address(0); + address newAddress = address(uint160(msg.sender) + 1); + ACCOUNT_CODE_STORAGE_CONTRACT.storeAccountConstructedCodeHash(newAddress, _salt); + return newAddress; + } +} + +interface IAccountCodeStorage { + function getRawCodeHash(address _address) external view returns (bytes32); + function storeAccountConstructedCodeHash(address _address, bytes32 _hash) external; +} + +interface IRecursiveContract { + function recurse(uint _depth) external returns (uint); +} + +/// Native incrementing library. Not actually a library to simplify deployment. 
+contract IncrementingContract { + // Should not collide with other storage slots + uint constant INCREMENTED_SLOT = 0x123; + + function getIncrementedValue() public view returns (uint _value) { + assembly { + _value := sload(INCREMENTED_SLOT) + } + } + + function increment(address _thisAddress, uint _thisBalance) external { + require(msg.sender == tx.origin, "msg.sender not retained"); + require(address(this) == _thisAddress, "this address"); + require(address(this).balance == _thisBalance, "this balance"); + assembly { + sstore(INCREMENTED_SLOT, add(sload(INCREMENTED_SLOT), 1)) + } + } + + /// Tests delegation to a native or EVM contract at the specified target. + function testDelegateCall(address _target) external { + uint valueSnapshot = getIncrementedValue(); + (bool success, ) = _target.delegatecall(abi.encodeCall( + IncrementingContract.increment, + (address(this), address(this).balance) + )); + require(success, "delegatecall reverted"); + require(getIncrementedValue() == valueSnapshot + 1, "invalid value"); + } + + function testStaticCall(address _target, uint _expectedValue) external { + (bool success, bytes memory rawValue) = _target.staticcall(abi.encodeCall( + this.getIncrementedValue, + () + )); + require(success, "static call reverted"); + (uint value) = abi.decode(rawValue, (uint)); + require(value == _expectedValue, "value mismatch"); + + (success, ) = _target.staticcall(abi.encodeCall( + IncrementingContract.increment, + (address(this), address(this).balance) + )); + require(!success, "staticcall should've reverted"); + } +} + +uint constant EVM_EMULATOR_STIPEND = 1 << 30; + +/** + * Mock EVM emulator used in low-level tests. + */ +contract MockEvmEmulator is IRecursiveContract, IncrementingContract { + IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002)); + + /// Set to `true` for testing logic sanity. + bool isUserSpace; + + modifier validEvmEntry() { + if (!isUserSpace) { + require(gasleft() >= EVM_EMULATOR_STIPEND, "no stipend"); + // Fetch bytecode for the executed contract. + bytes32 bytecodeHash = ACCOUNT_CODE_STORAGE_CONTRACT.getRawCodeHash(address(this)); + require(bytecodeHash != bytes32(0), "called contract not deployed"); + uint bytecodeVersion = uint(bytecodeHash) >> 248; + require(bytecodeVersion == 2, "non-EVM bytecode"); + + // Check that members of the current address are well-defined. + require(address(this).code.length != 0, "invalid code"); + require(address(this).codehash == bytecodeHash, "bytecode hash mismatch"); + } + _; + } + + function testPayment(uint _expectedValue, uint _expectedBalance) public payable validEvmEntry { + require(msg.value == _expectedValue, "unexpected msg.value"); + require(address(this).balance == _expectedBalance, "unexpected balance"); + } + + IRecursiveContract recursionTarget; + + function recurse(uint _depth) public validEvmEntry returns (uint) { + require(gasleft() < 2 * EVM_EMULATOR_STIPEND, "stipend provided multiple times"); + + if (_depth <= 1) { + return 1; + } else { + IRecursiveContract target = (address(recursionTarget) == address(0)) ? this : recursionTarget; + // The real emulator limits amount of gas when performing far calls by EVM gas, so we emulate this behavior as well. + uint gasToSend = isUserSpace ? 
gasleft() : (gasleft() - EVM_EMULATOR_STIPEND); + return target.recurse{gas: gasToSend}(_depth - 1) * _depth; + } + } + + function testRecursion(uint _depth, uint _expectedValue) external validEvmEntry returns (uint) { + require(recurse(_depth) == _expectedValue, "incorrect recursion"); + } + + function testExternalRecursion(uint _depth, uint _expectedValue) external validEvmEntry returns (uint) { + recursionTarget = new NativeRecursiveContract(IRecursiveContract(this)); + uint returnedValue = recurse(_depth); + recursionTarget = this; // This won't work on revert, but for tests, it's good enough + require(returnedValue == _expectedValue, "incorrect recursion"); + } + + MockContractDeployer constant CONTRACT_DEPLOYER_CONTRACT = MockContractDeployer(address(0x8006)); + + /// Emulates EVM contract deployment and a subsequent call to it in a single transaction. + function testDeploymentAndCall(bytes32 _evmBytecodeHash, bytes calldata _evmBytecode) external validEvmEntry { + IRecursiveContract newContract = IRecursiveContract(CONTRACT_DEPLOYER_CONTRACT.create( + _evmBytecodeHash, + _evmBytecodeHash, + _evmBytecode + )); + require(uint160(address(newContract)) == uint160(address(this)) + 1, "unexpected address"); + require(address(newContract).code.length > 0, "contract code length"); + require(address(newContract).codehash != bytes32(0), "contract code hash"); + + uint gasToSend = gasleft() - EVM_EMULATOR_STIPEND; + require(newContract.recurse{gas: gasToSend}(5) == 120, "unexpected recursive result"); + } + + fallback() external validEvmEntry { + require(msg.data.length == 0, "unsupported call"); + } +} + +contract NativeRecursiveContract is IRecursiveContract { + IRecursiveContract target; + + constructor(IRecursiveContract _target) { + target = _target; + } + + function recurse(uint _depth) external returns (uint) { + require(gasleft() < EVM_EMULATOR_STIPEND, "stipend spilled to native contract"); + return (_depth <= 1) ? 1 : target.recurse(_depth - 1) * _depth; } } From 6dd486555775b3dec0c0de080999a1d73aab25ce Mon Sep 17 00:00:00 2001 From: kelemeno <34402761+kelemeno@users.noreply.github.com> Date: Fri, 11 Oct 2024 16:26:33 +0100 Subject: [PATCH 052/140] docs: better interop docs (#3079) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- docs/specs/README.md | 2 +- docs/specs/zk_chains/README.md | 2 +- docs/specs/zk_chains/gateway.md | 1 + docs/specs/zk_chains/hyperbridges.md | 41 -------- docs/specs/zk_chains/interop.md | 49 +++++++++ docs/specs/zk_chains/shared_bridge.md | 143 ++++++++++++++------------ 6 files changed, 127 insertions(+), 111 deletions(-) create mode 100644 docs/specs/zk_chains/gateway.md delete mode 100644 docs/specs/zk_chains/hyperbridges.md create mode 100644 docs/specs/zk_chains/interop.md diff --git a/docs/specs/README.md b/docs/specs/README.md index 1f163bf7845..d0b087ae93e 100644 --- a/docs/specs/README.md +++ b/docs/specs/README.md @@ -33,4 +33,4 @@ 1. 
[ZK Chain ecosystem](./zk_chains/README.md) - [Overview](./zk_chains/overview.md) - [Shared Bridge](./zk_chains/shared_bridge.md) - - [Hyperbridges](./zk_chains/hyperbridges.md) + - [Interop](./zk_chains/interop.md) diff --git a/docs/specs/zk_chains/README.md b/docs/specs/zk_chains/README.md index 4de575899dd..ce0a7c311a2 100644 --- a/docs/specs/zk_chains/README.md +++ b/docs/specs/zk_chains/README.md @@ -2,4 +2,4 @@ - [Overview](./overview.md) - [Shared Bridge](./shared_bridge.md) -- [Hyperbridges](./hyperbridges.md) +- [Interop](./interop.md) diff --git a/docs/specs/zk_chains/gateway.md b/docs/specs/zk_chains/gateway.md new file mode 100644 index 00000000000..f4ee68e242e --- /dev/null +++ b/docs/specs/zk_chains/gateway.md @@ -0,0 +1 @@ +# Gateway diff --git a/docs/specs/zk_chains/hyperbridges.md b/docs/specs/zk_chains/hyperbridges.md deleted file mode 100644 index 614fe61427e..00000000000 --- a/docs/specs/zk_chains/hyperbridges.md +++ /dev/null @@ -1,41 +0,0 @@ -# Hyperbridges - -## Introduction - -In the Shared bridge document we described how the L1 smart contracts work to support multiple chains, and we emphasized -that the core feature is hyperbridging, but we did not outline the hyperbridges themselves. This is because hyperbridges -are mostly L2 contracts. In this document we describe what hyperbridges are, and specify the necessary infrastructure. - -### Hyperbridge description - -Hyperbridges are trustless and cheap general native bridges between ZK Chains, allowing cross-chain function calls. -Trustlessness is achieved by relying on the main ZK Chain bridge to send a compressed message to L1, which is then sent -to and expanded on the destination ZK Chain. - -Technically they are a system of smart contracts that build on top of the enshrined L1<>L2 validating bridges, and can -interpret messages sent from L2 to L2 by verifying Merkle proofs. They are built alongside the protocol, they can -transfer the native asset of the ecosystem, and they can be used for asynchronous function calls between ZK Chains. - -![Hyperbridges](./img/hyperbridges.png) - -The trustless nature of hyperbridges allows the ecosystem to resemble a single VM. To illustrate imagine a new ZK Chain -joining the ecosystem. We will want ether/Dai/etc. to be accessible on this ZK Chain. This can be done automatically. -There will be a central erc20 deployer contract in the ecosystem, which will deploy the new ERC20 contract via the -hyperbridge. After the contract is deployed it will be able to interact other Dai contracts in the ecosystem. - -### High Level design - -![Hyperbridging](./img/hyperbridging.png) - -### L1 - -For the larger context see the [Shared Bridge](./shared_bridge.md) document, here we will focus on - -- HyperMailbox (part of Bridgehub). Contains the Hyperroot, root of Merkle tree of Hyperlogs. Hyperlogs are the L2->L1 - SysLogs that record the sent hyperbridge messages from the L2s. - -### L2 Contracts - -- Outbox system contract. It collects the hyperbridge txs into the hyperlog of the ZK Chain. -- Inbox system contract. This is where the hyperroot is imported and sent to L1 for settlement. 
Merkle proofs are - verified here, tx calls are started from here, nullifiers are stored here (add epochs later)
diff --git a/docs/specs/zk_chains/interop.md b/docs/specs/zk_chains/interop.md
new file mode 100644
index 00000000000..947742909b8
--- /dev/null
+++ b/docs/specs/zk_chains/interop.md
@@ -0,0 +1,49 @@
+# Interop
+
+## Introduction
+
+In the Shared bridge document we described how the L1 smart contracts work to support multiple chains, and we emphasized
+that the core feature is interop. Interop happens via the same L1->L2 interface as described in the L1SharedBridge doc.
+There is (with the interop upgrade) a Bridgehub, AssetRouter, NativeTokenVault and Nullifier deployed on every L2, and
+they serve the same purpose as their L1 counterparts. Namely:
+
+- The Bridgehub is used to start the transaction.
+- The AssetRouter and NativeTokenVault are the bridge contracts that handle the tokens.
+- The Nullifier is used to prevent reexecution of xL2 txs.
+
+### Interop process
+
+![Interop](./img/hyperbridging.png)
+
+The interop process has 7 main steps, each with its substeps:
+
+1. Starting the transaction on the sending chain
+
+   - The user or calling contract calls the Bridgehub contract. If they want to use a bridge they call
+     `requestL2TransactionTwoBridges`, if they want to make a direct call they call the `requestL2TransactionDirect`
+     function.
+   - The Bridgehub collects the base token fees necessary for the interop tx to be processed on the destination chain,
+     and if using the TwoBridges method the calldata and the destination contract (for more details see the Shared bridge
+     doc).
+   - The Bridgehub emits a `NewPriorityRequest` event; this is the same as the one in our Mailbox contract. This event
+     specifies the xL2 tx, which uses the same format as L1->L2 txs. This event can be picked up and used to receive
+     the tx.
+   - This new priority request is sent as an L2->L1 message, and it is included in the chain's Merkle tree of emitted txs.
+
+2. The chain settles its proof on L1 or the Gateway, whichever is used as the settlement layer for the chain.
+3. On the Settlement Layer (SL), the MessageRoot is updated in the MessageRoot contract. The new data includes all the
+   L2->L1 messages that are emitted from the settling chain.
+4. The receiving chain picks up the updated MessageRoot from the Settlement Layer.
+5. Now the xL2 txs can be imported on the destination chain. Along with the txs, a Merkle proof needs to be sent to link
+   it to the MessageRoot.
+6. Receiving the tx on the destination chain
+
+   - On the destination chain the xL2 tx is verified. This means the Merkle proof is checked against the MessageRoot.
+     This shows that the xL2 tx was indeed sent.
+   - After this the tx can be executed. The tx hash is stored in the L2Nullifier contract, so that the tx cannot be
+     replayed.
+   - The specified contract is called, with the calldata, and the message sender =
+     `keccak256(originalMessageSender, originChainId) >> 160`. This is to prevent the collision of the msg.sender
+     addresses.
+
+7. The destination chain settles on the SL and the MessageRoot that it imported is checked.
diff --git a/docs/specs/zk_chains/shared_bridge.md b/docs/specs/zk_chains/shared_bridge.md
index c464a7a154b..b43d3082b62 100644
--- a/docs/specs/zk_chains/shared_bridge.md
+++ b/docs/specs/zk_chains/shared_bridge.md
@@ -17,7 +17,7 @@ If you want to know more about ZK Chains, check this
 We want to create a system where:
 
 - ZK Chains should be launched permissionlessly within the ecosystem.
-- Hyperbridges should enable unified liquidity for assets across the ecosystem. +- Interop should enable unified liquidity for assets across the ecosystem. - Multi-chain smart contracts need to be easy to develop, which means easy access to traditional bridges, and other supporting architecture. @@ -58,20 +58,19 @@ be able to leverage them when available). #### Bridgehub - Acts as a hub for bridges, so that they have a single point of communication with all ZK Chain contracts. This allows - L1 assets to be locked in the same contract for all ZK Chains, including L3s and validiums. The `Bridgehub` also - implements the following: + L1 assets to be locked in the same contract for all ZK Chains. The `Bridgehub` also implements the following features: - `Registry` This is where ZK Chains can register, starting in a permissioned manner, but with the goal to be - permissionless in the future. This is where their `chainID` is determined. L3s will also register here. This - `Registry` is also where State Transition contracts should register. Each chain has to specify its desired ST when - registering (Initially, only one will be available). + permissionless in the future. This is where their `chainID` is determined. Chains on Gateway will also register here. + This `Registry` is also where Chain Type Manager contracts should register. Each chain has to specify its desired CTM + when registering (Initially, only one will be available). ``` function newChain( uint256 _chainId, - address _stateTransition + address _chainTypeManager ) external returns (uint256 chainId); - function newStateTransition(address _stateTransition) external; + function newChainTypeManager(address _chainTypeManager) external; ``` - `BridgehubMailbox` routes messages to the Diamond proxy’s Mailbox facet based on chainID @@ -79,43 +78,73 @@ be able to leverage them when available). - Same as the current zkEVM [Mailbox](https://github.com/matter-labs/era-contracts/blob/main/l1-contracts/contracts/zksync/facets/Mailbox.sol), just with chainId, - - Ether needs to be deposited and withdrawn from here. - This is where L2 transactions can be requested. ``` - function requestL2Transaction( - uint256 _chainId, - address _contractL2, - uint256 _l2Value, - bytes calldata _calldata, - uint256 _l2GasLimit, - uint256 _l2GasPerPubdataByteLimit, - bytes[] calldata _factoryDeps, - address _refundRecipient - ) public payable override returns (bytes32 canonicalTxHash) { - address proofChain = bridgeheadStorage.proofChain[_chainId]; - canonicalTxHash = IProofChain(proofChain).requestL2TransactionBridgehead( - _chainId, - msg.value, - msg.sender, - _contractL2, - _l2Value, - _calldata, - _l2GasLimit, - _l2GasPerPubdataByteLimit, - _factoryDeps, - _refundRecipient - ); - } + function requestL2TransactionTwoBridges( + L2TransactionRequestTwoBridgesOuter calldata _request + ) ``` -- `Hypermailbox` - - This will allow general message passing (L2<>L2, L2<>L3, etc). This is where the `Mailbox` sends the `Hyperlogs`. - `Hyperlogs` are commitments to these messages sent from a single ZK Chain. `Hyperlogs` are aggregated into a - `HyperRoot` in the `HyperMailbox`. 
- - This component has not been implemented yet + ``` + struct L2TransactionRequestTwoBridgesOuter { + uint256 chainId; + uint256 mintValue; + uint256 l2Value; + uint256 l2GasLimit; + uint256 l2GasPerPubdataByteLimit; + address refundRecipient; + address secondBridgeAddress; + uint256 secondBridgeValue; + bytes secondBridgeCalldata; + } + ``` -#### Main asset shared bridges +``` + struct L2TransactionRequestTwoBridgesInner { + bytes32 magicValue; + address l2Contract; + bytes l2Calldata; + bytes[] factoryDeps; + bytes32 txDataHash; +} +``` + +- The `requestL2TransactionTwoBridges` function should be used most of the time when bridging to a chain ( the exeption + is when the user bridges directly to a contract on the L2, without using a bridge contract on L1). The logic of it is + the following: + + - The user wants to bridge to chain with the provided `L2TransactionRequestTwoBridgesOuter.chainId`. + - Two bridges are called, the baseTokenBridge (i.e. the L1SharedBridge or L1AssetRouter after the Gateway upgrade) and + an arbitrary second bridge. The Bridgehub will provide the original caller address to both bridges, which can + request that the appropriate amount of tokens are transferred from the caller to the bridge. The caller has to set + the appropriate allowance for both bridges. (Often the bridges coincide, but they don't have to). + - The `L2TransactionRequestTwoBridgesOuter.mintValue` is the amount of baseTokens that will be minted on L2. This is + the amount of tokens that the baseTokenBridge will request from the user. If the baseToken is Eth, it will be + forwarded to the baseTokenBridge. + - The `L2TransactionRequestTwoBridgesOuter.l2Value` is the amount of tokens that will be deposited on L2. The second + bridge and the Mailbox receives this as an input (although our second bridge does not use the value). + - The `L2TransactionRequestTwoBridgesOuter.l2GasLimit` is the maximum amount of gas that will be spent on L2 to + complete the transaction. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.l2GasPerPubdataByteLimit` is the maximum amount of gas per pubdata byte + that will be spent on L2 to complete the transaction. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.refundRecipient` is the address that will be refunded for the gas spent on + L2. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeAddress` is the address of the second bridge that will be + called. This is the arbitrary address that is called from the Bridgehub. + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeValue` is the amount of tokens that will be deposited on L2. + The second bridge receives this value as the baseToken (i.e. Eth on L1). + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeCalldata` is the calldata that will be passed to the second + bridge. This is the arbitrary calldata that is passed from the Bridgehub to the second bridge. + - The secondBridge returns the `L2TransactionRequestTwoBridgesInner` struct to the Bridgehub. This is also passed to + the Mailbox as input. This is where the destination contract, calldata, factoryDeps are determined on the L2. 
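+
+  To make the flow above concrete, the following is an illustrative sketch only (it is not part of the contract
+  interfaces above; `bridgehub`, `SHARED_BRIDGE`, `l1Token`, `amount` and `l2Receiver` are assumed identifiers, and
+  the second-bridge calldata encoding is simplified). A caller bridging an ERC20 token to a chain whose base token
+  is ETH might populate the outer request roughly as follows:
+
+  ```
+  L2TransactionRequestTwoBridgesOuter memory request = L2TransactionRequestTwoBridgesOuter({
+      chainId: 324,                      // destination chain id (hypothetical)
+      mintValue: 0.01 ether,             // base token minted on L2; covers the L2 gas costs
+      l2Value: 0,                        // not used by the token bridge
+      l2GasLimit: 2_000_000,
+      l2GasPerPubdataByteLimit: 800,
+      refundRecipient: msg.sender,
+      secondBridgeAddress: SHARED_BRIDGE,
+      secondBridgeValue: 0,              // no ETH forwarded to the second bridge
+      secondBridgeCalldata: abi.encode(l1Token, amount, l2Receiver)
+  });
+  // With ETH as the base token, mintValue is forwarded as msg.value.
+  bridgehub.requestL2TransactionTwoBridges{value: 0.01 ether}(request);
+  ```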
+ + This setup allows the user to bridge the baseToken of the origin chain A to a chain B with some other baseToken, by + specifying the A's token in the secondBridgeValue, which will be minted on the destination chain as an ERC20 token, + and specifying the amount of B's token in the mintValue, which will be minted as the baseToken and used to cover the + gas costs. + +#### Main asset shared bridges L2TransactionRequestTwoBridgesInner - Some assets have to be natively supported (ETH, WETH) and it also makes sense to support some generally accepted token standards (ERC20 tokens), as this makes it easy to bridge those tokens (and ensures a single version of them exists on @@ -147,25 +176,18 @@ be able to leverage them when available). ); ``` -This topic is now covered more thoroughly by the Custom native token discussion. - -[Custom native token compatible with Hyperbridging](https://www.notion.so/Custom-native-token-compatible-with-Hyperbridging-54e190a1a76f44248cf84a38304a0641?pvs=21) +#### Chain Type Manager -#### State Transition - -- `StateTransition` A state transition manages proof verification and DA for multiple chains. It also implements the +- `ChainTypeManager` A chain type manager manages proof verification and DA for multiple chains. It also implements the following functionalities: - - `StateTransitionRegistry` The ST is shared for multiple chains, so initialization and upgrades have to be the same - for all chains. Registration is not permissionless but happens based on the registrations in the bridgehub’s - `Registry`. At registration a `DiamondProxy` is deployed and initialized with the appropriate `Facets` for each ZK - Chain. + - `ChainTypeRegistry` The ST is shared for multiple chains, so initialization and upgrades have to be the same for all + chains. Registration is not permissionless but happens based on the registrations in the bridgehub’s `Registry`. At + registration a `DiamondProxy` is deployed and initialized with the appropriate `Facets` for each ZK Chain. - `Facets` and `Verifier` are shared across chains that relies on the same ST: `Base`, `Executor` , `Getters`, `Admin` , `Mailbox.`The `Verifier` is the contract that actually verifies the proof, and is called by the `Executor`. - Upgrade Mechanism The system requires all chains to be up-to-date with the latest implementation, so whenever an update is needed, we have to “force” each chain to update, but due to decentralization, we have to give each chain a - time frame (more information in the - [Upgrade Mechanism](https://www.notion.so/ZK-Stack-shared-bridge-alpha-version-a37c4746f8b54fb899d67e474bfac3bb?pvs=21) - section). This is done in the update mechanism contract, this is where the bootloader and system contracts are + time frame. This is done in the update mechanism contract, this is where the bootloader and system contracts are published, and the `ProposedUpgrade` is stored. Then each chain can call this upgrade for themselves as needed. After the deadline is over, the not-updated chains are frozen, that is, cannot post new proofs. Frozen chains can unfreeze by updating their proof system. @@ -180,6 +202,7 @@ This topic is now covered more thoroughly by the Custom native token discussion. - A chain might implement its own specific consensus mechanism. This needs its own contracts. Only this contract will be able to submit proofs to the State Transition contract. +- DA contracts. - Currently, the `ValidatorTimelock` is an example of such a contract. 
### Components interactions @@ -199,22 +222,6 @@ features required to process proofs. The chain ID is set in the VM in a special -#### WETH Contract - -Ether, the native gas token is part of the core system contracts, so deploying it is not necessary. But WETH is just a -smart contract, it needs to be deployed and initialised. This happens from the L1 WETH bridge. This deploys on L2 the -corresponding bridge and ERC20 contract. This is deployed from L1, but the L2 address is known at deployment time. - -![deployWeth.png](./img/deployWeth.png) - -#### Deposit WETH - -The user can deposit WETH into the ecosystem using the WETH bridge on L1. The destination chain ID has to be specified. -The Bridgehub unwraps the WETH, and keeps the ETH, and send a message to the destination L2 to mint WETH to the -specified address. - -![depositWeth.png](./img/depositWeth.png) - --- ### Common Standards and Upgrades From 15fe5a62f03cd103afd7fa5eb03e27db25686ba9 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 14 Oct 2024 11:14:44 +0300 Subject: [PATCH 053/140] fix(api): Adapt `eth_getCode` to EVM emulator (#3073) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixes returned bytecodes with EVM bytecodes by removing the length prefix and padding suffix. ## Why ❔ Callers are interested in the original EVM bytecode rather than its transformed version. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- Cargo.lock | 10 ++ Cargo.toml | 1 + ...1717e73c5e6b063be3553d82bfecb98334980.json | 24 ---- ...35bfda52cc5bb5a4bfb11270a2a784491c967.json | 30 ++++ core/lib/dal/src/storage_web3_dal.rs | 20 ++- .../versions/vm_latest/tests/evm_emulator.rs | 11 +- .../vm_latest/tracers/evm_deploy_tracer.rs | 6 +- .../src/versions/vm_latest/utils/mod.rs | 22 +-- core/lib/utils/src/bytecode.rs | 61 ++++++++ core/node/api_server/Cargo.toml | 1 + core/node/api_server/src/testonly.rs | 25 ++++ core/node/api_server/src/utils.rs | 36 +++++ .../api_server/src/web3/namespaces/eth.rs | 21 ++- core/node/api_server/src/web3/tests/mod.rs | 134 +++++++++++++++++- 14 files changed, 338 insertions(+), 64 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json create mode 100644 core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json diff --git a/Cargo.lock b/Cargo.lock index 7b1a268afef..f9f7a88764e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1539,6 +1539,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "const-decoder" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b381abde2cdc1bc3817e394b24e05667a2dc89f37570cbd34d9c397d99e56e3f" +dependencies = [ + "compile-fmt", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -10516,6 +10525,7 @@ dependencies = [ "async-trait", "axum", "chrono", + "const-decoder", "futures 0.3.30", "governor", "hex", diff --git a/Cargo.toml b/Cargo.toml index d597f4af754..60b5628f419 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -114,6 +114,7 @@ blake2 = "0.10" chrono = "0.4" clap = "4.2.2" codegen = "0.2.0" +const-decoder = "0.4.0" criterion = "0.4.0" ctrlc = "3.1" dashmap = "5.5.3" diff --git 
a/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json b/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json deleted file mode 100644 index 7245fa3059e..00000000000 --- a/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n bytecode\n FROM\n (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "bytecode", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Int8", - "Bytea" - ] - }, - "nullable": [ - false - ] - }, - "hash": "369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980" -} diff --git a/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json b/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json new file mode 100644 index 00000000000..20b79199165 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n bytecode_hash,\n bytecode\n FROM\n (\n SELECT\n value\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) deploy_log\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "bytecode_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "bytecode", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Bytea" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967" +} diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index 78c1dc0c3d0..10d2cfe6152 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -15,6 +15,13 @@ use zksync_utils::h256_to_u256; use crate::{models::storage_block::ResolvedL1BatchForL2Block, Core, CoreDal}; +/// Raw bytecode information returned by [`StorageWeb3Dal::get_contract_code_unchecked()`]. 
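+///
+/// `bytecode_hash` is the versioned hash under which `bytecode` is stored in `factory_deps`: its first byte
+/// distinguishes EraVM (1) from EVM (2) bytecodes, which the API layer later inspects via `BytecodeMarker`
+/// to decide whether the returned bytecode needs EVM-specific post-processing.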
+#[derive(Debug)] +pub struct RawBytecode { + pub bytecode_hash: H256, + pub bytecode: Vec, +} + #[derive(Debug)] pub struct StorageWeb3Dal<'a, 'c> { pub(crate) storage: &'a mut Connection<'c, Core>, @@ -234,16 +241,17 @@ impl StorageWeb3Dal<'_, '_> { &mut self, address: Address, block_number: L2BlockNumber, - ) -> DalResult>> { + ) -> DalResult> { let hashed_key = get_code_key(&address).hashed_key(); let row = sqlx::query!( r#" SELECT + bytecode_hash, bytecode FROM ( SELECT - * + value FROM storage_logs WHERE @@ -254,7 +262,7 @@ impl StorageWeb3Dal<'_, '_> { storage_logs.operation_number DESC LIMIT 1 - ) t + ) deploy_log JOIN factory_deps ON value = factory_deps.bytecode_hash WHERE value != $3 @@ -268,7 +276,11 @@ impl StorageWeb3Dal<'_, '_> { .with_arg("block_number", &block_number) .fetch_optional(self.storage) .await?; - Ok(row.map(|row| row.bytecode)) + + Ok(row.map(|row| RawBytecode { + bytecode_hash: H256::from_slice(&row.bytecode_hash), + bytecode: row.bytecode, + })) } /// Given bytecode hash, returns bytecode and L2 block number at which it was inserted. diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs index 4316558eda2..34780b73eb0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -12,7 +12,11 @@ use zksync_types::{ utils::{key_for_eth_balance, storage_key_for_eth_balance}, AccountTreeId, Address, Execute, StorageKey, H256, U256, }; -use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{ + be_words_to_bytes, + bytecode::{hash_bytecode, hash_evm_bytecode}, + bytes_to_be_words, h256_to_u256, +}; use crate::{ interface::{ @@ -21,7 +25,6 @@ use crate::{ versions::testonly::default_system_env, vm_latest::{ tests::tester::{VmTester, VmTesterBuilder}, - utils::hash_evm_bytecode, HistoryEnabled, }, }; @@ -87,7 +90,7 @@ impl EvmTestBuilder { let mut storage = self.storage; let mut system_env = default_system_env(); if self.deploy_emulator { - let evm_bytecode: Vec<_> = (0..=u8::MAX).collect(); + let evm_bytecode: Vec<_> = (0..32).collect(); let evm_bytecode_hash = hash_evm_bytecode(&evm_bytecode); storage.set_value( get_known_code_key(&evm_bytecode_hash), @@ -142,7 +145,7 @@ fn tracing_evm_contract_deployment() { .build(); let account = &mut vm.rich_accounts[0]; - let args = [Token::Bytes((0..=u8::MAX).collect())]; + let args = [Token::Bytes((0..32).collect())]; let evm_bytecode = ethabi::encode(&args); let expected_bytecode_hash = hash_evm_bytecode(&evm_bytecode); let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs index d91ee13a920..becc4f22527 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs @@ -8,16 +8,14 @@ use zk_evm_1_5_0::{ }, }; use zksync_types::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytecode::hash_evm_bytecode, bytes_to_be_words, h256_to_u256}; use zksync_vm_interface::storage::StoragePtr; use super::{traits::VmTracer, utils::read_pointer}; use crate::{ interface::{storage::WriteStorage, tracer::TracerExecutionStatus}, tracers::dynamic::vm_1_5_0::DynTracer, - 
vm_latest::{ - utils::hash_evm_bytecode, BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState, - }, + vm_latest::{BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState}, }; /// Tracer responsible for collecting information about EVM deploys and providing those diff --git a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs index e07d3eda7c4..aeb66755f51 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs @@ -1,11 +1,7 @@ //! Utility functions for the VM. use once_cell::sync::Lazy; -use zk_evm_1_5_0::{ - aux_structures::MemoryPage, - sha2, - zkevm_opcode_defs::{BlobSha256Format, VersionedHashLen32}, -}; +use zk_evm_1_5_0::aux_structures::MemoryPage; use zksync_types::{H256, KNOWN_CODES_STORAGE_ADDRESS}; use zksync_vm_interface::VmEvent; @@ -15,22 +11,6 @@ pub(crate) mod logs; pub mod overhead; pub mod transaction_encoding; -pub(crate) fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { - use sha2::{Digest, Sha256}; - let mut hasher = Sha256::new(); - let len = bytecode.len() as u16; - hasher.update(bytecode); - let result = hasher.finalize(); - - let mut output = [0u8; 32]; - output[..].copy_from_slice(result.as_slice()); - output[0] = BlobSha256Format::VERSION_BYTE; - output[1] = 0; - output[2..4].copy_from_slice(&len.to_be_bytes()); - - H256(output) -} - pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { MemoryPage(base.0 + 2) } diff --git a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs index 48bdb433020..01cce5bc34d 100644 --- a/core/lib/utils/src/bytecode.rs +++ b/core/lib/utils/src/bytecode.rs @@ -1,5 +1,6 @@ // FIXME: move to basic_types? +use zk_evm::k256::sha2::{Digest, Sha256}; use zksync_basic_types::H256; use crate::bytes_to_chunks; @@ -40,6 +41,7 @@ pub fn validate_bytecode(code: &[u8]) -> Result<(), InvalidBytecodeError> { Ok(()) } +/// Hashes the provided EraVM bytecode. pub fn hash_bytecode(code: &[u8]) -> H256 { let chunked_code = bytes_to_chunks(code); let hash = zk_evm::zkevm_opcode_defs::utils::bytecode_to_code_hash(&chunked_code) @@ -55,3 +57,62 @@ pub fn bytecode_len_in_words(bytecodehash: &H256) -> u16 { pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { bytecode_len_in_words(&bytecodehash) as usize * 32 } + +/// Bytecode marker encoded in the first byte of the bytecode hash. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u8)] +pub enum BytecodeMarker { + /// EraVM bytecode marker (1). + EraVm = 1, + /// EVM bytecode marker (2). + Evm = 2, +} + +impl BytecodeMarker { + /// Parses a marker from the bytecode hash. + pub fn new(bytecode_hash: H256) -> Option { + Some(match bytecode_hash.as_bytes()[0] { + val if val == Self::EraVm as u8 => Self::EraVm, + val if val == Self::Evm as u8 => Self::Evm, + _ => return None, + }) + } +} + +/// Hashes the provided EVM bytecode. The bytecode must be padded to an odd number of 32-byte words; +/// bytecodes stored in the known codes storage satisfy this requirement automatically. 
+pub fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { + validate_bytecode(bytecode).expect("invalid EVM bytecode"); + + let mut hasher = Sha256::new(); + let len = bytecode.len() as u16; + hasher.update(bytecode); + let result = hasher.finalize(); + + let mut output = [0u8; 32]; + output[..].copy_from_slice(result.as_slice()); + output[0] = BytecodeMarker::Evm as u8; + output[1] = 0; + output[2..4].copy_from_slice(&len.to_be_bytes()); + + H256(output) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytecode_markers_are_valid() { + let bytecode_hash = hash_bytecode(&[0; 32]); + assert_eq!( + BytecodeMarker::new(bytecode_hash), + Some(BytecodeMarker::EraVm) + ); + let bytecode_hash = hash_evm_bytecode(&[0; 32]); + assert_eq!( + BytecodeMarker::new(bytecode_hash), + Some(BytecodeMarker::Evm) + ); + } +} diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index d0723a9d23e..067b9b3e372 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -61,4 +61,5 @@ zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true assert_matches.workspace = true +const-decoder.workspace = true test-casing.workspace = true diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 8dc7915385a..45ed802d68f 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -2,6 +2,7 @@ use std::{collections::HashMap, iter}; +use const_decoder::Decoder; use zk_evm_1_5_0::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_contracts::{ eth_contract, get_loadnext_contract, load_contract, read_bytecode, @@ -26,6 +27,30 @@ use zksync_types::{ }; use zksync_utils::{address_to_u256, u256_to_h256}; +pub(crate) const RAW_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"00000000000000000000000000000000000000000000000000000000000001266080604052348015\ + 600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063fb5343f314604c57\ + 5b5f80fd5b604a60048036038101906046919060a6565b6066565b005b6052606f565b604051605d\ + 919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd5b5f819050919050\ + 565b6088816078565b81146091575f80fd5b50565b5f8135905060a0816081565b92915050565b5f\ + 6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092915050565b60d381\ + 6078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056fea2646970667358\ + 221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9ce4d0964736f6c63\ + 4300081a00330000000000000000000000000000000000000000000000000000" +); +pub(crate) const PROCESSED_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"6080604052348015600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063\ + fb5343f314604c575b5f80fd5b604a60048036038101906046919060a6565b6066565b005b605260\ + 6f565b604051605d919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd\ + 5b5f819050919050565b6088816078565b81146091575f80fd5b50565b5f8135905060a081608156\ + 5b92915050565b5f6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092\ + 915050565b60d3816078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056\ + fea2646970667358221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9\ + ce4d0964736f6c634300081a0033" +); + const EXPENSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; const PRECOMPILES_CONTRACT_PATH: &str = diff --git a/core/node/api_server/src/utils.rs 
b/core/node/api_server/src/utils.rs index 6769e773dc7..c7a1134682b 100644 --- a/core/node/api_server/src/utils.rs +++ b/core/node/api_server/src/utils.rs @@ -6,9 +6,33 @@ use std::{ time::{Duration, Instant}, }; +use anyhow::Context; use zksync_dal::{Connection, Core, DalError}; +use zksync_multivm::circuit_sequencer_api_latest::boojum::ethereum_types::U256; use zksync_web3_decl::error::Web3Error; +pub(crate) fn prepare_evm_bytecode(raw: &[u8]) -> anyhow::Result<Vec<u8>> { + // EVM bytecodes are prefixed with a big-endian `U256` bytecode length. + let bytecode_len_bytes = raw.get(..32).context("length < 32")?; + let bytecode_len = U256::from_big_endian(bytecode_len_bytes); + let bytecode_len: usize = bytecode_len + .try_into() + .map_err(|_| anyhow::anyhow!("length ({bytecode_len}) overflow"))?; + let bytecode = raw.get(32..(32 + bytecode_len)).with_context(|| { + format!( + "prefixed length ({bytecode_len}) exceeds real length ({})", + raw.len() - 32 + ) + })?; + // Since slicing above succeeded, this one is safe. + let padding = &raw[(32 + bytecode_len)..]; + anyhow::ensure!( + padding.iter().all(|&b| b == 0), + "bytecode padding contains non-zero bytes" + ); + Ok(bytecode.to_vec()) +} + /// Opens a readonly transaction over the specified connection. pub(crate) async fn open_readonly_transaction<'r>( conn: &'r mut Connection<'_, Core>, @@ -66,3 +90,15 @@ macro_rules! report_filter { ReportFilter::new($interval, &LAST_TIMESTAMP) }}; } + +#[cfg(test)] +mod tests { + use super::*; + use crate::testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}; + + #[test] + fn preparing_evm_bytecode() { + let prepared = prepare_evm_bytecode(RAW_EVM_BYTECODE).unwrap(); + assert_eq!(prepared, PROCESSED_EVM_BYTECODE); + } +} diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 44362dd098e..008c529ec63 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -12,7 +12,7 @@ use zksync_types::{ web3::{self, Bytes, SyncInfo, SyncState}, AccountTreeId, L2BlockNumber, StorageKey, H256, L2_BASE_TOKEN_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{bytecode::BytecodeMarker, u256_to_h256}; use zksync_web3_decl::{ error::Web3Error, types::{Address, Block, Filter, FilterChanges, Log, U64}, @@ -21,7 +21,7 @@ use zksync_web3_decl::{ use crate::{ execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, - utils::open_readonly_transaction, + utils::{open_readonly_transaction, prepare_evm_bytecode}, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, state::RpcState, TypedFilter}, }; @@ -397,7 +397,22 @@ impl EthNamespace { .get_contract_code_unchecked(address, block_number) .await .map_err(DalError::generalize)?; - Ok(contract_code.unwrap_or_default().into()) + let Some(contract_code) = contract_code else { + return Ok(Bytes::default()); + }; + // Check if the bytecode is an EVM bytecode, and if so, pre-process it correspondingly. + let marker = BytecodeMarker::new(contract_code.bytecode_hash); + let prepared_bytecode = if marker == Some(BytecodeMarker::Evm) { + prepare_evm_bytecode(&contract_code.bytecode).with_context(|| { + format!( + "malformed EVM bytecode at address {address:?}, hash = {:?}", + contract_code.bytecode_hash + ) + })?
+ } else { + contract_code.bytecode + }; + Ok(prepared_bytecode.into()) } pub fn chain_id_impl(&self) -> U64 { diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 632e263c653..77b0b1824c7 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -31,17 +31,21 @@ use zksync_system_constants::{ }; use zksync_types::{ api, - block::{pack_block_info, L2BlockHeader}, + block::{pack_block_info, L2BlockHasher, L2BlockHeader}, get_nonce_key, l2::L2Tx, storage::get_code_key, + system_contracts::get_system_smart_contracts, tokens::{TokenInfo, TokenMetadata}, tx::IncludedTxLocation, utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance}, AccountTreeId, Address, L1BatchNumber, Nonce, ProtocolVersionId, StorageKey, StorageLog, H256, U256, U64, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{ + bytecode::{hash_bytecode, hash_evm_bytecode}, + u256_to_h256, +}; use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::{ client::{Client, DynClient, L2}, @@ -58,7 +62,10 @@ use zksync_web3_decl::{ }; use super::*; -use crate::web3::testonly::TestServerBuilder; +use crate::{ + testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}, + web3::testonly::TestServerBuilder, +}; mod debug; mod filters; @@ -625,7 +632,7 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { fn storage_initialization(&self) -> StorageInitialization { let address = Address::repeat_byte(1); let code_key = get_code_key(&address); - let code_hash = H256::repeat_byte(2); + let code_hash = hash_bytecode(&[0; 32]); let balance_key = storage_key_for_eth_balance(&address); let logs = vec![ StorageLog::new_write_log(code_key, code_hash), @@ -1102,3 +1109,122 @@ impl HttpTest for GenesisConfigTest { async fn tracing_genesis_config() { test_http_server(GenesisConfigTest).await; } + +#[derive(Debug)] +struct GetBytecodeTest; + +impl GetBytecodeTest { + async fn insert_evm_bytecode( + connection: &mut Connection<'_, Core>, + at_block: L2BlockNumber, + address: Address, + ) -> anyhow::Result<()> { + let evm_bytecode_hash = hash_evm_bytecode(RAW_EVM_BYTECODE); + let code_log = StorageLog::new_write_log(get_code_key(&address), evm_bytecode_hash); + connection + .storage_logs_dal() + .append_storage_logs(at_block, &[code_log]) + .await?; + + let factory_deps = HashMap::from([(evm_bytecode_hash, RAW_EVM_BYTECODE.to_vec())]); + connection + .factory_deps_dal() + .insert_factory_deps(at_block, &factory_deps) + .await?; + Ok(()) + } +} + +#[async_trait] +impl HttpTest for GetBytecodeTest { + async fn test( + &self, + client: &DynClient<L2>, + pool: &ConnectionPool<Core>, + ) -> anyhow::Result<()> { + let genesis_evm_address = Address::repeat_byte(1); + let mut connection = pool.connection().await?; + Self::insert_evm_bytecode(&mut connection, L2BlockNumber(0), genesis_evm_address).await?; + + for contract in get_system_smart_contracts(false) { + let bytecode = client + .get_code(*contract.account_id.address(), None) + .await?; + assert_eq!(bytecode.0, contract.bytecode); + } + + let bytecode = client.get_code(genesis_evm_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + + let latest_block_variants = [ + api::BlockNumber::Pending, + api::BlockNumber::Latest, + api::BlockNumber::Committed, + ]; + let latest_block_variants = latest_block_variants.map(api::BlockIdVariant::BlockNumber); + + let genesis_block_variants = [ + api::BlockIdVariant::BlockNumber(api::BlockNumber::Earliest),
api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(0.into())), + api::BlockIdVariant::BlockHashObject(api::BlockHashObject { + block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + }), + ]; + for at_block in latest_block_variants + .into_iter() + .chain(genesis_block_variants) + { + println!("Testing {at_block:?} with genesis EVM code, latest block: 0"); + let bytecode = client.get_code(genesis_evm_address, Some(at_block)).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + + // Create another block with an EVM bytecode. + let new_bytecode_address = Address::repeat_byte(2); + let mut connection = pool.connection().await?; + let block_header = store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + Self::insert_evm_bytecode(&mut connection, L2BlockNumber(1), new_bytecode_address).await?; + + let bytecode = client.get_code(genesis_evm_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + let bytecode = client.get_code(new_bytecode_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + + let new_block_variants = [ + api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(1.into())), + api::BlockIdVariant::BlockHashObject(api::BlockHashObject { + block_hash: block_header.hash, + }), + ]; + for at_block in latest_block_variants.into_iter().chain(new_block_variants) { + println!("Testing {at_block:?} with new EVM code, latest block: 1"); + let bytecode = client + .get_code(new_bytecode_address, Some(at_block)) + .await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + for at_block in genesis_block_variants { + println!("Testing {at_block:?} with new EVM code, latest block: 1"); + let bytecode = client + .get_code(new_bytecode_address, Some(at_block)) + .await?; + assert!(bytecode.0.is_empty()); + } + + for at_block in latest_block_variants + .into_iter() + .chain(new_block_variants) + .chain(genesis_block_variants) + { + println!("Testing {at_block:?} with genesis EVM code, latest block: 1"); + let bytecode = client.get_code(genesis_evm_address, Some(at_block)).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + Ok(()) + } +} + +#[tokio::test] +async fn getting_bytecodes() { + test_http_server(GetBytecodeTest).await; +} From 114834f357421c62d596a1954fac8ce615cfde49 Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Mon, 14 Oct 2024 11:50:48 +0200 Subject: [PATCH 054/140] feat(zk_toolbox): Redesign zk_toolbox commands (#3003) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - merge `zk_supervisor` and `zk_inception`. `zk_supervisor` commands would be available in `zk_inception dev` subcommand. - rename `zk_inception` to `zkstack` - rename `zk_toolbox` to `ZK Stack CLI` (in human-readable context) and `zkstack_cli` (in "code" context, e.g. workspace name) - rename `zkup` to `zkstackup` similarly to `foundryup` and `rustup`. - remove `zks` and `zki` aliases - update all the docs in the repo and make sure that new naming is consistently used ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
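For readers migrating scripts, here is a minimal, illustrative mapping between old and new invocations, drawn from the hook and CI changes in this patch (the install step uses the `zkstack_cli/zkstackup/install` script added here; flags shown are the ones appearing in the updated workflows):

```bash
# Install ZK Stack CLI via zkstackup (replaces zkup and ./bin/zkt), as the CI jobs below do.
./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup
zkstackup -g --local

# Former zk_inception commands keep their names under the renamed `zkstack` binary.
zkstack ecosystem init --dev           # was: zk_inception ecosystem init --dev
zkstack server --chain era             # was: zk_inception server --chain era

# Former zk_supervisor commands move under the `zkstack dev` subcommand.
zkstack dev fmt --check                # was: zk_supervisor fmt --check
zkstack dev lint -t rs --check         # was: zk_supervisor lint -t rs --check
zkstack dev contracts                  # was: zk_supervisor contracts
```

Since the `zks` and `zki` aliases are removed, scripts should call `zkstack` directly.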
--------- Co-authored-by: Danil --- .githooks/pre-push | 25 +- .github/pull_request_template.md | 2 +- .github/release-please/config.json | 4 +- .github/release-please/manifest.json | 2 +- .../build-contract-verifier-template.yml | 8 +- .github/workflows/build-core-template.yml | 8 +- .github/workflows/build-local-node-docker.yml | 11 +- .github/workflows/ci-common-reusable.yml | 9 +- .github/workflows/ci-core-lint-reusable.yml | 24 +- .github/workflows/ci-core-reusable.yml | 116 +++--- .github/workflows/ci-docs-reusable.yml | 10 +- .github/workflows/ci-prover-reusable.yml | 18 +- .github/workflows/ci.yml | 4 +- .../new-build-contract-verifier-template.yml | 9 +- .github/workflows/new-build-core-template.yml | 9 +- .github/workflows/vm-perf-comparison.yml | 11 +- .github/workflows/vm-perf-to-prometheus.yml | 11 +- .gitignore | 4 +- bin/zk | 1 + bin/zkt | 17 - core/lib/utils/src/env.rs | 40 +- core/tests/recovery-test/src/index.ts | 10 +- core/tests/recovery-test/src/utils.ts | 8 +- .../tests/snapshot-recovery.test.ts | 6 +- core/tests/revert-test/tests/utils.ts | 20 +- core/tests/ts-integration/src/utils.ts | 10 +- core/tests/upgrade-test/tests/upgrade.test.ts | 4 +- core/tests/upgrade-test/tests/utils.ts | 12 +- docker/Makefile | 4 +- flake.nix | 2 +- infrastructure/zk/src/fmt.ts | 2 +- infrastructure/zk/src/lint.ts | 12 +- prover/docs/05_proving_batch.md | 48 ++- zk_toolbox/crates/zk_supervisor/Cargo.toml | 32 -- zk_toolbox/crates/zk_supervisor/README.md | 386 ------------------ zk_toolbox/crates/zk_supervisor/src/main.rs | 151 ------- zk_toolbox/zkup/README.md | 76 ---- zk_toolbox/zkup/install | 55 --- zk_toolbox/zkup/zkup | 254 ------------ {zk_toolbox => zkstack_cli}/CHANGELOG.md | 0 {zk_toolbox => zkstack_cli}/Cargo.lock | 254 ++++++------ {zk_toolbox => zkstack_cli}/Cargo.toml | 7 +- {zk_toolbox => zkstack_cli}/README.md | 88 ++-- .../crates/common/Cargo.toml | 0 .../crates/common/src/cmd.rs | 0 .../crates/common/src/config.rs | 0 .../crates/common/src/db.rs | 0 .../crates/common/src/docker.rs | 0 .../crates/common/src/ethereum.rs | 0 .../crates/common/src/external_node.rs | 0 .../crates/common/src/files.rs | 0 .../crates/common/src/forge.rs | 2 +- .../crates/common/src/git.rs | 0 .../crates/common/src/lib.rs | 0 .../crates/common/src/prerequisites.rs | 0 .../crates/common/src/prompt/confirm.rs | 0 .../crates/common/src/prompt/input.rs | 0 .../crates/common/src/prompt/mod.rs | 0 .../crates/common/src/prompt/select.rs | 0 .../crates/common/src/server.rs | 0 .../crates/common/src/term/error.rs | 0 .../crates/common/src/term/logger.rs | 2 +- .../crates/common/src/term/mod.rs | 0 .../crates/common/src/term/spinner.rs | 0 .../crates/common/src/version.rs | 0 .../crates/common/src/wallets.rs | 0 .../crates/common/src/yaml.rs | 0 .../crates/config/Cargo.toml | 0 .../crates/config/src/apps.rs | 4 +- .../crates/config/src/chain.rs | 4 +- .../crates/config/src/consensus_config.rs | 0 .../crates/config/src/consensus_secrets.rs | 0 .../crates/config/src/consts.rs | 0 .../crates/config/src/contracts.rs | 6 +- .../crates/config/src/docker_compose.rs | 4 +- .../crates/config/src/ecosystem.rs | 6 +- .../crates/config/src/explorer.rs | 4 +- .../crates/config/src/explorer_compose.rs | 4 +- .../crates/config/src/external_node.rs | 0 .../crates/config/src/file_config.rs | 0 .../forge_interface/accept_ownership/mod.rs | 4 +- .../forge_interface/deploy_ecosystem/input.rs | 10 +- .../forge_interface/deploy_ecosystem/mod.rs | 0 .../deploy_ecosystem/output.rs | 6 +- .../deploy_l2_contracts/input.rs | 4 +- 
.../deploy_l2_contracts/mod.rs | 0 .../deploy_l2_contracts/output.rs | 10 +- .../crates/config/src/forge_interface/mod.rs | 0 .../src/forge_interface/paymaster/mod.rs | 6 +- .../forge_interface/register_chain/input.rs | 4 +- .../src/forge_interface/register_chain/mod.rs | 0 .../forge_interface/register_chain/output.rs | 4 +- .../src/forge_interface/script_params.rs | 0 .../setup_legacy_bridge/mod.rs | 4 +- .../crates/config/src/general.rs | 0 .../crates/config/src/genesis.rs | 0 .../crates/config/src/lib.rs | 0 .../crates/config/src/manipulations.rs | 0 .../crates/config/src/portal.rs | 4 +- .../crates/config/src/secrets.rs | 0 .../crates/config/src/traits.rs | 8 +- .../crates/config/src/wallet_creation.rs | 0 .../crates/config/src/wallets.rs | 6 +- .../crates/git_version_macro/Cargo.toml | 0 .../crates/git_version_macro/src/lib.rs | 0 .../crates/types/Cargo.toml | 0 .../crates/types/src/base_token.rs | 0 .../crates/types/src/l1_network.rs | 0 .../crates/types/src/lib.rs | 0 .../crates/types/src/prover_mode.rs | 0 .../crates/types/src/token_info.rs | 0 .../crates/types/src/wallet_creation.rs | 0 .../crates/zkstack}/Cargo.toml | 27 +- .../crates/zkstack}/README.md | 2 +- .../zkstack}/abi/ConsensusRegistry.json | 0 .../crates/zkstack}/build.rs | 0 .../crates/zkstack}/src/accept_ownership.rs | 0 .../zkstack}/src/commands/args/containers.rs | 0 .../crates/zkstack}/src/commands/args/mod.rs | 0 .../zkstack}/src/commands/args/run_server.rs | 0 .../zkstack}/src/commands/args/update.rs | 0 .../commands/chain/accept_chain_ownership.rs | 0 .../commands/chain/args/build_transactions.rs | 0 .../src/commands/chain/args/create.rs | 0 .../src/commands/chain/args/genesis.rs | 0 .../src/commands/chain/args/init/configs.rs | 0 .../src/commands/chain/args/init/mod.rs | 0 .../zkstack}/src/commands/chain/args/mod.rs | 0 .../src/commands/chain/build_transactions.rs | 0 .../zkstack}/src/commands/chain/common.rs | 0 .../zkstack}/src/commands/chain/create.rs | 0 .../src/commands/chain/deploy_l2_contracts.rs | 0 .../src/commands/chain/deploy_paymaster.rs | 0 .../src/commands/chain/genesis/database.rs | 0 .../src/commands/chain/genesis/mod.rs | 0 .../src/commands/chain/genesis/server.rs | 0 .../src/commands/chain/init/configs.rs | 0 .../zkstack}/src/commands/chain/init/mod.rs | 0 .../crates/zkstack}/src/commands/chain/mod.rs | 0 .../src/commands/chain/register_chain.rs | 0 .../chain/set_token_multiplier_setter.rs | 0 .../src/commands/chain/setup_legacy_bridge.rs | 0 .../zkstack}/src/commands/consensus/conv.rs | 0 .../zkstack}/src/commands/consensus/mod.rs | 0 .../src/commands/consensus/proto/mod.proto | 0 .../src/commands/consensus/proto/mod.rs | 0 .../zkstack}/src/commands/consensus/tests.rs | 0 .../zkstack}/src/commands/containers.rs | 0 .../commands/contract_verifier/args/init.rs | 0 .../commands/contract_verifier/args/mod.rs | 0 .../contract_verifier/args/releases.rs | 0 .../src/commands/contract_verifier/init.rs | 0 .../src/commands/contract_verifier/mod.rs | 0 .../src/commands/contract_verifier/run.rs | 0 .../src/commands/dev}/commands/clean/mod.rs | 2 +- .../commands/dev}/commands/config_writer.rs | 2 +- .../src/commands/dev}/commands/contracts.rs | 2 +- .../dev}/commands/database/args/mod.rs | 2 +- .../commands/database/args/new_migration.rs | 2 +- .../dev}/commands/database/check_sqlx_data.rs | 2 +- .../commands/dev}/commands/database/drop.rs | 2 +- .../dev}/commands/database/migrate.rs | 2 +- .../commands/dev}/commands/database/mod.rs | 2 +- .../dev}/commands/database/new_migration.rs | 2 +- 
.../dev}/commands/database/prepare.rs | 2 +- .../commands/dev}/commands/database/reset.rs | 2 +- .../commands/dev}/commands/database/setup.rs | 2 +- .../zkstack/src/commands/dev}/commands/fmt.rs | 4 +- .../src/commands/dev}/commands/lint.rs | 6 +- .../src/commands/dev}/commands/lint_utils.rs | 0 .../zkstack/src/commands/dev}/commands/mod.rs | 0 .../dev}/commands/prover/args/insert_batch.rs | 0 .../commands/prover/args/insert_version.rs | 0 .../commands/dev}/commands/prover/args/mod.rs | 0 .../src/commands/dev}/commands/prover/info.rs | 2 +- .../dev}/commands/prover/insert_batch.rs | 2 +- .../dev}/commands/prover/insert_version.rs | 2 +- .../src/commands/dev}/commands/prover/mod.rs | 0 .../commands/send_transactions/args/mod.rs | 2 +- .../dev}/commands/send_transactions/mod.rs | 2 +- .../src/commands/dev}/commands/snapshot.rs | 2 +- .../src/commands/dev}/commands/sql_fmt.rs | 4 +- .../commands/dev}/commands/test/args/fees.rs | 2 +- .../dev}/commands/test/args/integration.rs | 4 +- .../commands/dev}/commands/test/args/mod.rs | 0 .../dev}/commands/test/args/recovery.rs | 4 +- .../dev}/commands/test/args/revert.rs | 2 +- .../commands/dev}/commands/test/args/rust.rs | 2 +- .../dev}/commands/test/args/upgrade.rs | 2 +- .../src/commands/dev}/commands/test/build.rs | 0 .../src/commands/dev}/commands/test/db.rs | 2 +- .../src/commands/dev}/commands/test/fees.rs | 2 +- .../dev}/commands/test/integration.rs | 2 +- .../dev}/commands/test/l1_contracts.rs | 2 +- .../commands/dev}/commands/test/loadtest.rs | 2 +- .../src/commands/dev}/commands/test/mod.rs | 2 +- .../src/commands/dev}/commands/test/prover.rs | 2 +- .../commands/dev}/commands/test/recovery.rs | 2 +- .../src/commands/dev}/commands/test/revert.rs | 2 +- .../src/commands/dev}/commands/test/rust.rs | 6 +- .../commands/dev}/commands/test/upgrade.rs | 2 +- .../src/commands/dev}/commands/test/utils.rs | 2 +- .../src/commands/dev}/commands/test/wallet.rs | 2 +- .../zkstack/src/commands/dev}/consts.rs | 0 .../crates/zkstack/src/commands/dev}/dals.rs | 2 +- .../zkstack/src/commands/dev}/defaults.rs | 0 .../zkstack/src/commands/dev}/messages.rs | 6 +- .../crates/zkstack/src/commands/dev/mod.rs | 61 +++ .../ecosystem/args/build_transactions.rs | 0 .../commands/ecosystem/args/change_default.rs | 0 .../src/commands/ecosystem/args/create.rs | 0 .../src/commands/ecosystem/args/init.rs | 0 .../src/commands/ecosystem/args/mod.rs | 0 .../commands/ecosystem/build_transactions.rs | 0 .../src/commands/ecosystem/change_default.rs | 0 .../zkstack}/src/commands/ecosystem/common.rs | 0 .../zkstack}/src/commands/ecosystem/create.rs | 0 .../src/commands/ecosystem/create_configs.rs | 0 .../zkstack}/src/commands/ecosystem/init.rs | 0 .../zkstack}/src/commands/ecosystem/mod.rs | 0 .../commands/ecosystem/setup_observability.rs | 0 .../zkstack}/src/commands/ecosystem/utils.rs | 0 .../zkstack}/src/commands/explorer/backend.rs | 0 .../zkstack}/src/commands/explorer/init.rs | 0 .../zkstack}/src/commands/explorer/mod.rs | 0 .../zkstack}/src/commands/explorer/run.rs | 0 .../src/commands/external_node/args/mod.rs | 0 .../external_node/args/prepare_configs.rs | 0 .../src/commands/external_node/args/run.rs | 0 .../src/commands/external_node/init.rs | 0 .../src/commands/external_node/mod.rs | 0 .../commands/external_node/prepare_configs.rs | 0 .../src/commands/external_node/run.rs | 0 .../crates/zkstack}/src/commands/mod.rs | 1 + .../crates/zkstack}/src/commands/portal.rs | 0 .../commands/prover/args/compressor_keys.rs | 0 .../zkstack}/src/commands/prover/args/init.rs | 0 
.../commands/prover/args/init_bellman_cuda.rs | 0 .../zkstack}/src/commands/prover/args/mod.rs | 0 .../zkstack}/src/commands/prover/args/run.rs | 0 .../src/commands/prover/args/setup_keys.rs | 0 .../src/commands/prover/compressor_keys.rs | 0 .../zkstack}/src/commands/prover/gcs.rs | 0 .../zkstack}/src/commands/prover/init.rs | 0 .../src/commands/prover/init_bellman_cuda.rs | 0 .../zkstack}/src/commands/prover/mod.rs | 0 .../zkstack}/src/commands/prover/run.rs | 0 .../src/commands/prover/setup_keys.rs | 0 .../crates/zkstack}/src/commands/server.rs | 0 .../crates/zkstack}/src/commands/update.rs | 0 .../crates/zkstack}/src/consts.rs | 0 .../crates/zkstack}/src/defaults.rs | 0 .../crates/zkstack}/src/external_node.rs | 0 .../crates/zkstack}/src/main.rs | 5 + .../crates/zkstack}/src/messages.rs | 4 +- .../crates/zkstack}/src/utils/consensus.rs | 0 .../crates/zkstack}/src/utils/forge.rs | 0 .../crates/zkstack}/src/utils/mod.rs | 0 .../crates/zkstack}/src/utils/ports.rs | 0 .../crates/zkstack}/src/utils/rocks_db.rs | 0 {zk_toolbox => zkstack_cli}/rust-toolchain | 0 zkstack_cli/zkstackup/README.md | 70 ++++ zkstack_cli/zkstackup/install | 121 ++++++ zkstack_cli/zkstackup/zkstackup | 273 +++++++++++++ 264 files changed, 1103 insertions(+), 1457 deletions(-) delete mode 100755 bin/zkt delete mode 100644 zk_toolbox/crates/zk_supervisor/Cargo.toml delete mode 100644 zk_toolbox/crates/zk_supervisor/README.md delete mode 100644 zk_toolbox/crates/zk_supervisor/src/main.rs delete mode 100644 zk_toolbox/zkup/README.md delete mode 100755 zk_toolbox/zkup/install delete mode 100755 zk_toolbox/zkup/zkup rename {zk_toolbox => zkstack_cli}/CHANGELOG.md (100%) rename {zk_toolbox => zkstack_cli}/Cargo.lock (97%) rename {zk_toolbox => zkstack_cli}/Cargo.toml (90%) rename {zk_toolbox => zkstack_cli}/README.md (85%) rename {zk_toolbox => zkstack_cli}/crates/common/Cargo.toml (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/cmd.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/config.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/db.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/docker.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/ethereum.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/external_node.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/files.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/forge.rs (99%) rename {zk_toolbox => zkstack_cli}/crates/common/src/git.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/lib.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/prerequisites.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/prompt/confirm.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/prompt/input.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/prompt/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/prompt/select.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/server.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/term/error.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/term/logger.rs (97%) rename {zk_toolbox => zkstack_cli}/crates/common/src/term/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/term/spinner.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/version.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/wallets.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/yaml.rs (100%) rename 
{zk_toolbox => zkstack_cli}/crates/config/Cargo.toml (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/apps.rs (96%) rename {zk_toolbox => zkstack_cli}/crates/config/src/chain.rs (98%) rename {zk_toolbox => zkstack_cli}/crates/config/src/consensus_config.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/consensus_secrets.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/consts.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/contracts.rs (97%) rename {zk_toolbox => zkstack_cli}/crates/config/src/docker_compose.rs (94%) rename {zk_toolbox => zkstack_cli}/crates/config/src/ecosystem.rs (98%) rename {zk_toolbox => zkstack_cli}/crates/config/src/explorer.rs (98%) rename {zk_toolbox => zkstack_cli}/crates/config/src/explorer_compose.rs (98%) rename {zk_toolbox => zkstack_cli}/crates/config/src/external_node.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/file_config.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/accept_ownership/mod.rs (71%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_ecosystem/input.rs (97%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_ecosystem/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_ecosystem/output.rs (95%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_l2_contracts/input.rs (92%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_l2_contracts/output.rs (73%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/paymaster/mod.rs (83%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/register_chain/input.rs (96%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/register_chain/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/register_chain/output.rs (75%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/script_params.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs (86%) rename {zk_toolbox => zkstack_cli}/crates/config/src/general.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/genesis.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/lib.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/manipulations.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/portal.rs (98%) rename {zk_toolbox => zkstack_cli}/crates/config/src/secrets.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/traits.rs (95%) rename {zk_toolbox => zkstack_cli}/crates/config/src/wallet_creation.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/wallets.rs (91%) rename {zk_toolbox => zkstack_cli}/crates/git_version_macro/Cargo.toml (100%) rename {zk_toolbox => zkstack_cli}/crates/git_version_macro/src/lib.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/types/Cargo.toml (100%) rename {zk_toolbox => zkstack_cli}/crates/types/src/base_token.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/types/src/l1_network.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/types/src/lib.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/types/src/prover_mode.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/types/src/token_info.rs (100%) 
rename {zk_toolbox => zkstack_cli}/crates/types/src/wallet_creation.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/Cargo.toml (93%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/README.md (99%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/abi/ConsensusRegistry.json (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/build.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/accept_ownership.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/args/containers.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/args/run_server.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/args/update.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/accept_chain_ownership.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/args/build_transactions.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/args/create.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/args/genesis.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/args/init/configs.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/args/init/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/build_transactions.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/common.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/create.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/deploy_l2_contracts.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/deploy_paymaster.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/genesis/database.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/genesis/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/genesis/server.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/init/configs.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/init/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/register_chain.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/set_token_multiplier_setter.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/setup_legacy_bridge.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/consensus/conv.rs (100%) rename {zk_toolbox/crates/zk_inception => 
zkstack_cli/crates/zkstack}/src/commands/consensus/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/consensus/proto/mod.proto (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/consensus/proto/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/consensus/tests.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/containers.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/args/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/args/releases.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/run.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/clean/mod.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/config_writer.rs (96%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/contracts.rs (99%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/args/mod.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/args/new_migration.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/check_sqlx_data.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/drop.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/migrate.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/mod.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/new_migration.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/prepare.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/reset.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/setup.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/fmt.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/lint.rs (95%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/lint_utils.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/mod.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/args/insert_batch.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => 
zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/args/insert_version.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/args/mod.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/info.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/insert_batch.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/insert_version.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/mod.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/send_transactions/args/mod.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/send_transactions/mod.rs (99%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/snapshot.rs (91%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/sql_fmt.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/fees.rs (78%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/integration.rs (78%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/mod.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/recovery.rs (76%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/revert.rs (93%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/rust.rs (70%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/upgrade.rs (72%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/build.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/db.rs (88%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/fees.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/integration.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/l1_contracts.rs (86%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/loadtest.rs (95%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/mod.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/prover.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/recovery.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/revert.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/rust.rs (94%) rename {zk_toolbox/crates/zk_supervisor/src => 
zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/upgrade.rs (91%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/utils.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/wallet.rs (96%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/consts.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/dals.rs (99%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/defaults.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/messages.rs (98%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/mod.rs rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/args/build_transactions.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/args/change_default.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/args/create.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/args/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/build_transactions.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/change_default.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/common.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/create.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/create_configs.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/setup_observability.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/utils.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/explorer/backend.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/explorer/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/explorer/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/explorer/run.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/args/prepare_configs.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/args/run.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => 
zkstack_cli/crates/zkstack}/src/commands/external_node/prepare_configs.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/run.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/mod.rs (94%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/portal.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/compressor_keys.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/init_bellman_cuda.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/run.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/setup_keys.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/compressor_keys.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/gcs.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/init_bellman_cuda.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/run.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/setup_keys.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/server.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/update.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/consts.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/defaults.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/external_node.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/main.rs (96%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/messages.rs (99%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/utils/consensus.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/utils/forge.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/utils/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/utils/ports.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/utils/rocks_db.rs (100%) rename {zk_toolbox => zkstack_cli}/rust-toolchain (100%) create mode 100644 zkstack_cli/zkstackup/README.md create mode 100755 zkstack_cli/zkstackup/install create mode 100755 zkstack_cli/zkstackup/zkstackup diff --git a/.githooks/pre-push b/.githooks/pre-push index 73168e08ec4..ef5e77cbc79 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -6,14 +6,29 @@ RED='\033[0;31m' NC='\033[0m' # No Color +# Common prompts +INSTALL_PROPT="Please install ZK Stack CLI using zkstackup from https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli/zkstackup" 
+FORMAT_PROMPT="Please format the code via 'zkstack dev fmt', cannot push unformatted code" + # Check that prettier formatting rules are not violated. -if which zk_supervisor >/dev/null; then - if ! zk_supervisor fmt --check; then +if which zkstack >/dev/null; then + if ! zkstack dev fmt --check; then echo -e "${RED}Push error!${NC}" - echo "Please format the code via 'zks fmt', cannot push unformatted code" + echo -e "${FORMAT_PROMPT}" exit 1 fi else - echo "Please install zk_toolbox using zkup from https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup, and then run ./bin/zkt from the zksync-era repository." - exit 1 + if which zk_supervisor >/dev/null; then + echo -e "${RED}WARNING: zkup, zk_inception/zki, and zk_supervisor/zks are DEPRECATED.${NC}" + echo -e "${RED}${INSTALL_PROPT}${NC}" + + if ! zk_supervisor fmt --check; then + echo -e "${RED}Push error!${NC}" + echo -e "${FORMAT_PROMPT}" + exit 1 + fi + else + echo -e "${INSTALL_PROPT}" + exit 1 + fi fi diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index a712db9f75b..d68b45e9d43 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,4 +17,4 @@ - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. -- [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. +- [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. diff --git a/.github/release-please/config.json b/.github/release-please/config.json index 86839e804ca..358e249a18b 100644 --- a/.github/release-please/config.json +++ b/.github/release-please/config.json @@ -20,9 +20,9 @@ "release-type": "simple", "component": "prover" }, - "zk_toolbox": { + "zkstack_cli": { "release-type": "simple", - "component": "zk_toolbox", + "component": "zkstack_cli", "plugins": [ "cargo-workspace" ] diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index e0e8fbeecf7..ca19e91219d 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "core": "24.28.0", "prover": "16.5.0", - "zk_toolbox": "0.1.2" + "zkstack_cli": "0.1.2" } diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index bb385b2797b..e4d04b90410 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -113,15 +113,19 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run ./bin/zkt || true ci_run ./bin/zk || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + - name: install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + ci_run zkstack dev contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index 
deaf087cd3e..33053b6a400 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -127,14 +127,18 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run ./bin/zk || true - ci_run ./bin/zkt || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + ci_run zkstack dev contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index f664bfaaa00..80142cb6005 100644 --- a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -53,6 +53,11 @@ jobs: mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g - name: init run: | @@ -61,9 +66,11 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run zk - ci_run zkt ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + + - name: build contracts + run: | + ci_run zkstack dev contracts - name: update-image run: | diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index 2f51229aeaf..7d75fb224d6 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -29,13 +29,14 @@ jobs: run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - - - name: Init + + - name: Install zkstack run: | - ci_run zkt + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local # This does both linting and "building". 
We're using `zk lint prover` as it's common practice within our repo # `zk lint prover` = cargo clippy, which does cargo check behind the scenes, which is a lightweight version of cargo build - name: Lints - run: ci_run zk_supervisor lint -t rs --check + run: ci_run zkstack dev lint -t rs --check diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 6d0785fe46f..53b25835ff5 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -26,24 +26,30 @@ jobs: - name: Start services run: | ci_localnet_up + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local - name: Build run: | - ci_run ./bin/zkt ci_run yarn install ci_run git config --global --add safe.directory /usr/src/zksync - ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + - name: Database setup + run: | + ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Lints run: | - ci_run zk_supervisor fmt --check - ci_run zk_supervisor lint -t md --check - ci_run zk_supervisor lint -t sol --check - ci_run zk_supervisor lint -t js --check - ci_run zk_supervisor lint -t ts --check - ci_run zk_supervisor lint -t rs --check + ci_run zkstack dev fmt --check + ci_run zkstack dev lint -t md --check + ci_run zkstack dev lint -t sol --check + ci_run zkstack dev lint -t js --check + ci_run zkstack dev lint -t ts --check + ci_run zkstack dev lint -t rs --check - name: Check Database run: | - ci_run zk_supervisor database check-sqlx-data --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + ci_run zkstack dev database check-sqlx-data --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 7c7695ce56e..9aaa476d740 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -56,15 +56,22 @@ jobs: - name: Init run: | ci_run run_retried rustup show - ci_run ./bin/zkt - ci_run zk_supervisor contracts + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + + - name: Build contracts + run: | + ci_run zkstack dev contracts - name: Contracts unit tests run: ci_run yarn l1-contracts test - name: Rust unit tests run: | - ci_run zk_supervisor test rust + ci_run zkstack dev test rust # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible # with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually. 
ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch @@ -113,8 +120,15 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run ./bin/zkt - ci_run zk_inception chain create \ + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + + + - name: Create and initialize legacy chain + run: | + ci_run zkstack chain create \ --chain-name legacy \ --chain-id sequential \ --prover-mode no-proofs \ @@ -127,18 +141,18 @@ jobs: --ignore-prerequisites \ --legacy-bridge - ci_run zk_inception ecosystem init --dev --verbose - ci_run zk_supervisor contracts --test-contracts + ci_run zkstack ecosystem init --dev --verbose + ci_run zkstack dev contracts --test-contracts # `sleep 60` because we need to wait until server added all the tokens - name: Run server run: | - ci_run zk_supervisor config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy - ci_run zk_inception server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & + ci_run zkstack dev config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy + ci_run zkstack server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & ci_run sleep 60 - name: Perform loadtest - run: ci_run zk_supervisor t loadtest -v --chain=legacy + run: ci_run zkstack dev t loadtest -v --chain=legacy - name: Show server.log logs if: always() @@ -175,9 +189,11 @@ jobs: run: | ci_localnet_up - - name: Build zk_toolbox - run: ci_run bash -c "./bin/zkt" - + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: Create log directories run: | SERVER_LOGS_DIR=logs/server @@ -213,7 +229,7 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ + ci_run zkstack ecosystem init --deploy-paymaster --deploy-erc20 \ --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_era \ @@ -228,7 +244,7 @@ jobs: - name: Create and initialize Validium chain run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name validium \ --chain-id sequential \ --prover-mode no-proofs \ @@ -240,7 +256,7 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain init \ + ci_run zkstack chain init \ --deploy-paymaster \ --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ @@ -249,7 +265,7 @@ jobs: - name: Create and initialize chain with Custom Token run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name custom_token \ --chain-id sequential \ --prover-mode no-proofs \ @@ -261,7 +277,7 @@ jobs: --set-as-default false 
\ --ignore-prerequisites - ci_run zk_inception chain init \ + ci_run zkstack chain init \ --deploy-paymaster \ --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ @@ -270,7 +286,7 @@ jobs: - name: Create and register chain with transactions signed "offline" run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name offline_chain \ --chain-id sequential \ --prover-mode no-proofs \ @@ -282,11 +298,11 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545 + ci_run zkstack chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545 governor_pk=$(awk '/governor:/ {flag=1} flag && /private_key:/ {print $2; exit}' ./configs/wallets.yaml) - ci_run zk_supervisor send-transactions \ + ci_run zkstack dev send-transactions \ --file ./transactions/chain/offline_chain/register-hyperchain-txns.json \ --l1-rpc-url http://127.0.0.1:8545 \ --private-key $governor_pk @@ -305,7 +321,7 @@ jobs: - name: Create and initialize Consensus chain run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name consensus \ --chain-id sequential \ --prover-mode no-proofs \ @@ -317,7 +333,7 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain init \ + ci_run zkstack chain init \ --deploy-paymaster \ --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ @@ -331,19 +347,19 @@ jobs: - name: Build test dependencies run: | - ci_run zk_supervisor test build + ci_run zkstack dev test build - name: Initialize Contract verifier run: | - ci_run zk_inception contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.26-1.0.1 --only --chain era - ci_run zk_inception contract-verifier run --chain era &> ${{ env.SERVER_LOGS_DIR }}/contract-verifier-rollup.log & + ci_run zkstack contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.26-1.0.1 --only --chain era + ci_run zkstack contract-verifier run --chain era &> ${{ env.SERVER_LOGS_DIR }}/contract-verifier-rollup.log & - name: Run servers run: | - ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & - ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & - ci_run zk_inception server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & - ci_run zk_inception server --ignore-prerequisites --chain consensus \ + ci_run zkstack server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & + ci_run zkstack server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & + ci_run zkstack server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & + ci_run zkstack server --ignore-prerequisites --chain consensus \ --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & @@ -351,67 +367,67 @@ jobs: - name: Setup attester committee for the consensus chain run: | - ci_run zk_inception consensus set-attester-committee --chain 
consensus --from-genesis &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log + ci_run zkstack consensus set-attester-committee --chain consensus --from-genesis &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log - name: Run integration tests run: | - ci_run ./bin/run_on_all_chains.sh "zk_supervisor test integration --no-deps --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} + ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Init external nodes run: | - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era - ci_run zk_inception external-node init --ignore-prerequisites --chain era + ci_run zkstack external-node init --ignore-prerequisites --chain era - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium - ci_run zk_inception external-node init --ignore-prerequisites --chain validium + ci_run zkstack external-node init --ignore-prerequisites --chain validium - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token - ci_run zk_inception external-node init --ignore-prerequisites --chain custom_token + ci_run zkstack external-node init --ignore-prerequisites --chain custom_token - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus - ci_run zk_inception external-node init --ignore-prerequisites --chain consensus + ci_run zkstack external-node init --ignore-prerequisites --chain consensus - name: Run recovery tests (from snapshot) run: | - ci_run ./bin/run_on_all_chains.sh "zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} + ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --snapshot --no-deps --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Run recovery tests (from genesis) run: | - ci_run ./bin/run_on_all_chains.sh "zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} + ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --no-deps --no-kill --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Run external node server run: | - ci_run zk_inception external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & - ci_run zk_inception external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & 
- ci_run zk_inception external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & - ci_run zk_inception external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & + ci_run zkstack external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & + ci_run zkstack external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & + ci_run zkstack external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & + ci_run zkstack external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & - name: Run integration tests en run: | - ci_run ./bin/run_on_all_chains.sh "zk_supervisor test integration --no-deps --ignore-prerequisites --external-node" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} + ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites --external-node" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Fee projection tests run: | ci_run killall -INT zksync_server || true - ci_run ./bin/run_on_all_chains.sh "zk_supervisor test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.FEES_LOGS_DIR }} + ci_run ./bin/run_on_all_chains.sh "zkstack dev test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.FEES_LOGS_DIR }} - name: Run revert tests run: | ci_run killall -INT zksync_server || true ci_run killall -INT zksync_external_node || true - ci_run ./bin/run_on_all_chains.sh "zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} + ci_run ./bin/run_on_all_chains.sh "zkstack dev test revert --no-deps --external-node --no-kill --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} # Upgrade tests should run last, because as soon as they # finish the bootloader will be different # TODO make upgrade tests safe to run multiple times - name: Run upgrade test run: | - ci_run zk_supervisor test upgrade --no-deps --chain era + ci_run zkstack dev test upgrade --no-deps --chain era - name: Upload logs diff --git a/.github/workflows/ci-docs-reusable.yml b/.github/workflows/ci-docs-reusable.yml index 4138a7eb7a3..e1a9cf78df7 100644 --- a/.github/workflows/ci-docs-reusable.yml +++ b/.github/workflows/ci-docs-reusable.yml @@ -27,13 +27,17 @@ jobs: run_retried docker compose pull zk docker compose up -d zk + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + - name: Build run: | - ci_run ./bin/zkt ci_run yarn install ci_run git config --global --add safe.directory /usr/src/zksync - name: Lints run: | - ci_run zk_supervisor fmt --check - ci_run zk_supervisor lint -t md --check + ci_run zkstack dev fmt --check + ci_run zkstack dev lint -t md --check diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 3f842b23488..6cb9c26d21e 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -30,10 +30,14 @@ jobs: mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - - name: Init + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path 
./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + + - name: Database setup run: | - ci_run zkt - ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Formatting run: ci_run bash -c "cd prover && cargo fmt --check" @@ -65,12 +69,16 @@ jobs: mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: Init run: | - ci_run zkt ci_run run_retried rustup show - name: Prover unit tests run: | # Not all tests are enabled, since prover and setup_key_generator_and_server requires bellman-cuda to be present - ci_run zk_supervisor test prover + ci_run zkstack dev test prover diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0a27a719aeb..fd9dedf8af4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,7 +20,7 @@ jobs: outputs: core: ${{ steps.changed-files.outputs.core_any_changed }} prover: ${{ steps.changed-files.outputs.prover_any_changed }} - zk_toolbox: ${{ steps.changed-files.outputs.zk_toolbox_any_changed }} + zkstack_cli: ${{ steps.changed-files.outputs.zkstack_cli_any_changed }} docs: ${{ steps.changed-files.outputs.docs_any_changed }} all: ${{ steps.changed-files.outputs.all_any_changed }} steps: @@ -58,7 +58,7 @@ jobs: - '.github/workflows/ci-core-lint-reusable.yml' - 'Cargo.toml' - 'Cargo.lock' - - 'zk_toolbox/**' + - 'zkstack_cli/**' - '!**/*.md' - '!**/*.MD' - 'docker-compose.yml' diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml index 42791eab666..3fc83cc62eb 100644 --- a/.github/workflows/new-build-contract-verifier-template.yml +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -38,6 +38,7 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo CI=1 >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo $HOME/.local/bin >> $GITHUB_PATH echo CI=1 >> .env echo IN_DOCKER=1 >> .env @@ -128,13 +129,17 @@ jobs: run: | mkdir -p ./volumes/postgres docker compose up -d postgres - zkt || true + + - name: Install zkstack + run: | + ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup + zkstackup --local || true - name: build contracts shell: bash run: | cp etc/tokens/{test,localhost}.json - zk_supervisor contracts + zkstack dev contracts - name: Upload contracts uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml index fba6a68b8ee..392acbc9f8f 100644 --- a/.github/workflows/new-build-core-template.yml +++ b/.github/workflows/new-build-core-template.yml @@ -43,6 +43,7 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo CI=1 >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo $HOME/.local/bin >> $GITHUB_PATH echo CI=1 >> .env echo IN_DOCKER=1 >> .env @@ -133,13 +134,17 @@ jobs: run: | mkdir -p ./volumes/postgres docker compose up -d postgres - zkt || true + - name: Install zkstack + run: | + ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup + zkstackup --local || true + - name: build contracts shell: bash run: | cp etc/tokens/{test,localhost}.json - zk_supervisor contracts + zkstack dev contracts - name: Upload contracts 
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index 6e044287ad3..ccf8f370267 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -35,6 +35,7 @@ jobs: touch .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo $(pwd)/zkstack_cli/zkstackup >> $GITHUB_PATH echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env @@ -44,12 +45,12 @@ jobs: run: | run_retried docker compose pull zk docker compose up -d zk - + - name: run benchmarks on base branch shell: bash run: | - ci_run zkt - ci_run zk_supervisor contracts --system-contracts + ci_run zkt || ci_run zkstackup -g --local # TODO remove zkt in an upcoming PR + ci_run zk_supervisor contracts --system-contracts || ci_run zkstack dev contracts --system-contracts # TODO remove zk_supervisor in an upcoming PR ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes @@ -60,8 +61,8 @@ jobs: - name: run benchmarks on PR shell: bash run: | - ci_run zkt - ci_run zk_supervisor contracts --system-contracts + ci_run zkstackup -g --local + ci_run zkstack dev contracts --system-contracts ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index 4c8c90a0d8f..3a2008e1f8e 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -28,13 +28,20 @@ jobs: echo "RUSTC_WRAPPER=sccache" >> .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local - name: init run: | run_retried docker compose pull zk docker compose up -d zk - ci_run zkt - ci_run zk_supervisor contracts + + - name: build contracts + run: | + ci_run zkstack dev contracts - name: run benchmarks run: | diff --git a/.gitignore b/.gitignore index bbd13e2319a..86ed40c7041 100644 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,7 @@ Cargo.lock !/Cargo.lock !/infrastructure/zksync-crypto/Cargo.lock !/prover/Cargo.lock -!/zk_toolbox/Cargo.lock +!/zkstack_cli/Cargo.lock /etc/env/target/* /etc/env/.current @@ -112,7 +112,7 @@ hyperchain-*.yml prover/crates/bin/vk_setup_data_generator_server_fri/data/setup_* prover/data/keys/setup_* -# Zk Toolbox +# ZK Stack CLI chains/era/configs/* chains/gateway/* configs/* diff --git a/bin/zk b/bin/zk index 868c4e338cd..f3b927de8f8 100755 --- a/bin/zk +++ b/bin/zk @@ -39,6 +39,7 @@ check_yarn_version() { # and it will be hard for them to see what went wrong. 
check_subdirectory check_yarn_version + if [ -z "$1" ]; then cd $ZKSYNC_HOME run_retried yarn install --frozen-lockfile && yarn utils build && yarn zk build diff --git a/bin/zkt b/bin/zkt deleted file mode 100755 index f781ca67528..00000000000 --- a/bin/zkt +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -cd $(dirname $0) - -if which zkup >/dev/null; then - cargo uninstall zk_inception - cargo uninstall zk_supervisor - git config --local core.hooksPath || - git config --local core.hooksPath ./.githooks - zkup -p .. --alias -else - echo zkup is not installed, please install it https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup - cd ../zk_toolbox - cargo install --path ./crates/zk_inception --force - cargo install --path ./crates/zk_supervisor --force -fi - diff --git a/core/lib/utils/src/env.rs b/core/lib/utils/src/env.rs index 5ae07caf148..8f4aa1da940 100644 --- a/core/lib/utils/src/env.rs +++ b/core/lib/utils/src/env.rs @@ -19,8 +19,8 @@ pub enum Workspace<'a> { Core(&'a Path), /// `prover` folder. Prover(&'a Path), - /// `toolbox` folder. - Toolbox(&'a Path), + /// ZK Stack CLI folder. + ZkStackCli(&'a Path), } impl Workspace<'static> { @@ -48,7 +48,7 @@ impl Workspace<'static> { impl<'a> Workspace<'a> { const PROVER_DIRECTORY_NAME: &'static str = "prover"; - const TOOLBOX_DIRECTORY_NAME: &'static str = "zk_toolbox"; + const ZKSTACK_CLI_DIRECTORY_NAME: &'static str = "zkstack_cli"; /// Returns the path of the core workspace. /// For `Workspace::None`, considers the current directory to represent core workspace. @@ -56,7 +56,7 @@ impl<'a> Workspace<'a> { match self { Self::None => PathBuf::from("."), Self::Core(path) => path.into(), - Self::Prover(path) | Self::Toolbox(path) => path.parent().unwrap().into(), + Self::Prover(path) | Self::ZkStackCli(path) => path.parent().unwrap().into(), } } @@ -68,11 +68,11 @@ impl<'a> Workspace<'a> { } } - /// Returns the path of the `zk_toolbox`` workspace. - pub fn toolbox(self) -> PathBuf { + /// Returns the path of the ZK Stack CLI workspace. + pub fn zkstack_cli(self) -> PathBuf { match self { - Self::Toolbox(path) => path.into(), - _ => self.core().join(Self::TOOLBOX_DIRECTORY_NAME), + Self::ZkStackCli(path) => path.into(), + _ => self.core().join(Self::ZKSTACK_CLI_DIRECTORY_NAME), } } } @@ -81,8 +81,8 @@ impl<'a> From<&'a Path> for Workspace<'a> { fn from(path: &'a Path) -> Self { if path.ends_with(Self::PROVER_DIRECTORY_NAME) { Self::Prover(path) - } else if path.ends_with(Self::TOOLBOX_DIRECTORY_NAME) { - Self::Toolbox(path) + } else if path.ends_with(Self::ZKSTACK_CLI_DIRECTORY_NAME) { + Self::ZkStackCli(path) } else { Self::Core(path) } @@ -154,16 +154,16 @@ mod tests { let workspace = Workspace::locate(); assert_matches!(workspace, Workspace::Core(_)); let core_path = workspace.core(); - // Check if prover and toolbox directories exist. + // Check if prover and ZK Stack CLI directories exist. assert!(workspace.prover().exists()); assert_matches!( Workspace::from(workspace.prover().as_path()), Workspace::Prover(_) ); - assert!(workspace.toolbox().exists()); + assert!(workspace.zkstack_cli().exists()); assert_matches!( - Workspace::from(workspace.toolbox().as_path()), - Workspace::Toolbox(_) + Workspace::from(workspace.zkstack_cli().as_path()), + Workspace::ZkStackCli(_) ); // Prover. 
@@ -181,17 +181,17 @@ mod tests { Workspace::from(workspace.core().as_path()), Workspace::Core(_) ); - assert!(workspace.toolbox().exists()); + assert!(workspace.zkstack_cli().exists()); assert_matches!( - Workspace::from(workspace.toolbox().as_path()), - Workspace::Toolbox(_) + Workspace::from(workspace.zkstack_cli().as_path()), + Workspace::ZkStackCli(_) ); - // Toolbox. - std::env::set_current_dir(workspace.toolbox()).unwrap(); + // ZK Stack CLI + std::env::set_current_dir(workspace.zkstack_cli()).unwrap(); let workspace_path = locate_workspace_inner().unwrap(); let workspace = Workspace::from(workspace_path.as_path()); - assert_matches!(workspace, Workspace::Toolbox(_)); + assert_matches!(workspace, Workspace::ZkStackCli(_)); assert_eq!(workspace.core(), core_path); assert_matches!( Workspace::from(workspace.core().as_path()), diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts index 462404af606..8567be6d6d3 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -84,9 +84,9 @@ export async function getExternalNodeHealth(url: string) { } } -export async function dropNodeData(env: { [key: string]: string }, useZkSupervisor?: boolean, chain?: string) { - if (useZkSupervisor) { - let cmd = 'zk_inception external-node init'; +export async function dropNodeData(env: { [key: string]: string }, useZkStack?: boolean, chain?: string) { + if (useZkStack) { + let cmd = 'zkstack external-node init'; cmd += chain ? ` --chain ${chain}` : ''; await executeNodeCommand(env, cmd); } else { @@ -176,7 +176,7 @@ export class NodeProcess { logsFile: FileHandle | string, pathToHome: string, components: NodeComponents = NodeComponents.STANDARD, - useZkInception?: boolean, + useZkStack?: boolean, chain?: string ) { const logs = typeof logsFile === 'string' ? await fs.open(logsFile, 'a') : logsFile; @@ -186,7 +186,7 @@ export class NodeProcess { stdio: ['ignore', logs.fd, logs.fd], cwd: pathToHome, env, - useZkInception, + useZkStack, chain }); diff --git a/core/tests/recovery-test/src/utils.ts b/core/tests/recovery-test/src/utils.ts index 98c6b6d4405..c60f5603f17 100644 --- a/core/tests/recovery-test/src/utils.ts +++ b/core/tests/recovery-test/src/utils.ts @@ -48,19 +48,19 @@ export function runExternalNodeInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception external-node run'; + if (useZkStack) { + command = 'zkstack external-node run'; command += chain ? 
` --chain ${chain}` : ''; } else { command = 'zk external-node --'; diff --git a/core/tests/recovery-test/tests/snapshot-recovery.test.ts b/core/tests/recovery-test/tests/snapshot-recovery.test.ts index cadf146c522..eca0da78d78 100644 --- a/core/tests/recovery-test/tests/snapshot-recovery.test.ts +++ b/core/tests/recovery-test/tests/snapshot-recovery.test.ts @@ -458,10 +458,10 @@ async function decompressGzip(filePath: string): Promise { }); } -async function createSnapshot(zkSupervisor: boolean) { +async function createSnapshot(useZkStack: boolean) { let command = ''; - if (zkSupervisor) { - command = `zk_supervisor snapshot create`; + if (useZkStack) { + command = `zkstack dev snapshot create`; command += ` --chain ${fileConfig.chain}`; } else { command = `zk run snapshots-creator`; diff --git a/core/tests/revert-test/tests/utils.ts b/core/tests/revert-test/tests/utils.ts index ea8a45b97c3..fe5cb40799a 100644 --- a/core/tests/revert-test/tests/utils.ts +++ b/core/tests/revert-test/tests/utils.ts @@ -51,19 +51,19 @@ export function runServerInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; if (chain) { command += ` --chain ${chain}`; } @@ -78,19 +78,19 @@ export function runExternalNodeInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception external-node run'; + if (useZkStack) { + command = 'zkstack external-node run'; command += chain ? 
` --chain ${chain}` : ''; } else { command = 'zk external-node'; @@ -334,7 +334,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env: env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); @@ -362,7 +362,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); diff --git a/core/tests/ts-integration/src/utils.ts b/core/tests/ts-integration/src/utils.ts index 128d0be57d0..bb6fa93757e 100644 --- a/core/tests/ts-integration/src/utils.ts +++ b/core/tests/ts-integration/src/utils.ts @@ -20,21 +20,21 @@ export function runServerInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: ProcessEnvOptions['cwd']; env?: ProcessEnvOptions['env']; - useZkInception?: boolean; + useZkStack?: boolean; newL1GasPrice?: string; newPubdataPrice?: string; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; if (chain) { command += ` --chain ${chain}`; } @@ -167,7 +167,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env: env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 665b570ede7..79a690a1580 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -160,7 +160,7 @@ describe('Upgrade test', function () { components: serverComponents, stdio: ['ignore', logs, logs], cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); // Server may need some time to recompile if it's a cold run, so wait for it. @@ -377,7 +377,7 @@ describe('Upgrade test', function () { components: serverComponents, stdio: ['ignore', logs, logs], cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); await utils.sleep(10); diff --git a/core/tests/upgrade-test/tests/utils.ts b/core/tests/upgrade-test/tests/utils.ts index 7a7829caf86..9f130c1e556 100644 --- a/core/tests/upgrade-test/tests/utils.ts +++ b/core/tests/upgrade-test/tests/utils.ts @@ -7,19 +7,19 @@ export function runServerInBackground({ components, stdio, cwd, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }) { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; command += chain ? 
` --chain ${chain}` : ''; } else { command = 'cd $ZKSYNC_HOME && cargo run --bin zksync_server --release --'; @@ -71,8 +71,8 @@ export interface Contracts { stateTransitonManager: any; } -export function initContracts(pathToHome: string, zkToolbox: boolean): Contracts { - if (zkToolbox) { +export function initContracts(pathToHome: string, zkStack: boolean): Contracts { + if (zkStack) { const CONTRACTS_FOLDER = `${pathToHome}/contracts`; return { l1DefaultUpgradeAbi: new ethers.Interface( diff --git a/docker/Makefile b/docker/Makefile index d7dc80c6f34..4e0ca51f904 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -66,8 +66,8 @@ prepare-contracts: check-tools check-contracts @cd ../ && \ export ZKSYNC_HOME=$$(pwd) && \ export PATH=$$PATH:$${ZKSYNC_HOME}/bin:$${ZKSYNC_HOME}/zkstack_cli/zkstackup && \ - zkt || true && \ - zk_supervisor contracts && \ + zkstackup -g --local || true && \ + zkstack dev contracts && \ mkdir -p contracts/l1-contracts/artifacts # Download setup-key diff --git a/flake.nix b/flake.nix index 3321e67e27b..8c08e880910 100644 --- a/flake.nix +++ b/flake.nix @@ -91,7 +91,7 @@ ./Cargo.toml ./core ./prover - ./zk_toolbox + ./zkstack_cli ./.github/release-please/manifest.json ]; }; diff --git a/infrastructure/zk/src/fmt.ts b/infrastructure/zk/src/fmt.ts index e58cdbc8e54..b9f7f1b9d60 100644 --- a/infrastructure/zk/src/fmt.ts +++ b/infrastructure/zk/src/fmt.ts @@ -48,7 +48,7 @@ export async function rustfmt(check: boolean = false) { const dirs = [ process.env.ZKSYNC_HOME as string, `${process.env.ZKSYNC_HOME}/prover`, - `${process.env.ZKSYNC_HOME}/zk_toolbox` + `${process.env.ZKSYNC_HOME}/zkstack_cli` ]; for (const dir of dirs) { diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts index 7a24881c0f9..49ae4d0753e 100644 --- a/infrastructure/zk/src/lint.ts +++ b/infrastructure/zk/src/lint.ts @@ -38,12 +38,12 @@ async function proverClippy() { await utils.spawn('cargo clippy --tests --locked -- -D warnings'); } -async function toolboxClippy() { - process.chdir(`${process.env.ZKSYNC_HOME}/zk_toolbox`); +async function zkstackClippy() { + process.chdir(`${process.env.ZKSYNC_HOME}/zkstack_cli`); await utils.spawn('cargo clippy --tests --locked -- -D warnings'); } -const ARGS = [...EXTENSIONS, 'rust', 'prover', 'contracts', 'toolbox'] as const; +const ARGS = [...EXTENSIONS, 'rust', 'prover', 'contracts', 'zkstack_cli'] as const; export const command = new Command('lint') .description('lint code') @@ -61,8 +61,8 @@ export const command = new Command('lint') case 'contracts': await lintContracts(cmd.check); break; - case 'toolbox': - await toolboxClippy(); + case 'zkstack_cli': + await zkstackClippy(); break; default: await lint(extension, cmd.check); @@ -72,7 +72,7 @@ export const command = new Command('lint') promises.push(lintContracts(cmd.check)); promises.push(clippy()); promises.push(proverClippy()); - promises.push(toolboxClippy()); + promises.push(zkstackClippy()); await Promise.all(promises); } }); diff --git a/prover/docs/05_proving_batch.md b/prover/docs/05_proving_batch.md index e09a44cb0ff..c35de975bf7 100644 --- a/prover/docs/05_proving_batch.md +++ b/prover/docs/05_proving_batch.md @@ -14,17 +14,25 @@ GPU, which requires an NVIDIA A100 80GB GPU. ### Prerequisites -First of all, you need to install CUDA drivers, all other things will be dealt with by `zk_inception` and `prover_cli` -tools. For that, check the following [guide](./02_setup.md)(you can skip bellman-cuda step). 
+First of all, you need to install CUDA drivers, all other things will be dealt with by `zkstack` and `prover_cli` tools. +For that, check the following [guide](./02_setup.md)(you can skip bellman-cuda step). Install the prerequisites, which you can find [here](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/setup-dev.md). Note, that if you are not using Google VM instance, you also need to install [gcloud](https://cloud.google.com/sdk/docs/install#deb). -Now, you can use `zk_inception` and `prover_cli` tools for setting up the env and running prover subsystem. +Now, you can use `zkstack` and `prover_cli` tools for setting up the env and running prover subsystem. -```shell -cargo +nightly-2024-08-01 install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor prover_cli --force +First, install `zkstackup` with: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +Then install the most recent version of `zkstack` with: + +```bash +zkstackup ``` ## Initializing system @@ -33,14 +41,14 @@ After you have installed the tool, you can create ecosystem(you need to run only running: ```shell -zk_inception ecosystem create --l1-network=localhost --prover-mode=gpu --wallet-creation=localhost --l1-batch-commit-data-generator-mode=rollup --start-containers=true +zkstack ecosystem create --l1-network=localhost --prover-mode=gpu --wallet-creation=localhost --l1-batch-commit-data-generator-mode=rollup --start-containers=true ``` The command will create the ecosystem and all the necessary components for the prover subsystem. You can leave default values for all the prompts you will see Now, you need to initialize the prover subsystem by running: ```shell -zk_inception prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false +zkstack prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false ``` For prompts you can leave default values as well. @@ -87,13 +95,23 @@ After you have the data, you need to prepare the system to run the batch. So, da the protocol version it should use. 
You can do that with running ```shell -zk_supervisor prover-version +zkstack dev prover info ``` Example output: ```shell -Current protocol version found in zksync-era: 0.24.2, snark_wrapper: "0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2" +=============================== + +Current prover setup information: + +Protocol version: 0.24.2 + +Snark wrapper: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 + +Database URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_era + +=============================== ``` This command will provide you with the information about the semantic protocol version(you need to know only minor and @@ -118,7 +136,7 @@ prover_cli insert-batch --number= --version=` - -###### **Subcommands:** - -- `database` — Database related commands -- `test` — Run tests -- `clean` — Clean artifacts -- `snapshot` — Snapshots creator -- `lint` — Lint code -- `fmt` — Format code -- `prover-version` — Protocol version used by provers - -###### **Options:** - -- `-v`, `--verbose` — Verbose mode -- `--chain ` — Chain to use -- `--ignore-prerequisites` — Ignores prerequisites checks - -## `zk_supervisor database` - -Database related commands - -**Usage:** `zk_supervisor database ` - -###### **Subcommands:** - -- `check-sqlx-data` — Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked. -- `drop` — Drop databases. If no databases are selected, all databases will be dropped. -- `migrate` — Migrate databases. If no databases are selected, all databases will be migrated. -- `new-migration` — Create new migration -- `prepare` — Prepare sqlx-data.json. If no databases are selected, all databases will be prepared. -- `reset` — Reset databases. If no databases are selected, all databases will be reset. -- `setup` — Setup databases. If no databases are selected, all databases will be setup. - -## `zk_supervisor database check-sqlx-data` - -Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked. - -**Usage:** `zk_supervisor database check-sqlx-data [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database drop` - -Drop databases. If no databases are selected, all databases will be dropped. - -**Usage:** `zk_supervisor database drop [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database migrate` - -Migrate databases. If no databases are selected, all databases will be migrated. - -**Usage:** `zk_supervisor database migrate [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database new-migration` - -Create new migration - -**Usage:** `zk_supervisor database new-migration [OPTIONS]` - -###### **Options:** - -- `--database ` — Database to create new migration for - - Possible values: `prover`, `core` - -- `--name ` — Migration name - -## `zk_supervisor database prepare` - -Prepare sqlx-data.json. If no databases are selected, all databases will be prepared. 
- -**Usage:** `zk_supervisor database prepare [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database reset` - -Reset databases. If no databases are selected, all databases will be reset. - -**Usage:** `zk_supervisor database reset [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database setup` - -Setup databases. If no databases are selected, all databases will be setup. - -**Usage:** `zk_supervisor database setup [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor test` - -Run tests - -**Usage:** `zk_supervisor test ` - -###### **Subcommands:** - -- `integration` — Run integration tests -- `revert` — Run revert tests -- `recovery` — Run recovery tests -- `upgrade` — Run upgrade tests -- `rust` — Run unit-tests, accepts optional cargo test flags -- `l1-contracts` — Run L1 contracts tests -- `prover` — Run prover tests - -## `zk_supervisor test integration` - -Run integration tests - -**Usage:** `zk_supervisor test integration [OPTIONS]` - -###### **Options:** - -- `-e`, `--external-node` — Run tests for external node - -## `zk_supervisor test revert` - -Run revert tests - -**Usage:** `zk_supervisor test revert [OPTIONS]` - -###### **Options:** - -- `--enable-consensus` — Enable consensus -- `-e`, `--external-node` — Run tests for external node - -## `zk_supervisor test recovery` - -Run recovery tests - -**Usage:** `zk_supervisor test recovery [OPTIONS]` - -###### **Options:** - -- `-s`, `--snapshot` — Run recovery from a snapshot instead of genesis - -## `zk_supervisor test upgrade` - -Run upgrade tests - -**Usage:** `zk_supervisor test upgrade` - -## `zk_supervisor test rust` - -Run unit-tests, accepts optional cargo test flags - -**Usage:** `zk_supervisor test rust [OPTIONS]` - -###### **Options:** - -- `--options ` — Cargo test flags - -## `zk_supervisor test l1-contracts` - -Run L1 contracts tests - -**Usage:** `zk_supervisor test l1-contracts` - -## `zk_supervisor test prover` - -Run prover tests - -**Usage:** `zk_supervisor test prover` - -## `zk_supervisor clean` - -Clean artifacts - -**Usage:** `zk_supervisor clean ` - -###### **Subcommands:** - -- `all` — Remove containers and contracts cache -- `containers` — Remove containers and docker volumes -- `contracts-cache` — Remove contracts caches - -## `zk_supervisor clean all` - -Remove containers and contracts cache - -**Usage:** `zk_supervisor clean all` - -## `zk_supervisor clean containers` - -Remove containers and docker volumes - -**Usage:** `zk_supervisor clean containers` - -## `zk_supervisor clean contracts-cache` - -Remove contracts caches - -**Usage:** `zk_supervisor clean contracts-cache` - -## `zk_supervisor snapshot` - -Snapshots creator - -**Usage:** `zk_supervisor snapshot ` - -###### **Subcommands:** - -- `create` — - -## `zk_supervisor snapshot create` - -**Usage:** `zk_supervisor snapshot create` - -## `zk_supervisor lint` - -Lint code - -**Usage:** `zk_supervisor lint [OPTIONS]` - -###### **Options:** - -- `-c`, `--check` -- `-e`, `--extensions ` - - Possible values: `md`, `sol`, `js`, `ts`, `rs` - -## `zk_supervisor fmt` - -Format code - 
-**Usage:** `zk_supervisor fmt [OPTIONS] [COMMAND]` - -###### **Subcommands:** - -- `rustfmt` — -- `contract` — -- `prettier` — - -###### **Options:** - -- `-c`, `--check` - -## `zk_supervisor fmt rustfmt` - -**Usage:** `zk_supervisor fmt rustfmt` - -## `zk_supervisor fmt contract` - -**Usage:** `zk_supervisor fmt contract` - -## `zk_supervisor fmt prettier` - -**Usage:** `zk_supervisor fmt prettier [OPTIONS]` - -###### **Options:** - -- `-e`, `--extensions ` - - Possible values: `md`, `sol`, `js`, `ts`, `rs` - -## `zk_supervisor prover info` - -Prints prover protocol version, snark wrapper and prover database URL - -**Usage:** `zk_supervisor prover info` - -## `zk_supervisor prover insert-version` - -Inserts protocol version into prover database. - -**Usage:** `zk_supervisor prover insert-version [OPTIONS]` - -###### **Options:** - -- `--version ` — Protocol version in semantic format(`x.y.z`). Major version should be 0. -- `--snark-wrapper ` — Snark wrapper hash. -- `--default` - use default values for protocol version and snark wrapper hash (the ones found in zksync-era). - -## `zk_supervisor prover insert-batch` - -Inserts batch into prover database. - -**Usage:** `zk_supervisor prover insert-batch` - -###### **Options:** - -- `--number ` — Number of the batch to insert. -- `--version ` — Protocol version in semantic format(`x.y.z`). Major version should be 0. -- `--default` - use default value for protocol version (the one found in zksync-era). - -
- - This document was generated automatically by -clap-markdown. diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs deleted file mode 100644 index 242affd8a71..00000000000 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ /dev/null @@ -1,151 +0,0 @@ -use clap::{Parser, Subcommand}; -use commands::{ - config_writer::ConfigWriterArgs, contracts::ContractsArgs, database::DatabaseCommands, - lint::LintArgs, prover::ProverCommands, send_transactions::args::SendTransactionsArgs, - snapshot::SnapshotCommands, test::TestCommands, -}; -use common::{ - check_general_prerequisites, - config::{global_config, init_global_config, GlobalConfig}, - error::log_error, - init_prompt_theme, logger, - version::version_message, -}; -use config::EcosystemConfig; -use messages::{ - msg_global_chain_does_not_exist, MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, - MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, - MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, - MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, -}; -use xshell::Shell; - -use crate::commands::{clean::CleanCommands, fmt::FmtArgs}; - -mod commands; -mod consts; -mod dals; -mod defaults; -mod messages; - -#[derive(Parser, Debug)] -#[command( - version = version_message(env!("CARGO_PKG_VERSION")), - about -)] -struct Supervisor { - #[command(subcommand)] - command: SupervisorSubcommands, - #[clap(flatten)] - global: SupervisorGlobalArgs, -} - -#[derive(Subcommand, Debug)] -enum SupervisorSubcommands { - #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT, alias = "db")] - Database(DatabaseCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_TESTS_ABOUT, alias = "t")] - Test(TestCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_CLEAN)] - Clean(CleanCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT)] - Snapshot(SnapshotCommands), - #[command(about = MSG_SUBCOMMAND_LINT_ABOUT, alias = "l")] - Lint(LintArgs), - #[command(about = MSG_SUBCOMMAND_FMT_ABOUT)] - Fmt(FmtArgs), - #[command(hide = true)] - Markdown, - #[command(subcommand, about = MSG_PROVER_VERSION_ABOUT)] - Prover(ProverCommands), - #[command(about = MSG_CONTRACTS_ABOUT)] - Contracts(ContractsArgs), - #[command(about = MSG_CONFIG_WRITER_ABOUT, alias = "o")] - ConfigWriter(ConfigWriterArgs), - #[command(about = MSG_SEND_TXNS_ABOUT)] - SendTransactions(SendTransactionsArgs), -} - -#[derive(Parser, Debug)] -#[clap(next_help_heading = "Global options")] -struct SupervisorGlobalArgs { - /// Verbose mode - #[clap(short, long, global = true)] - verbose: bool, - /// Chain to use - #[clap(long, global = true)] - chain: Option, - /// Ignores prerequisites checks - #[clap(long, global = true)] - ignore_prerequisites: bool, -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - human_panic::setup_panic!(); - - // We must parse arguments before printing the intro, because some autogenerated - // Clap commands (like `--version` would look odd otherwise). 
- let args = Supervisor::parse(); - - init_prompt_theme(); - - logger::new_empty_line(); - logger::intro(); - - let shell = Shell::new().unwrap(); - init_global_config_inner(&shell, &args.global)?; - - if !global_config().ignore_prerequisites { - check_general_prerequisites(&shell); - } - - match run_subcommand(args, &shell).await { - Ok(_) => {} - Err(error) => { - log_error(error); - std::process::exit(1); - } - } - - Ok(()) -} - -async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { - match args.command { - SupervisorSubcommands::Database(command) => commands::database::run(shell, command).await?, - SupervisorSubcommands::Test(command) => commands::test::run(shell, command).await?, - SupervisorSubcommands::Clean(command) => commands::clean::run(shell, command)?, - SupervisorSubcommands::Snapshot(command) => commands::snapshot::run(shell, command).await?, - SupervisorSubcommands::Markdown => { - clap_markdown::print_help_markdown::(); - } - SupervisorSubcommands::Lint(args) => commands::lint::run(shell, args)?, - SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, - SupervisorSubcommands::Prover(command) => commands::prover::run(shell, command).await?, - SupervisorSubcommands::Contracts(args) => commands::contracts::run(shell, args)?, - SupervisorSubcommands::ConfigWriter(args) => commands::config_writer::run(shell, args)?, - SupervisorSubcommands::SendTransactions(args) => { - commands::send_transactions::run(shell, args).await? - } - } - Ok(()) -} - -fn init_global_config_inner(shell: &Shell, args: &SupervisorGlobalArgs) -> anyhow::Result<()> { - if let Some(name) = &args.chain { - if let Ok(config) = EcosystemConfig::from_file(shell) { - let chains = config.list_of_chains(); - if !chains.contains(name) { - anyhow::bail!(msg_global_chain_does_not_exist(name, &chains.join(", "))); - } - } - } - - init_global_config(GlobalConfig { - verbose: args.verbose, - chain_name: args.chain.clone(), - ignore_prerequisites: args.ignore_prerequisites, - }); - Ok(()) -} diff --git a/zk_toolbox/zkup/README.md b/zk_toolbox/zkup/README.md deleted file mode 100644 index d6e3e634688..00000000000 --- a/zk_toolbox/zkup/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# zkup - zk_toolbox Installer - -`zkup` is a script designed to simplify the installation of -[zk_toolbox](https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox). It allows you to install the tool from a -local directory or directly from a GitHub repository. - -## Getting Started - -To install `zkup`, run the following command: - -```bash -curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zk_toolbox/zkup/install | bash -``` - -After installing `zkup`, you can use it to install `zk_toolbox` with: - -```bash -zkup -``` - -## Usage - -The `zkup` script provides various options for installing `zk_toolbox`: - -### Options - -- `-p, --path ` - Specify a local path to install `zk_toolbox` from. This option is ignored if `--repo` is provided. - -- `-r, --repo ` - GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". - -- `-b, --branch ` - Git branch to use when installing from a repository. Ignored if `--commit` or `--version` is provided. - -- `-c, --commit ` - Git commit hash to use when installing from a repository. Ignored if `--branch` or `--version` is provided. - -- `-v, --version ` - Git tag to use when installing from a repository. Ignored if `--branch` or `--commit` is provided. 
- -- `--inception` - Installs `zk_inception` from the repository. By default, `zkup` installs `zk_inception` and `zk_supervisor`. - -- `--supervisor` - Installs `zk_supervisor` from the repository. - -### Local Installation - -If you provide a local path using the `-p` or `--path` option, `zkup` will install `zk_toolbox` from that directory. -Note that repository-specific arguments (`--repo`, `--branch`, `--commit`, `--version`) will be ignored in this case to -preserve git state. - -### Repository Installation - -By default, `zkup` installs `zk_toolbox` from the "matter-labs/zksync-era" GitHub repository. You can specify a -different repository, branch, commit, or version using the respective options. If multiple arguments are provided, -`zkup` will prioritize them as follows: - -- `--version` -- `--commit` -- `--branch` - -### Examples - -**Install from a GitHub repository with a specific version:** - -```bash -zkup --repo matter-labs/zksync-era --version 0.1.1 -``` - -**Install from a local path, only installing `zk_inception`:** - -```bash -zkup --path /path/to/local/zk_toolbox --inception -``` diff --git a/zk_toolbox/zkup/install b/zk_toolbox/zkup/install deleted file mode 100755 index 4e24b03dec4..00000000000 --- a/zk_toolbox/zkup/install +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -BASE_DIR=${XDG_CONFIG_HOME:-$HOME} -ZKT_DIR=${ZKT_DIR:-"$BASE_DIR/.zkt"} -ZKT_BIN_DIR="$ZKT_DIR/bin" - -BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zk_toolbox/zkup/zkup" -BIN_PATH="$ZKT_BIN_DIR/zkup" - -mkdir -p "$ZKT_BIN_DIR" -curl -sSfL "$BIN_URL" -o "$BIN_PATH" -chmod +x "$BIN_PATH" - -if [[ ":$PATH:" == *":${ZKT_BIN_DIR}:"* ]]; then - echo "zkup: found ${ZKT_BIN_DIR} in PATH" - exit 0 -fi - -case $SHELL in -*/zsh) - PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" - ;; -*/bash) - PROFILE="$HOME/.bashrc" - ;; -*/fish) - PROFILE="$HOME/.config/fish/config.fish" - ;; -*/ash) - PROFILE="$HOME/.profile" - ;; -*) - echo "zkup: could not detect shell, manually add ${ZKT_BIN_DIR} to your PATH." - exit 1 - ;; -esac - -if [[ ! -f "$PROFILE" ]]; then - echo "zkup: Profile file $PROFILE does not exist, creating it." - touch "$PROFILE" -fi - -if [[ "$SHELL" == *"/fish"* ]]; then - echo -e "\n# Added by zkup\nfish_add_path -a $ZKT_BIN_DIR" >>"$PROFILE" - echo "zkup: Added $ZKT_BIN_DIR to PATH in $PROFILE using fish_add_path." -else - echo -e "\n# Added by zkup\nexport PATH=\"\$PATH:$ZKT_BIN_DIR\"" >>"$PROFILE" - echo "zkup: Added $ZKT_BIN_DIR to PATH in $PROFILE." -fi - -echo -echo "Added zkup to PATH." -echo "Run 'source $PROFILE' or start a new terminal session to use zkup." -echo "Then run 'zkup' to install ZK Toolbox." 
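The `zkup` installer deleted above is superseded by `zkstackup` under `zkstack_cli/zkstackup/`. As a rough, non-authoritative sketch assembled only from commands that appear elsewhere in this patch series (the CI workflow steps, the docker Makefile, and the updated `prover/docs/05_proving_batch.md`), the replacement flow looks like this:

```bash
# Sketch based solely on commands shown elsewhere in this patch series;
# any flags or paths beyond these are not guaranteed.

# Option A (from the updated prover docs): fetch and run the hosted installer.
curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash

# Option B (as the CI workflows do): install zkstackup from the local checkout,
# then build the zkstack binary from local sources.
./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup
zkstackup -g --local

# Afterwards, the former zk_supervisor subcommands are invoked as `zkstack dev ...`,
# e.g. the contracts build used throughout the workflows:
zkstack dev contracts
```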
diff --git a/zk_toolbox/zkup/zkup b/zk_toolbox/zkup/zkup deleted file mode 100755 index e6ca1748738..00000000000 --- a/zk_toolbox/zkup/zkup +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -BASE_DIR=${XDG_CONFIG_HOME:-$HOME} -ZKT_DIR=${ZKT_DIR:-"$BASE_DIR/.zkt"} -ZKT_BIN_DIR="$ZKT_DIR/bin" - -ZKUP_INSTALL_SUPERVISOR=0 -ZKUP_INSTALL_INCEPTION=0 -ZKUP_ALIAS=0 - -BINS=() - -main() { - parse_args "$@" - - zktoolbox_banner - - check_prerequisites - mkdir -p "$ZKT_BIN_DIR" - - set_bins - - if [ -n "$ZKUP_PATH" ]; then - install_local - else - install_from_repo - fi - - zktoolbox_banner - - for bin in "${BINS[@]}"; do - success "Installed $bin to $ZKT_BIN_DIR/$bin" - done - - if [ $ZKUP_ALIAS -eq 1 ]; then - create_alias - fi -} - -PREREQUISITES=(cargo git) - -check_prerequisites() { - say "Checking prerequisites" - - failed_prerequisites=() - for prerequisite in "${PREREQUISITES[@]}"; do - if ! check_prerequisite "$prerequisite"; then - failed_prerequisites+=("$prerequisite") - fi - done - if [ ${#failed_prerequisites[@]} -gt 0 ]; then - err "The following prerequisites are missing: ${failed_prerequisites[*]}" - exit 1 - fi -} - -check_prerequisite() { - command -v "$1" &>/dev/null -} - -parse_args() { - while [[ $# -gt 0 ]]; do - case $1 in - --) - shift - break - ;; - - -p | --path) - shift - ZKUP_PATH=$1 - ;; - -r | --repo) - shift - ZKUP_REPO=$1 - ;; - -b | --branch) - shift - ZKUP_BRANCH=$1 - ;; - -c | --commit) - shift - ZKUP_COMMIT=$1 - ;; - -v | --version) - shift - ZKUP_VERSION=$1 - ;; - --inception) ZKUP_INSTALL_INCEPTION=1 ;; - --supervisor) ZKUP_INSTALL_SUPERVISOR=1 ;; - -a | --alias) ZKUP_ALIAS=1 ;; - -h | --help) - usage - exit 0 - ;; - *) - err "Unknown argument: $1" - usage - exit 1 - ;; - esac - shift - done -} - -usage() { - cat < Specify a local path to install zk_toolbox from. Ignored if --repo is provided. - -r, --repo GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". - -b, --branch Git branch to use when installing from a repository. Ignored if --commit or --version is provided. - -c, --commit Git commit hash to use when installing from a repository. Ignored if --branch or --version is provided. - -v, --version Git tag to use when installing from a repository. Ignored if --branch or --commit is provided. - -a, --alias Create aliases zki and zks for zk_inception and zk_supervisor binaries. - --inception Installs the zk_inception binary. Default is to install both zk_inception and zk_supervisor binaries. - --supervisor Installs the zk_supervisor binary. Default is to install both zk_inception and zk_supervisor binaries. - -h, --help Show this help message and exit. - -Examples: - $(basename "$0") --repo matter-labs/zksync-era --version 0.1.1 - $(basename "$0") --path /path/to/local/zk_toolbox --inception -EOF -} - -set_bins() { - if [ $ZKUP_INSTALL_INCEPTION -eq 1 ]; then - BINS+=(zk_inception) - fi - - if [ $ZKUP_INSTALL_SUPERVISOR -eq 1 ]; then - BINS+=(zk_supervisor) - fi - - # Installs both binaries if not option is provided - if [ ${#BINS[@]} -eq 0 ]; then - BINS=(zk_inception zk_supervisor) - fi -} - -install_local() { - if [ ! 
-d "$ZKUP_PATH/zk_toolbox" ]; then - err "Path $ZKUP_PATH does not contain zk_toolbox" - exit 1 - fi - - if [ -n "$ZKUP_BRANCH" ] || [ -n "$ZKUP_COMMIT" ] || [ -n "$ZKUP_VERSION" ] || [ -n "$ZKUP_REPO" ]; then - warn "Ignoring --repo, --branch, --commit and --version arguments when installing from local path" - fi - - say "Installing zk_toolbox from $ZKUP_PATH" - ensure cd "$ZKUP_PATH"/zk_toolbox - - for bin in "${BINS[@]}"; do - say "Installing $bin" - ensure cargo install --root $ZKT_DIR --path ./crates/$bin --force - done -} - -install_from_repo() { - if [ -n "$ZKUP_PATH" ]; then - warn "Ignoring --path argument when installing from repository" - fi - - ZKUP_REPO=${ZKUP_REPO:-"matter-labs/zksync-era"} - - say "Installing zk_toolbox from $ZKUP_REPO" - - if [ -n "$ZKUP_VERSION" ]; then - if [ -n "$ZKUP_COMMIT" ] || [ -n "$ZKUP_BRANCH" ]; then - warn "Ignoring --commit and --branch arguments when installing by version" - fi - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --tag "zk_toolbox-v$ZKUP_VERSION" --locked "${BINS[@]}" --force - elif [ -n "$ZKUP_COMMIT" ]; then - if [ -n "$ZKUP_BRANCH" ]; then - warn "Ignoring --branch argument when installing by commit" - fi - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --rev "$ZKUP_COMMIT" --locked "${BINS[@]}" --force - elif [ -n "$ZKUP_BRANCH" ]; then - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --branch "$ZKUP_BRANCH" --locked "${BINS[@]}" --force - else - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --locked "${BINS[@]}" --force - fi -} - -create_alias() { - if [[ "${BINS[@]}" =~ "zk_inception" ]]; then - say "Creating alias 'zki' for zk_inception" - ensure ln -sf "$ZKT_BIN_DIR/zk_inception" "$ZKT_BIN_DIR/zki" - fi - - if [[ "${BINS[@]}" =~ "zk_supervisor" ]]; then - say "Creating alias 'zks' for zk_supervisor" - ensure ln -sf "$ZKT_BIN_DIR/zk_supervisor" "$ZKT_BIN_DIR/zks" - fi -} - -ensure() { - if ! 
"$@"; then - err "command failed: $*" - exit 1 - fi -} - -say() { - local action="${1%% *}" - local rest="${1#"$action" }" - - echo -e "\033[1;32m$action\033[0m $rest" -} - -success() { - echo -e "\033[1;32m$1\033[0m" -} - -warn() { - echo -e "\033[1;33mWARNING: $1\033[0m" -} - -err() { - echo -e "\033[1;31mERROR: $1\033[0m" >&2 -} - -zktoolbox_banner() { - printf ' - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -███████╗██╗ ██╗ ████████╗ ██████╗ ██████╗ ██╗ ██████╗ ██████╗ ██╗ ██╗ -╚══███╔╝██║ ██╔╝ ╚══██╔══╝██╔═══██╗██╔═══██╗██║ ██╔══██╗██╔═══██╗╚██╗██╔╝ - ███╔╝ █████╔╝ ██║ ██║ ██║██║ ██║██║ ██████╔╝██║ ██║ ╚███╔╝ - ███╔╝ ██╔═██╗ ██║ ██║ ██║██║ ██║██║ ██╔══██╗██║ ██║ ██╔██╗ -███████╗██║ ██╗ ██║ ╚██████╔╝╚██████╔╝███████╗██████╔╝╚██████╔╝██╔╝ ██╗ -╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝╚═════╝ ╚═════╝ ╚═╝ ╚═╝ - - - A Comprehensive Toolkit for Creating and Managing ZK Stack Chains - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -Repo : https://github.com/matter-labs/zksync-era/ -Docs : https://docs.zksync.io/ -Contribute : https://github.com/matter-labs/zksync-era/pulls - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -' -} - -main "$@" diff --git a/zk_toolbox/CHANGELOG.md b/zkstack_cli/CHANGELOG.md similarity index 100% rename from zk_toolbox/CHANGELOG.md rename to zkstack_cli/CHANGELOG.md diff --git a/zk_toolbox/Cargo.lock b/zkstack_cli/Cargo.lock similarity index 97% rename from zk_toolbox/Cargo.lock rename to zkstack_cli/Cargo.lock index 77316756c26..8750de36c75 100644 --- a/zk_toolbox/Cargo.lock +++ b/zkstack_cli/Cargo.lock @@ -14,9 +14,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -161,9 +161,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -172,9 +172,9 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", @@ -531,9 +531,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.23" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bbb537bb4a30b90362caddba8f360c0a56bc13d3a5570028e7197204cb54a17" +checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" dependencies = [ "jobserver", "libc", @@ -579,9 +579,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -598,9 +598,9 @@ 
dependencies = [ [[package]] name = "clap_builder" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", @@ -1883,9 +1883,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1898,9 +1898,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1908,15 +1908,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1936,9 +1936,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-locks" @@ -1952,9 +1952,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -1963,15 +1963,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1985,9 +1985,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" 
+checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -2036,9 +2036,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git_version_macro" @@ -2099,7 +2099,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2118,7 +2118,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2141,6 +2141,12 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "hashers" version = "1.0.1" @@ -2273,9 +2279,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -2368,7 +2374,7 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -2531,12 +2537,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -2572,9 +2578,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is_terminal_polyfill" @@ -2635,9 +2641,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "0cb94a0ffd3f3ee755c20f7d8752f45cac88605a4dcf808abcff72873296ec7b" dependencies = [ "wasm-bindgen", ] @@ -3157,21 +3163,18 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.20.1" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" -dependencies = [ - "portable-atomic", -] +checksum = 
"1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "open-fastrlp" @@ -3503,7 +3506,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.5.0", + "indexmap 2.6.0", ] [[package]] @@ -3571,18 +3574,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", @@ -3720,9 +3723,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] @@ -3827,7 +3830,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", - "itertools 0.10.5", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.79", @@ -4104,7 +4107,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "serde", "serde_json", "serde_urlencoded", @@ -4276,9 +4279,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "once_cell", "rustls-pki-types", @@ -4298,11 +4301,10 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] @@ -4389,9 +4391,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] @@ -4723,7 +4725,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -5004,7 +5006,7 @@ dependencies = [ "hashbrown 0.14.5", "hashlink", "hex", - "indexmap 2.5.0", + "indexmap 2.6.0", "log", "memchr", "once_cell", @@ -5179,7 +5181,7 @@ dependencies = [ "enum_dispatch", "fancy-regex", 
"getrandom", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.13.0", "lazy-regex", "nohash-hasher", @@ -5208,7 +5210,7 @@ dependencies = [ "ahash", "enum_dispatch", "fancy-regex", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.13.0", "nohash-hasher", "pretty_assertions", @@ -5440,12 +5442,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" dependencies = [ "rustix", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -5628,7 +5630,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] @@ -5699,7 +5701,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.40", ] @@ -5710,7 +5712,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -5979,9 +5981,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -6006,9 +6008,9 @@ dependencies = [ [[package]] name = "unicode-properties" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] name = "unicode-width" @@ -6192,9 +6194,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "ef073ced962d62984fb38a36e5fdc1a2b23c9e0e1fa0689bb97afa4202ef6887" dependencies = [ "cfg-if", "once_cell", @@ -6203,9 +6205,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "c4bfab14ef75323f4eb75fa52ee0a3fb59611977fd3240da19b2cf36ff85030e" dependencies = [ "bumpalo", "log", @@ -6218,9 +6220,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "65471f79c1022ffa5291d33520cbbb53b7687b01c2f8e83b57d102eed7ed479d" dependencies = [ "cfg-if", "js-sys", @@ -6230,9 +6232,9 @@ dependencies = [ [[package]] name = 
"wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "a7bec9830f60924d9ceb3ef99d55c155be8afa76954edffbb5936ff4509474e7" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6240,9 +6242,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "4c74f6e152a76a2ad448e223b0fc0b6b5747649c3d769cc6bf45737bf97d0ed6" dependencies = [ "proc-macro2", "quote", @@ -6253,15 +6255,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "a42f6c679374623f295a8623adfe63d9284091245c3504bde47c17a3ce2777d9" [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "44188d185b5bdcae1052d08bcbcf9091a5524038d4572cc4f4f2bb9d5554ddd9" dependencies = [ "js-sys", "wasm-bindgen", @@ -6315,7 +6317,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -6685,10 +6687,26 @@ dependencies = [ ] [[package]] -name = "zk_inception" +name = "zkevm_opcode_defs" +version = "0.132.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" +dependencies = [ + "bitflags 2.6.0", + "blake2", + "ethereum-types", + "k256 0.11.6", + "lazy_static", + "sha2_ce", + "sha3_ce", +] + +[[package]] +name = "zkstack" version = "0.1.0" dependencies = [ "anyhow", + "chrono", "clap", "clap-markdown", "cliclack", @@ -6696,6 +6714,7 @@ dependencies = [ "config", "ethers", "eyre", + "futures", "human-panic", "lazy_static", "prost 0.12.6", @@ -6706,6 +6725,7 @@ dependencies = [ "serde_json", "serde_yaml", "slugify-rs", + "sqruff-lib", "strum", "thiserror", "tokio", @@ -6723,46 +6743,6 @@ dependencies = [ "zksync_protobuf_config", ] -[[package]] -name = "zk_supervisor" -version = "0.1.0" -dependencies = [ - "anyhow", - "chrono", - "clap", - "clap-markdown", - "common", - "config", - "ethers", - "futures", - "human-panic", - "serde", - "serde_json", - "serde_yaml", - "sqruff-lib", - "strum", - "tokio", - "types", - "url", - "xshell", - "zksync_basic_types", -] - -[[package]] -name = "zkevm_opcode_defs" -version = "0.132.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" -dependencies = [ - "bitflags 2.6.0", - "blake2", - "ethereum-types", - "k256 0.11.6", - "lazy_static", - "sha2_ce", - "sha3_ce", -] - [[package]] name = "zksync_basic_types" version = "0.1.0" diff --git a/zk_toolbox/Cargo.toml b/zkstack_cli/Cargo.toml similarity index 90% rename from zk_toolbox/Cargo.toml rename to zkstack_cli/Cargo.toml index cb442a6182e..a805cf85d51 100644 --- a/zk_toolbox/Cargo.toml +++ b/zkstack_cli/Cargo.toml @@ -3,8 +3,7 @@ members = [ "crates/common", 
"crates/config", "crates/types", - "crates/zk_inception", - "crates/zk_supervisor", + "crates/zkstack", "crates/git_version_macro", ] resolver = "2" @@ -16,8 +15,8 @@ homepage = "https://zksync.io/" license = "MIT OR Apache-2.0" authors = ["The Matter Labs Team "] exclude = ["./github"] -repository = "https://github.com/matter-labs/zk_toolbox/" -description = "ZK Toolbox is a set of tools for working with zk stack." +repository = "https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli/" +description = "ZK Stack CLI is a set of tools for working with zk stack." keywords = ["zk", "cryptography", "blockchain", "ZKStack", "ZKsync"] diff --git a/zk_toolbox/README.md b/zkstack_cli/README.md similarity index 85% rename from zk_toolbox/README.md rename to zkstack_cli/README.md index 6197a79eec9..f1c92cc3d2e 100644 --- a/zk_toolbox/README.md +++ b/zkstack_cli/README.md @@ -1,11 +1,7 @@ -# zk_toolbox +# ZK Stack CLI -Toolkit for creating and managing ZK Stack chains. - -## ZK Inception - -`ZK Inception` facilitates the creation and management of ZK Stacks. Commands are interactive but can also accept -arguments via the command line. +Toolkit for creating and managing ZK Stack chains. `ZK Stack CLI` facilitates the creation and management of ZK Stacks. +Commands are interactive but can also accept arguments via the command line. ### Dependencies @@ -14,19 +10,25 @@ dependencies on your machine. Ignore the Environment section for now. ### Installation -Install `zk_inception` from Git: +You can use `zkstackup` to install and manage `zkstack`: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +Then install the most recent version with: ```bash -cargo install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor --force +zkstackup ``` Or manually build from a local copy of the [ZKsync](https://github.com/matter-labs/zksync-era/) repository: ```bash -./bin/zkt +zkstackup --local ``` -This command installs `zk_inception` and `zk_supervisor` from the current repository. +This command installs `zkstack` from the current repository. ### Foundry Integration @@ -51,13 +53,13 @@ BridgeHub, shared bridges, and state transition managers. To create a ZK Stack project, start by creating an ecosystem: ```bash -zk_inception ecosystem create +zkstack ecosystem create ``` If you choose not to start database & L1 containers after creating the ecosystem, you can later run: ```bash -zk_inception containers +zkstack containers ``` Execute subsequent commands from within the created ecosystem folder: @@ -71,14 +73,14 @@ cd path/to/ecosystem/name If the ecosystem has never been deployed before, initialize it: ```bash -zk_inception ecosystem init +zkstack ecosystem init ``` This initializes the first ZK chain, which becomes the default. Override with `--chain ` if needed. For default params, use: ```bash -zk_inception ecosystem init --dev +zkstack ecosystem init --dev ``` If the process gets stuck, resume it with `--resume`. This flag keeps track of already sent transactions and sends new @@ -98,7 +100,7 @@ To verify contracts, use the `--verify` flag. To change the default ZK chain: ```bash -zk_inception ecosystem change-default-chain +zkstack ecosystem change-default-chain ``` IMPORTANT: Currently, you cannot use an existing ecosystem to register a new chain. This feature will be added in the @@ -109,19 +111,19 @@ future. 
To setup [era-observability](https://github.com/matter-labs/era-observability): ```bash -zk_inception ecosystem setup-observability +zkstack ecosystem setup-observability ``` Or run: ```bash -zk_inception ecosystem init --observability +zkstack ecosystem init --observability ``` To start observability containers: ```bash -zk_inception containers --observability +zkstack containers --observability ``` ### ZK Chain @@ -131,7 +133,7 @@ zk_inception containers --observability The first ZK chain is generated upon ecosystem creation. Create additional chains and switch between them: ```bash -zk_inception chain create +zkstack chain create ``` #### Init @@ -139,7 +141,7 @@ zk_inception chain create Deploy contracts and initialize Zk Chain: ```bash -zk_inception chain init +zkstack chain init ``` This registers the chain in the BridgeHub and deploys all necessary contracts. Manual initialization steps: @@ -154,7 +156,7 @@ by a third party). To run the chain: ```bash -zk_inception server +zkstack server ``` You can specify the component you want to run using `--components` flag @@ -180,13 +182,13 @@ information. Initialize the prover: ```bash -zk_inception prover init +zkstack prover init ``` Run the prover: ```bash -zk_inception prover run +zkstack prover run ``` Specify the prover component with `--component `. Components: @@ -202,13 +204,13 @@ For `witness-generator`, specify the round with `--round `. Rounds: Download required binaries (`solc`, `zksolc`, `vyper`, `zkvyper`): ```bash -zk_inception contract-verifier init +zkstack contract-verifier init ``` Run the contract verifier: ```bash -zk_inception contract-verifier run +zkstack contract-verifier run ``` ### External Node @@ -220,7 +222,7 @@ Commands for running an external node: Prepare configs: ```bash -zk_inception en configs +zkstack en configs ``` This ensures no port conflicts with the main node. @@ -230,7 +232,7 @@ This ensures no port conflicts with the main node. Prepare the databases: ```bash -zk_inception en init +zkstack en init ``` #### Run @@ -238,7 +240,7 @@ zk_inception en init Run the external node: ```bash -zk_inception en run +zkstack en run ``` ### Portal @@ -247,7 +249,7 @@ Once you have at least one chain initialized, you can run the [portal](https://g web-app to bridge tokens between L1 and L2 and more: ```bash -zk_inception portal +zkstack portal ``` This command will start the dockerized portal app using configuration from `apps/portal.config.json` file inside your @@ -263,7 +265,7 @@ contracts and more. First, each chain should be initialized: ```bash -zk_inception explorer init +zkstack explorer init ``` This command creates a database to store explorer data and generatesdocker compose file with explorer services @@ -272,7 +274,7 @@ This command creates a database to store explorer data and generatesdocker compo Next, for each chain you want to have an explorer, you need to start its backend services: ```bash -zk_inception explorer backend --chain +zkstack explorer backend --chain ``` This command uses previously created docker compose file to start the services (api, data fetcher, worker) required for @@ -281,7 +283,7 @@ the explorer. Finally, you can run the explorer app: ```bash -zk_inception explorer run +zkstack explorer run ``` This command will start the dockerized explorer app using configuration from `apps/explorer.config.json` file inside @@ -293,22 +295,22 @@ your ecosystem directory. You can edit this file to configure the app if needed. 
To update your node: ```bash -zk_inception update +zkstack update ``` This command pulls the latest changes, syncs the general config for all chains, and raises a warning if L1 upgrades are needed. -## ZK Supervisor +## Dev -Tools for developing ZKsync. +The subcommand `zkstack dev` offers tools for developing ZKsync. ### Database Commands for database manipulation: ```bash -zk_supervisor db +zkstack dev db ``` Possible commands: @@ -326,7 +328,7 @@ Possible commands: Clean artifacts: ```bash -zk_supervisor clean +zkstack dev clean ``` Possible commands: @@ -340,7 +342,7 @@ Possible commands: Run ZKsync tests: ```bash -zk_supervisor test +zkstack dev test ``` Possible commands: @@ -358,7 +360,7 @@ Possible commands: Create a snapshot of the current chain: ```bash -zks snapshot create +zkstack dev snapshot create ``` ### Contracts @@ -366,7 +368,7 @@ zks snapshot create Build contracts: ```bash -zks contracts +zkstack dev contracts ``` ### Format @@ -374,7 +376,7 @@ zks contracts Format code: ```bash -zks fmt +zkstack dev fmt ``` By default, this command runs all formatters. To run a specific fomatter use the following subcommands: @@ -388,7 +390,7 @@ By default, this command runs all formatters. To run a specific fomatter use the Lint code: ```bash -zks lint +zkstack dev lint ``` By default, this command runs the linter on all files. To target specific file types, use the `--target` option. diff --git a/zk_toolbox/crates/common/Cargo.toml b/zkstack_cli/crates/common/Cargo.toml similarity index 100% rename from zk_toolbox/crates/common/Cargo.toml rename to zkstack_cli/crates/common/Cargo.toml diff --git a/zk_toolbox/crates/common/src/cmd.rs b/zkstack_cli/crates/common/src/cmd.rs similarity index 100% rename from zk_toolbox/crates/common/src/cmd.rs rename to zkstack_cli/crates/common/src/cmd.rs diff --git a/zk_toolbox/crates/common/src/config.rs b/zkstack_cli/crates/common/src/config.rs similarity index 100% rename from zk_toolbox/crates/common/src/config.rs rename to zkstack_cli/crates/common/src/config.rs diff --git a/zk_toolbox/crates/common/src/db.rs b/zkstack_cli/crates/common/src/db.rs similarity index 100% rename from zk_toolbox/crates/common/src/db.rs rename to zkstack_cli/crates/common/src/db.rs diff --git a/zk_toolbox/crates/common/src/docker.rs b/zkstack_cli/crates/common/src/docker.rs similarity index 100% rename from zk_toolbox/crates/common/src/docker.rs rename to zkstack_cli/crates/common/src/docker.rs diff --git a/zk_toolbox/crates/common/src/ethereum.rs b/zkstack_cli/crates/common/src/ethereum.rs similarity index 100% rename from zk_toolbox/crates/common/src/ethereum.rs rename to zkstack_cli/crates/common/src/ethereum.rs diff --git a/zk_toolbox/crates/common/src/external_node.rs b/zkstack_cli/crates/common/src/external_node.rs similarity index 100% rename from zk_toolbox/crates/common/src/external_node.rs rename to zkstack_cli/crates/common/src/external_node.rs diff --git a/zk_toolbox/crates/common/src/files.rs b/zkstack_cli/crates/common/src/files.rs similarity index 100% rename from zk_toolbox/crates/common/src/files.rs rename to zkstack_cli/crates/common/src/files.rs diff --git a/zk_toolbox/crates/common/src/forge.rs b/zkstack_cli/crates/common/src/forge.rs similarity index 99% rename from zk_toolbox/crates/common/src/forge.rs rename to zkstack_cli/crates/common/src/forge.rs index e573e492aa4..bef285fb89b 100644 --- a/zk_toolbox/crates/common/src/forge.rs +++ b/zkstack_cli/crates/common/src/forge.rs @@ -278,7 +278,7 @@ pub struct ForgeScriptArgs { pub resume: bool, /// 
List of additional arguments that can be passed through the CLI. /// - /// e.g.: `zk_inception init -a --private-key=` + /// e.g.: `zkstack init -a --private-key=` #[clap(long, short)] #[arg(trailing_var_arg = true, allow_hyphen_values = true, hide = false)] additional_args: Vec, diff --git a/zk_toolbox/crates/common/src/git.rs b/zkstack_cli/crates/common/src/git.rs similarity index 100% rename from zk_toolbox/crates/common/src/git.rs rename to zkstack_cli/crates/common/src/git.rs diff --git a/zk_toolbox/crates/common/src/lib.rs b/zkstack_cli/crates/common/src/lib.rs similarity index 100% rename from zk_toolbox/crates/common/src/lib.rs rename to zkstack_cli/crates/common/src/lib.rs diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zkstack_cli/crates/common/src/prerequisites.rs similarity index 100% rename from zk_toolbox/crates/common/src/prerequisites.rs rename to zkstack_cli/crates/common/src/prerequisites.rs diff --git a/zk_toolbox/crates/common/src/prompt/confirm.rs b/zkstack_cli/crates/common/src/prompt/confirm.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/confirm.rs rename to zkstack_cli/crates/common/src/prompt/confirm.rs diff --git a/zk_toolbox/crates/common/src/prompt/input.rs b/zkstack_cli/crates/common/src/prompt/input.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/input.rs rename to zkstack_cli/crates/common/src/prompt/input.rs diff --git a/zk_toolbox/crates/common/src/prompt/mod.rs b/zkstack_cli/crates/common/src/prompt/mod.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/mod.rs rename to zkstack_cli/crates/common/src/prompt/mod.rs diff --git a/zk_toolbox/crates/common/src/prompt/select.rs b/zkstack_cli/crates/common/src/prompt/select.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/select.rs rename to zkstack_cli/crates/common/src/prompt/select.rs diff --git a/zk_toolbox/crates/common/src/server.rs b/zkstack_cli/crates/common/src/server.rs similarity index 100% rename from zk_toolbox/crates/common/src/server.rs rename to zkstack_cli/crates/common/src/server.rs diff --git a/zk_toolbox/crates/common/src/term/error.rs b/zkstack_cli/crates/common/src/term/error.rs similarity index 100% rename from zk_toolbox/crates/common/src/term/error.rs rename to zkstack_cli/crates/common/src/term/error.rs diff --git a/zk_toolbox/crates/common/src/term/logger.rs b/zkstack_cli/crates/common/src/term/logger.rs similarity index 97% rename from zk_toolbox/crates/common/src/term/logger.rs rename to zkstack_cli/crates/common/src/term/logger.rs index 17e518d9ad9..035e81dc135 100644 --- a/zk_toolbox/crates/common/src/term/logger.rs +++ b/zkstack_cli/crates/common/src/term/logger.rs @@ -14,7 +14,7 @@ fn term_write(msg: impl Display) { } pub fn intro() { - cliclak_intro(style(" ZKsync toolbox ").on_cyan().black()).unwrap(); + cliclak_intro(style(" ZK Stack CLI ").on_cyan().black()).unwrap(); } pub fn outro(msg: impl Display) { diff --git a/zk_toolbox/crates/common/src/term/mod.rs b/zkstack_cli/crates/common/src/term/mod.rs similarity index 100% rename from zk_toolbox/crates/common/src/term/mod.rs rename to zkstack_cli/crates/common/src/term/mod.rs diff --git a/zk_toolbox/crates/common/src/term/spinner.rs b/zkstack_cli/crates/common/src/term/spinner.rs similarity index 100% rename from zk_toolbox/crates/common/src/term/spinner.rs rename to zkstack_cli/crates/common/src/term/spinner.rs diff --git a/zk_toolbox/crates/common/src/version.rs b/zkstack_cli/crates/common/src/version.rs similarity 
index 100% rename from zk_toolbox/crates/common/src/version.rs rename to zkstack_cli/crates/common/src/version.rs diff --git a/zk_toolbox/crates/common/src/wallets.rs b/zkstack_cli/crates/common/src/wallets.rs similarity index 100% rename from zk_toolbox/crates/common/src/wallets.rs rename to zkstack_cli/crates/common/src/wallets.rs diff --git a/zk_toolbox/crates/common/src/yaml.rs b/zkstack_cli/crates/common/src/yaml.rs similarity index 100% rename from zk_toolbox/crates/common/src/yaml.rs rename to zkstack_cli/crates/common/src/yaml.rs diff --git a/zk_toolbox/crates/config/Cargo.toml b/zkstack_cli/crates/config/Cargo.toml similarity index 100% rename from zk_toolbox/crates/config/Cargo.toml rename to zkstack_cli/crates/config/Cargo.toml diff --git a/zk_toolbox/crates/config/src/apps.rs b/zkstack_cli/crates/config/src/apps.rs similarity index 96% rename from zk_toolbox/crates/config/src/apps.rs rename to zkstack_cli/crates/config/src/apps.rs index 697b35b0851..3bd611bdc32 100644 --- a/zk_toolbox/crates/config/src/apps.rs +++ b/zkstack_cli/crates/config/src/apps.rs @@ -5,7 +5,7 @@ use xshell::Shell; use crate::{ consts::{APPS_CONFIG_FILE, DEFAULT_EXPLORER_PORT, DEFAULT_PORTAL_PORT, LOCAL_CONFIGS_PATH}, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkStackConfig}, }; /// Ecosystem level configuration for the apps (portal and explorer). @@ -20,7 +20,7 @@ pub struct AppEcosystemConfig { pub http_port: u16, } -impl ZkToolboxConfig for AppsEcosystemConfig {} +impl ZkStackConfig for AppsEcosystemConfig {} impl FileConfigWithDefaultName for AppsEcosystemConfig { const FILE_NAME: &'static str = APPS_CONFIG_FILE; } diff --git a/zk_toolbox/crates/config/src/chain.rs b/zkstack_cli/crates/config/src/chain.rs similarity index 98% rename from zk_toolbox/crates/config/src/chain.rs rename to zkstack_cli/crates/config/src/chain.rs index d6b6e2b866b..6c82d6ef3c3 100644 --- a/zk_toolbox/crates/config/src/chain.rs +++ b/zkstack_cli/crates/config/src/chain.rs @@ -16,7 +16,7 @@ use crate::{ create_localhost_wallets, traits::{ FileConfigWithDefaultName, ReadConfig, ReadConfigWithBasePath, SaveConfig, - SaveConfigWithBasePath, ZkToolboxConfig, + SaveConfigWithBasePath, ZkStackConfig, }, ContractsConfig, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, }; @@ -165,4 +165,4 @@ impl FileConfigWithDefaultName for ChainConfigInternal { const FILE_NAME: &'static str = CONFIG_NAME; } -impl ZkToolboxConfig for ChainConfigInternal {} +impl ZkStackConfig for ChainConfigInternal {} diff --git a/zk_toolbox/crates/config/src/consensus_config.rs b/zkstack_cli/crates/config/src/consensus_config.rs similarity index 100% rename from zk_toolbox/crates/config/src/consensus_config.rs rename to zkstack_cli/crates/config/src/consensus_config.rs diff --git a/zk_toolbox/crates/config/src/consensus_secrets.rs b/zkstack_cli/crates/config/src/consensus_secrets.rs similarity index 100% rename from zk_toolbox/crates/config/src/consensus_secrets.rs rename to zkstack_cli/crates/config/src/consensus_secrets.rs diff --git a/zk_toolbox/crates/config/src/consts.rs b/zkstack_cli/crates/config/src/consts.rs similarity index 100% rename from zk_toolbox/crates/config/src/consts.rs rename to zkstack_cli/crates/config/src/consts.rs diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zkstack_cli/crates/config/src/contracts.rs similarity index 97% rename from zk_toolbox/crates/config/src/contracts.rs rename to 
zkstack_cli/crates/config/src/contracts.rs index 8296aa18852..e6676989e68 100644 --- a/zk_toolbox/crates/config/src/contracts.rs +++ b/zkstack_cli/crates/config/src/contracts.rs @@ -11,7 +11,7 @@ use crate::{ }, register_chain::output::RegisterChainOutput, }, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Deserialize, Serialize, Clone, Default)] @@ -114,7 +114,7 @@ impl FileConfigWithDefaultName for ContractsConfig { const FILE_NAME: &'static str = CONTRACTS_FILE; } -impl ZkToolboxConfig for ContractsConfig {} +impl ZkStackConfig for ContractsConfig {} #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] pub struct EcosystemContracts { @@ -125,7 +125,7 @@ pub struct EcosystemContracts { pub diamond_cut_data: String, } -impl ZkToolboxConfig for EcosystemContracts {} +impl ZkStackConfig for EcosystemContracts {} #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct BridgesContracts { diff --git a/zk_toolbox/crates/config/src/docker_compose.rs b/zkstack_cli/crates/config/src/docker_compose.rs similarity index 94% rename from zk_toolbox/crates/config/src/docker_compose.rs rename to zkstack_cli/crates/config/src/docker_compose.rs index 05c6e73eaea..2208c5a8654 100644 --- a/zk_toolbox/crates/config/src/docker_compose.rs +++ b/zkstack_cli/crates/config/src/docker_compose.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Default, Serialize, Deserialize, Clone)] pub struct DockerComposeConfig { @@ -34,7 +34,7 @@ pub struct DockerComposeService { pub other: serde_json::Value, } -impl ZkToolboxConfig for DockerComposeConfig {} +impl ZkStackConfig for DockerComposeConfig {} impl DockerComposeConfig { pub fn add_service(&mut self, name: &str, service: DockerComposeService) { diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zkstack_cli/crates/config/src/ecosystem.rs similarity index 98% rename from zk_toolbox/crates/config/src/ecosystem.rs rename to zkstack_cli/crates/config/src/ecosystem.rs index 7ac81cd5394..79cb1c4ea27 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zkstack_cli/crates/config/src/ecosystem.rs @@ -21,7 +21,7 @@ use crate::{ input::{Erc20DeploymentConfig, InitialDeploymentConfig}, output::{ERC20Tokens, Erc20Token}, }, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkStackConfig}, ChainConfig, ChainConfigInternal, ContractsConfig, WalletsConfig, }; @@ -94,9 +94,9 @@ impl FileConfigWithDefaultName for EcosystemConfig { const FILE_NAME: &'static str = CONFIG_NAME; } -impl ZkToolboxConfig for EcosystemConfigInternal {} +impl ZkStackConfig for EcosystemConfigInternal {} -impl ZkToolboxConfig for EcosystemConfig {} +impl ZkStackConfig for EcosystemConfig {} impl EcosystemConfig { fn get_shell(&self) -> &Shell { diff --git a/zk_toolbox/crates/config/src/explorer.rs b/zkstack_cli/crates/config/src/explorer.rs similarity index 98% rename from zk_toolbox/crates/config/src/explorer.rs rename to zkstack_cli/crates/config/src/explorer.rs index ee7a59e5105..7ce9b986a1e 100644 --- a/zk_toolbox/crates/config/src/explorer.rs +++ b/zkstack_cli/crates/config/src/explorer.rs @@ -8,7 +8,7 @@ use crate::{ EXPLORER_CONFIG_FILE, EXPLORER_JS_CONFIG_FILE, LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, }, - traits::{ReadConfig, SaveConfig, 
ZkToolboxConfig}, + traits::{ReadConfig, SaveConfig, ZkStackConfig}, }; /// Explorer JSON configuration file. This file contains configuration for the explorer app. @@ -144,4 +144,4 @@ impl Default for ExplorerConfig { } } -impl ZkToolboxConfig for ExplorerConfig {} +impl ZkStackConfig for ExplorerConfig {} diff --git a/zk_toolbox/crates/config/src/explorer_compose.rs b/zkstack_cli/crates/config/src/explorer_compose.rs similarity index 98% rename from zk_toolbox/crates/config/src/explorer_compose.rs rename to zkstack_cli/crates/config/src/explorer_compose.rs index ca9abc1e3e2..13dd665d2e3 100644 --- a/zk_toolbox/crates/config/src/explorer_compose.rs +++ b/zkstack_cli/crates/config/src/explorer_compose.rs @@ -16,7 +16,7 @@ use crate::{ EXPLORER_WORKER_DOCKER_IMAGE, LOCAL_CHAINS_PATH, LOCAL_CONFIGS_PATH, }, docker_compose::{DockerComposeConfig, DockerComposeService}, - traits::ZkToolboxConfig, + traits::ZkStackConfig, EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL, }; @@ -72,7 +72,7 @@ pub struct ExplorerBackendComposeConfig { pub docker_compose: DockerComposeConfig, } -impl ZkToolboxConfig for ExplorerBackendComposeConfig {} +impl ZkStackConfig for ExplorerBackendComposeConfig {} impl ExplorerBackendComposeConfig { const API_NAME: &'static str = "api"; diff --git a/zk_toolbox/crates/config/src/external_node.rs b/zkstack_cli/crates/config/src/external_node.rs similarity index 100% rename from zk_toolbox/crates/config/src/external_node.rs rename to zkstack_cli/crates/config/src/external_node.rs diff --git a/zk_toolbox/crates/config/src/file_config.rs b/zkstack_cli/crates/config/src/file_config.rs similarity index 100% rename from zk_toolbox/crates/config/src/file_config.rs rename to zkstack_cli/crates/config/src/file_config.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs b/zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs similarity index 71% rename from zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs index 636cffc49f8..4f73483b393 100644 --- a/zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs @@ -1,9 +1,9 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; -impl ZkToolboxConfig for AcceptOwnershipInput {} +impl ZkStackConfig for AcceptOwnershipInput {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct AcceptOwnershipInput { diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs similarity index 97% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs index 41ce906f455..d5611f805b1 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -10,7 +10,7 @@ use zksync_basic_types::L2ChainId; use crate::{ consts::INITIAL_DEPLOYMENT_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, ContractsConfig, GenesisConfig, WalletsConfig, }; @@ -61,7 +61,7 @@ impl FileConfigWithDefaultName for InitialDeploymentConfig { const FILE_NAME: &'static str = INITIAL_DEPLOYMENT_FILE; } -impl 
ZkToolboxConfig for InitialDeploymentConfig {} +impl ZkStackConfig for InitialDeploymentConfig {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct Erc20DeploymentConfig { @@ -72,7 +72,7 @@ impl FileConfigWithDefaultName for Erc20DeploymentConfig { const FILE_NAME: &'static str = INITIAL_DEPLOYMENT_FILE; } -impl ZkToolboxConfig for Erc20DeploymentConfig {} +impl ZkStackConfig for Erc20DeploymentConfig {} impl Default for Erc20DeploymentConfig { fn default() -> Self { @@ -115,7 +115,7 @@ pub struct DeployL1Config { pub tokens: TokensDeployL1Config, } -impl ZkToolboxConfig for DeployL1Config {} +impl ZkStackConfig for DeployL1Config {} impl DeployL1Config { pub fn new( @@ -212,7 +212,7 @@ pub struct DeployErc20Config { pub additional_addresses_for_minting: Vec
, } -impl ZkToolboxConfig for DeployErc20Config {} +impl ZkStackConfig for DeployErc20Config {} impl DeployErc20Config { pub fn new( diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/mod.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs similarity index 95% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs index 7f35cf0357c..7a922cbdf3c 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{ consts::ERC20_CONFIGS_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Deserialize, Serialize, Clone)] @@ -21,7 +21,7 @@ pub struct DeployL1Output { pub deployed_addresses: DeployL1DeployedAddressesOutput, } -impl ZkToolboxConfig for DeployL1Output {} +impl ZkStackConfig for DeployL1Output {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct DeployL1ContractsConfigOutput { @@ -98,4 +98,4 @@ impl FileConfigWithDefaultName for ERC20Tokens { const FILE_NAME: &'static str = ERC20_CONFIGS_FILE; } -impl ZkToolboxConfig for ERC20Tokens {} +impl ZkStackConfig for ERC20Tokens {} diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs similarity index 92% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs index b20b58f99c5..3836dca9d24 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs @@ -2,9 +2,9 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig}; +use crate::{traits::ZkStackConfig, ChainConfig}; -impl ZkToolboxConfig for DeployL2ContractsInput {} +impl ZkStackConfig for DeployL2ContractsInput {} /// Fields corresponding to `contracts/l1-contracts/deploy-script-config-template/config-deploy-l2-config.toml` /// which are read by `contracts/l1-contracts/deploy-scripts/DeployL2Contracts.sol`. 
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs similarity index 73% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs index ca5cac12c02..29be89b9101 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs @@ -1,12 +1,12 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; -impl ZkToolboxConfig for InitializeBridgeOutput {} -impl ZkToolboxConfig for DefaultL2UpgradeOutput {} -impl ZkToolboxConfig for ConsensusRegistryOutput {} -impl ZkToolboxConfig for Multicall3Output {} +impl ZkStackConfig for InitializeBridgeOutput {} +impl ZkStackConfig for DefaultL2UpgradeOutput {} +impl ZkStackConfig for ConsensusRegistryOutput {} +impl ZkStackConfig for Multicall3Output {} #[derive(Debug, Clone, Serialize, Deserialize)] pub struct InitializeBridgeOutput { diff --git a/zk_toolbox/crates/config/src/forge_interface/mod.rs b/zkstack_cli/crates/config/src/forge_interface/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs b/zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs similarity index 83% rename from zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs index 9631fe74318..2af7502e0b7 100644 --- a/zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs @@ -2,7 +2,7 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig}; +use crate::{traits::ZkStackConfig, ChainConfig}; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DeployPaymasterInput { @@ -22,11 +22,11 @@ impl DeployPaymasterInput { } } -impl ZkToolboxConfig for DeployPaymasterInput {} +impl ZkStackConfig for DeployPaymasterInput {} #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DeployPaymasterOutput { pub paymaster: Address, } -impl ZkToolboxConfig for DeployPaymasterOutput {} +impl ZkStackConfig for DeployPaymasterOutput {} diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs similarity index 96% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs index e2e60294e86..fb7c606a456 100644 --- a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use 
types::L1BatchCommitmentMode; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig}; +use crate::{traits::ZkStackConfig, ChainConfig, ContractsConfig}; #[derive(Debug, Deserialize, Serialize, Clone)] struct Bridgehub { @@ -50,7 +50,7 @@ pub struct ChainL1Config { pub governance_min_delay: u64, } -impl ZkToolboxConfig for RegisterChainL1Config {} +impl ZkStackConfig for RegisterChainL1Config {} impl RegisterChainL1Config { pub fn new(chain_config: &ChainConfig, contracts: &ContractsConfig) -> anyhow::Result { diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/mod.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs similarity index 75% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs index f9521b16328..a3e23f7bae4 100644 --- a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs @@ -1,7 +1,7 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct RegisterChainOutput { @@ -10,4 +10,4 @@ pub struct RegisterChainOutput { pub chain_admin_addr: Address, } -impl ZkToolboxConfig for RegisterChainOutput {} +impl ZkStackConfig for RegisterChainOutput {} diff --git a/zk_toolbox/crates/config/src/forge_interface/script_params.rs b/zkstack_cli/crates/config/src/forge_interface/script_params.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/script_params.rs rename to zkstack_cli/crates/config/src/forge_interface/script_params.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs b/zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs similarity index 86% rename from zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs index e8189c521fb..201cf86b734 100644 --- a/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use zksync_basic_types::{Address, L2ChainId, H256}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SetupLegacyBridgeInput { @@ -17,4 +17,4 @@ pub struct SetupLegacyBridgeInput { pub create2factory_addr: Address, } -impl ZkToolboxConfig for SetupLegacyBridgeInput {} +impl ZkStackConfig for SetupLegacyBridgeInput {} diff --git a/zk_toolbox/crates/config/src/general.rs b/zkstack_cli/crates/config/src/general.rs similarity index 100% rename from zk_toolbox/crates/config/src/general.rs rename to zkstack_cli/crates/config/src/general.rs diff --git a/zk_toolbox/crates/config/src/genesis.rs b/zkstack_cli/crates/config/src/genesis.rs similarity index 100% rename from zk_toolbox/crates/config/src/genesis.rs rename to 
zkstack_cli/crates/config/src/genesis.rs diff --git a/zk_toolbox/crates/config/src/lib.rs b/zkstack_cli/crates/config/src/lib.rs similarity index 100% rename from zk_toolbox/crates/config/src/lib.rs rename to zkstack_cli/crates/config/src/lib.rs diff --git a/zk_toolbox/crates/config/src/manipulations.rs b/zkstack_cli/crates/config/src/manipulations.rs similarity index 100% rename from zk_toolbox/crates/config/src/manipulations.rs rename to zkstack_cli/crates/config/src/manipulations.rs diff --git a/zk_toolbox/crates/config/src/portal.rs b/zkstack_cli/crates/config/src/portal.rs similarity index 98% rename from zk_toolbox/crates/config/src/portal.rs rename to zkstack_cli/crates/config/src/portal.rs index c787c6cc702..2b6f0ffd515 100644 --- a/zk_toolbox/crates/config/src/portal.rs +++ b/zkstack_cli/crates/config/src/portal.rs @@ -9,7 +9,7 @@ use crate::{ LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, PORTAL_CONFIG_FILE, PORTAL_JS_CONFIG_FILE, }, - traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{ReadConfig, SaveConfig, ZkStackConfig}, }; /// Portal JSON configuration file. This file contains configuration for the portal app. @@ -172,4 +172,4 @@ impl Default for PortalConfig { } } -impl ZkToolboxConfig for PortalConfig {} +impl ZkStackConfig for PortalConfig {} diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zkstack_cli/crates/config/src/secrets.rs similarity index 100% rename from zk_toolbox/crates/config/src/secrets.rs rename to zkstack_cli/crates/config/src/secrets.rs diff --git a/zk_toolbox/crates/config/src/traits.rs b/zkstack_cli/crates/config/src/traits.rs similarity index 95% rename from zk_toolbox/crates/config/src/traits.rs rename to zkstack_cli/crates/config/src/traits.rs index bb0722762e3..a4a4ad22c61 100644 --- a/zk_toolbox/crates/config/src/traits.rs +++ b/zkstack_cli/crates/config/src/traits.rs @@ -8,8 +8,8 @@ use serde::{de::DeserializeOwned, Serialize}; use url::Url; use xshell::Shell; -// Configs that we use only inside zk toolbox, we don't have protobuf implementation for them. -pub trait ZkToolboxConfig {} +// Configs that we use only inside ZK Stack CLI, we don't have protobuf implementation for them. 
+pub trait ZkStackConfig {} pub trait FileConfigWithDefaultName { const FILE_NAME: &'static str; @@ -19,7 +19,7 @@ } } -impl<T: Serialize + ZkToolboxConfig> SaveConfig for T { +impl<T: Serialize + ZkStackConfig> SaveConfig for T { fn save(&self, shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<()> { save_with_comment(shell, path, self, "") } @@ -49,7 +49,7 @@ pub trait ReadConfig: Sized { impl<T> ReadConfig for T where - T: DeserializeOwned + Clone + ZkToolboxConfig, + T: DeserializeOwned + Clone + ZkStackConfig, { fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { let error_context = || format!("Failed to parse config file {:?}.", path.as_ref()); diff --git a/zk_toolbox/crates/config/src/wallet_creation.rs b/zkstack_cli/crates/config/src/wallet_creation.rs similarity index 100% rename from zk_toolbox/crates/config/src/wallet_creation.rs rename to zkstack_cli/crates/config/src/wallet_creation.rs diff --git a/zk_toolbox/crates/config/src/wallets.rs b/zkstack_cli/crates/config/src/wallets.rs similarity index 91% rename from zk_toolbox/crates/config/src/wallets.rs rename to zkstack_cli/crates/config/src/wallets.rs index c650781bff5..735848f6e34 100644 --- a/zk_toolbox/crates/config/src/wallets.rs +++ b/zkstack_cli/crates/config/src/wallets.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use crate::{ consts::WALLETS_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Clone, Serialize, Deserialize)] @@ -55,6 +55,6 @@ pub(crate) struct EthMnemonicConfig { pub(crate) base_path: String, } -impl ZkToolboxConfig for EthMnemonicConfig {} +impl ZkStackConfig for EthMnemonicConfig {} -impl ZkToolboxConfig for WalletsConfig {} +impl ZkStackConfig for WalletsConfig {} diff --git a/zk_toolbox/crates/git_version_macro/Cargo.toml b/zkstack_cli/crates/git_version_macro/Cargo.toml similarity index 100% rename from zk_toolbox/crates/git_version_macro/Cargo.toml rename to zkstack_cli/crates/git_version_macro/Cargo.toml diff --git a/zk_toolbox/crates/git_version_macro/src/lib.rs b/zkstack_cli/crates/git_version_macro/src/lib.rs similarity index 100% rename from zk_toolbox/crates/git_version_macro/src/lib.rs rename to zkstack_cli/crates/git_version_macro/src/lib.rs diff --git a/zk_toolbox/crates/types/Cargo.toml b/zkstack_cli/crates/types/Cargo.toml similarity index 100% rename from zk_toolbox/crates/types/Cargo.toml rename to zkstack_cli/crates/types/Cargo.toml diff --git a/zk_toolbox/crates/types/src/base_token.rs b/zkstack_cli/crates/types/src/base_token.rs similarity index 100% rename from zk_toolbox/crates/types/src/base_token.rs rename to zkstack_cli/crates/types/src/base_token.rs diff --git a/zk_toolbox/crates/types/src/l1_network.rs b/zkstack_cli/crates/types/src/l1_network.rs similarity index 100% rename from zk_toolbox/crates/types/src/l1_network.rs rename to zkstack_cli/crates/types/src/l1_network.rs diff --git a/zk_toolbox/crates/types/src/lib.rs b/zkstack_cli/crates/types/src/lib.rs similarity index 100% rename from zk_toolbox/crates/types/src/lib.rs rename to zkstack_cli/crates/types/src/lib.rs diff --git a/zk_toolbox/crates/types/src/prover_mode.rs b/zkstack_cli/crates/types/src/prover_mode.rs similarity index 100% rename from zk_toolbox/crates/types/src/prover_mode.rs rename to zkstack_cli/crates/types/src/prover_mode.rs diff --git a/zk_toolbox/crates/types/src/token_info.rs b/zkstack_cli/crates/types/src/token_info.rs similarity index 100% rename from zk_toolbox/crates/types/src/token_info.rs rename to
zkstack_cli/crates/types/src/token_info.rs diff --git a/zk_toolbox/crates/types/src/wallet_creation.rs b/zkstack_cli/crates/types/src/wallet_creation.rs similarity index 100% rename from zk_toolbox/crates/types/src/wallet_creation.rs rename to zkstack_cli/crates/types/src/wallet_creation.rs diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zkstack_cli/crates/zkstack/Cargo.toml similarity index 93% rename from zk_toolbox/crates/zk_inception/Cargo.toml rename to zkstack_cli/crates/zkstack/Cargo.toml index e6687bdd981..a9fcecaf79b 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zkstack_cli/crates/zkstack/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "zk_inception" +name = "zkstack" version = "0.1.0" edition.workspace = true homepage.workspace = true @@ -12,33 +12,36 @@ keywords.workspace = true [dependencies] anyhow.workspace = true +chrono.workspace = true clap.workspace = true +clap-markdown.workspace = true cliclack.workspace = true +common.workspace = true config.workspace = true +ethers.workspace = true +futures.workspace = true human-panic.workspace = true lazy_static.workspace = true -serde_yaml.workspace = true +secrecy.workspace = true serde.workspace = true serde_json.workspace = true -xshell.workspace = true -ethers.workspace = true -common.workspace = true -tokio.workspace = true -types.workspace = true +serde_yaml.workspace = true +slugify-rs.workspace = true strum.workspace = true +sqruff-lib = "0.19.0" +thiserror.workspace = true +tokio.workspace = true toml.workspace = true +types.workspace = true url.workspace = true -thiserror.workspace = true -zksync_config.workspace = true -slugify-rs.workspace = true +xshell.workspace = true zksync_basic_types.workspace = true -clap-markdown.workspace = true +zksync_config.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_crypto.workspace = true zksync_protobuf.workspace = true zksync_protobuf_config.workspace = true prost.workspace = true -secrecy.workspace = true reqwest = "0.12.8" [dev-dependencies] diff --git a/zk_toolbox/crates/zk_inception/README.md b/zkstack_cli/crates/zkstack/README.md similarity index 99% rename from zk_toolbox/crates/zk_inception/README.md rename to zkstack_cli/crates/zkstack/README.md index 7fbbb58c88f..6e529efc200 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zkstack_cli/crates/zkstack/README.md @@ -42,7 +42,7 @@ This document contains the help content for the `zk_inception` command-line prog ## `zk_inception` -ZK Toolbox is a set of tools for working with zk stack. +ZK Stack CLI is a set of tools for working with zk stack. 
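The `ZkToolboxConfig` → `ZkStackConfig` rename in the `traits.rs` hunk above only touches the marker trait that gates the blanket `SaveConfig`/`ReadConfig` impls, so config types opt in the same way as before. A minimal sketch of that opt-in pattern (the `MyLocalConfig` type and its fields are hypothetical, and the trait bounds are assumed to match the hunk above):

```rust
use serde::{Deserialize, Serialize};

// Stand-in for the marker trait defined in zkstack_cli/crates/config/src/traits.rs:
// plain serde-backed configs with no protobuf schema behind them.
pub trait ZkStackConfig {}

// A hypothetical config type; any `Serialize + DeserializeOwned + Clone` struct works.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MyLocalConfig {
    pub rpc_url: String,
    pub chain_id: u64,
}

// Opting into the marker trait is the whole integration step: the blanket
// `impl<T: Serialize + ZkStackConfig> SaveConfig for T` and
// `impl<T: DeserializeOwned + Clone + ZkStackConfig> ReadConfig for T`
// shown in the diff above then supply `save` and `read` for this type.
impl ZkStackConfig for MyLocalConfig {}
```

The in-tree equivalents are visible in the `wallets.rs` hunk above, where `EthMnemonicConfig` and `WalletsConfig` switch to one-line `impl ZkStackConfig for … {}` declarations.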
**Usage:** `zk_inception [OPTIONS] ` diff --git a/zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json b/zkstack_cli/crates/zkstack/abi/ConsensusRegistry.json similarity index 100% rename from zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json rename to zkstack_cli/crates/zkstack/abi/ConsensusRegistry.json diff --git a/zk_toolbox/crates/zk_inception/build.rs b/zkstack_cli/crates/zkstack/build.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/build.rs rename to zkstack_cli/crates/zkstack/build.rs diff --git a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs b/zkstack_cli/crates/zkstack/src/accept_ownership.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/accept_ownership.rs rename to zkstack_cli/crates/zkstack/src/accept_ownership.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/containers.rs b/zkstack_cli/crates/zkstack/src/commands/args/containers.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/containers.rs rename to zkstack_cli/crates/zkstack/src/commands/args/containers.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs b/zkstack_cli/crates/zkstack/src/commands/args/run_server.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs rename to zkstack_cli/crates/zkstack/src/commands/args/run_server.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/update.rs b/zkstack_cli/crates/zkstack/src/commands/args/update.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/update.rs rename to zkstack_cli/crates/zkstack/src/commands/args/update.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs b/zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs similarity index 100% rename from 
zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/init/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs b/zkstack_cli/crates/zkstack/src/commands/chain/common.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/common.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/common.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/create.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/create.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs similarity index 100% rename from 
zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs b/zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs b/zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs b/zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/conv.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/consensus/conv.rs rename to zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.proto b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.proto rename to zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/tests.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/consensus/tests.rs rename to zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/containers.rs b/zkstack_cli/crates/zkstack/src/commands/containers.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/containers.rs rename to 
zkstack_cli/crates/zkstack/src/commands/containers.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs index 803e962c0ff..4cb419ce7a4 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs @@ -4,7 +4,7 @@ use common::{docker, logger}; use config::{EcosystemConfig, DOCKER_COMPOSE_FILE}; use xshell::Shell; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_CLEANED, MSG_DOCKER_COMPOSE_DOWN, MSG_DOCKER_COMPOSE_REMOVE_VOLUMES, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs similarity index 96% rename from zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs index 04e019936e1..70238ed15f3 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs @@ -4,7 +4,7 @@ use common::{logger, Prompt}; use config::{override_config, EcosystemConfig}; use xshell::Shell; -use crate::messages::{ 
+use crate::commands::dev::messages::{ msg_overriding_config, MSG_CHAIN_NOT_FOUND_ERR, MSG_OVERRIDE_CONFIG_PATH_HELP, MSG_OVERRIDE_SUCCESS, MSG_OVERRRIDE_CONFIG_PATH_PROMPT, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs similarity index 99% rename from zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs index bab4205cd66..6f420e66ba0 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs @@ -5,7 +5,7 @@ use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER, MSG_BUILDING_L2_CONTRACTS_SPINNER, MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER, MSG_BUILDING_TEST_CONTRACTS_SPINNER, MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs index cf9dfc2834a..f05e3ee1c0e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::{ +use crate::commands::dev::{ dals::SelectedDals, messages::{ MSG_DATABASE_COMMON_CORE_HELP, MSG_DATABASE_COMMON_CORE_URL_HELP, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs index 64b7a507abe..b91b048be78 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs @@ -2,7 +2,7 @@ use clap::{Parser, ValueEnum}; use common::{Prompt, PromptSelect}; use strum::{Display, EnumIter, IntoEnumIterator}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DATABASE_NEW_MIGRATION_DATABASE_HELP, MSG_DATABASE_NEW_MIGRATION_DB_PROMPT, MSG_DATABASE_NEW_MIGRATION_NAME_HELP, MSG_DATABASE_NEW_MIGRATION_NAME_PROMPT, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs index 0c401595690..990fca78641 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, 
msg_database_success, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs index 94bf325a2c6..a5578d41f77 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs @@ -6,7 +6,7 @@ use common::{ use xshell::Shell; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_DROP_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs index 1d648965c24..fd22f769742 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_MIGRATE_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs index 415b81879f1..ed039fc6501 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use xshell::Shell; use self::args::{new_migration::DatabaseNewMigrationArgs, DatabaseCommonArgs}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DATABASE_CHECK_SQLX_DATA_ABOUT, MSG_DATABASE_DROP_ABOUT, MSG_DATABASE_MIGRATE_ABOUT, MSG_DATABASE_NEW_MIGRATION_ABOUT, MSG_DATABASE_PREPARE_ABOUT, MSG_DATABASE_RESET_ABOUT, MSG_DATABASE_SETUP_ABOUT, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs index e21b7cde47b..2d9fa103053 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::new_migration::{DatabaseNewMigrationArgs, SelectedDatabase}; -use crate::{ +use crate::commands::dev::{ dals::{get_core_dal, get_prover_dal, Dal}, messages::{msg_database_new_migration_loading, MSG_DATABASE_NEW_MIGRATION_SUCCESS}, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs similarity index 98% rename from 
zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs index 82ec12f9412..288a68452fd 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_PREPARE_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs index f0262cecb95..55d5ab1cbfc 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::Shell; use super::{args::DatabaseCommonArgs, drop::drop_database, setup::setup_database}; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_RESET_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs index 15b3ac5c1c7..74ade66ba48 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_SETUP_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs index a6db4643c30..ebaf27845e0 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs @@ -6,7 +6,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::sql_fmt::format_sql; -use crate::{ +use crate::commands::dev::{ commands::lint_utils::{get_unignored_files, Target}, messages::{ msg_running_fmt_for_extension_spinner, msg_running_fmt_for_extensions_spinner, @@ -42,7 +42,7 @@ async fn prettier_contracts(shell: Shell, check: bool) -> anyhow::Result<()> { } async fn rustfmt(shell: Shell, check: bool, link_to_code: PathBuf) -> anyhow::Result<()> { - for dir in [".", "prover", "zk_toolbox"] { + for dir in [".", "prover", "zkstack_cli"] { let spinner = Spinner::new(&msg_running_rustfmt_for_dir_spinner(dir)); let _dir = shell.push_dir(link_to_code.join(dir)); let mut cmd = cmd!(shell, "cargo fmt -- --config imports_granularity=Crate --config group_imports=StdExternalCrate"); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs 
b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs similarity index 95% rename from zk_toolbox/crates/zk_supervisor/src/commands/lint.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs index 45a7a46ebbe..71f21a02e73 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs @@ -3,7 +3,7 @@ use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::lint_utils::{get_unignored_files, Target}, messages::{ msg_running_linter_for_extension_spinner, msg_running_linters_for_files, @@ -55,8 +55,8 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::R let link_to_code = &ecosystem.link_to_code; let lint_to_prover = &ecosystem.link_to_code.join("prover"); - let link_to_toolbox = &ecosystem.link_to_code.join("zk_toolbox"); - let paths = vec![link_to_code, lint_to_prover, link_to_toolbox]; + let link_to_zkstack = &ecosystem.link_to_code.join("zkstack_cli"); + let paths = vec![link_to_code, lint_to_prover, link_to_zkstack]; spinner.freeze(); for path in paths { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_batch.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_batch.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_version.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_version.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs index 441edb2c4b2..84873e931b3 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs @@ -8,7 +8,7 @@ use common::logger; use config::{ChainConfig, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; +use 
crate::commands::dev::messages::MSG_CHAIN_NOT_FOUND_ERR; pub async fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs index 8c2cdd4d88d..0e0c0ba33af 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs @@ -2,7 +2,7 @@ use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::prover::{ args::insert_batch::{InsertBatchArgs, InsertBatchArgsFinal}, info, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs index 3dd9b7e0a1b..f7bd175f577 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs @@ -2,7 +2,7 @@ use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::prover::{ args::insert_version::{InsertVersionArgs, InsertVersionArgsFinal}, info, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs index e3d4f220ff2..03d9ec9b736 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs @@ -4,7 +4,7 @@ use clap::Parser; use common::Prompt; use url::Url; -use crate::{ +use crate::commands::dev::{ defaults::LOCAL_RPC_URL, messages::{ MSG_INVALID_L1_RPC_URL_ERR, MSG_PROMPT_L1_RPC_URL, MSG_PROMPT_SECRET_KEY, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs similarity index 99% rename from zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs index 79d8efc600e..2f54579ade9 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs @@ -17,7 +17,7 
@@ use tokio::time::sleep; use xshell::Shell; use zksync_basic_types::{H160, U256}; -use crate::{ +use crate::commands::dev::{ consts::DEFAULT_UNSIGNED_TRANSACTIONS_DIR, messages::{ msg_send_txns_outro, MSG_FAILED_TO_SEND_TXN_ERR, MSG_UNABLE_TO_OPEN_FILE_ERR, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs similarity index 91% rename from zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs index 608c5623334..8e4c7183cb5 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs @@ -4,7 +4,7 @@ use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_RUNNING_SNAPSHOT_CREATOR}; +use crate::commands::dev::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_RUNNING_SNAPSHOT_CREATOR}; #[derive(Subcommand, Debug)] pub enum SnapshotCommands { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs index ede2500e6ab..0f7ce061ce1 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs @@ -6,7 +6,7 @@ use sqruff_lib::{api::simple::get_simple_config, core::linter::core::Linter}; use xshell::Shell; use super::lint_utils::{get_unignored_files, IgnoredData, Target}; -use crate::messages::{msg_file_is_not_formatted, MSG_RUNNING_SQL_FMT_SPINNER}; +use crate::commands::dev::messages::{msg_file_is_not_formatted, MSG_RUNNING_SQL_FMT_SPINNER}; fn format_query(query: &str) -> anyhow::Result { let exclude_rules = vec!["LT12".to_string()]; // avoid adding newline before `$` character @@ -138,7 +138,7 @@ pub async fn format_sql(shell: Shell, check: bool) -> anyhow::Result<()> { let spinner = Spinner::new(MSG_RUNNING_SQL_FMT_SPINNER); let ignored_data = Some(IgnoredData { files: vec![], - dirs: vec!["zk_toolbox".to_string()], + dirs: vec!["zkstack_cli".to_string()], }); let rust_files = get_unignored_files(&shell, &Target::Rs, ignored_data)?; for file in rust_files { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs similarity index 78% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs index 1337566e536..83d505aa575 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs @@ -1,7 +1,7 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP}; +use crate::commands::dev::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct FeesArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs similarity index 78% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs rename to 
zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs index 6cec40a2e33..625df0fc151 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs @@ -1,7 +1,9 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP}; +use crate::commands::dev::messages::{ + MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP, +}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct IntegrationArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs similarity index 76% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs index 81cc58fbd9b..cf4734fd82e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs @@ -1,7 +1,9 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_TESTS_RECOVERY_SNAPSHOT_HELP}; +use crate::commands::dev::messages::{ + MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_TESTS_RECOVERY_SNAPSHOT_HELP, +}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct RecoveryArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs similarity index 93% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs index 0154a4c0afd..e4fb7fba2a9 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_REVERT_TEST_ENABLE_CONSENSUS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs similarity index 70% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs index 2d94adc3f6a..6ca277f6a2f 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::MSG_TEST_RUST_OPTIONS_HELP; +use crate::commands::dev::messages::MSG_TEST_RUST_OPTIONS_HELP; #[derive(Debug, Parser)] pub struct RustArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs similarity index 72% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs rename to 
zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs index dd96957e9d3..7b631b91e9a 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::MSG_NO_DEPS_HELP; +use crate::commands::dev::messages::MSG_NO_DEPS_HELP; #[derive(Debug, Parser)] pub struct UpgradeArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs similarity index 88% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs index d173bb95168..19f6307019b 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs @@ -3,7 +3,7 @@ use std::path::Path; use common::{cmd::Cmd, db::wait_for_db, logger}; use xshell::{cmd, Shell}; -use crate::{commands::database, dals::Dal, messages::MSG_RESETTING_TEST_DATABASES}; +use crate::commands::dev::{commands::database, dals::Dal, messages::MSG_RESETTING_TEST_DATABASES}; pub async fn reset_test_databases( shell: &Shell, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs index e0b881a14db..e58a70e6b7c 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs @@ -9,7 +9,7 @@ use super::{ args::fees::FeesArgs, utils::{build_contracts, install_and_build_dependencies, TS_INTEGRATION_PATH}, }; -use crate::{ +use crate::commands::dev::{ commands::test::utils::{TestWallets, TEST_WALLETS_PATH}, messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs index 5107abf6a59..3bc3093bf93 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs @@ -12,7 +12,7 @@ use super::{ TS_INTEGRATION_PATH, }, }; -use crate::messages::{ +use crate::commands::dev::messages::{ msg_integration_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_INTEGRATION_TESTS_RUN_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs similarity index 86% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs index 0a1e1ec5203..7d163daed67 100644 --- 
a/zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs @@ -2,7 +2,7 @@ use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::MSG_L1_CONTRACTS_TEST_SUCCESS; +use crate::commands::dev::messages::MSG_L1_CONTRACTS_TEST_SUCCESS; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs similarity index 95% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs index ee307438ec9..72a8f97ff97 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs @@ -3,7 +3,7 @@ use common::{cmd::Cmd, config::global_config, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; +use crate::commands::dev::messages::MSG_CHAIN_NOT_FOUND_ERR; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs index facd98850d4..095e27652aa 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs @@ -5,7 +5,7 @@ use args::{ use clap::Subcommand; use xshell::Shell; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_BUILD_ABOUT, MSG_INTEGRATION_TESTS_ABOUT, MSG_L1_CONTRACTS_ABOUT, MSG_LOADTEST_ABOUT, MSG_PROVER_TEST_ABOUT, MSG_RECOVERY_TEST_ABOUT, MSG_REVERT_TEST_ABOUT, MSG_RUST_TEST_ABOUT, MSG_TEST_WALLETS_INFO, MSG_UPGRADE_TEST_ABOUT, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs index f48b359a935..200baf57215 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use url::Url; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::test::db::reset_test_databases, dals::{Dal, PROVER_DAL_PATH}, defaults::TEST_DATABASE_PROVER_URL, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs index 6a3e337d41e..ae889969fd2 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs @@ -9,7 +9,7 @@ use super::{ args::recovery::RecoveryArgs, utils::{install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH}, }; -use crate::messages::{ +use 
crate::commands::dev::messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_RECOVERY_TEST_RUN_INFO, MSG_RECOVERY_TEST_RUN_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs index 8b00e9d7f4d..dc95c88db20 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs @@ -9,7 +9,7 @@ use super::{ args::revert::RevertArgs, utils::{install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH}, }; -use crate::messages::{ +use crate::commands::dev::messages::{ msg_revert_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_REVERT_TEST_RUN_INFO, MSG_REVERT_TEST_RUN_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs similarity index 94% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs index 7011e0f0f87..8c0c707f6a2 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs @@ -7,7 +7,7 @@ use url::Url; use xshell::{cmd, Shell}; use super::args::rust::RustArgs; -use crate::{ +use crate::commands::dev::{ commands::test::db::reset_test_databases, dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH}, defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL}, @@ -75,8 +75,8 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { .env("TEST_PROVER_DATABASE_URL", test_prover_url); cmd.run()?; - // Run unit tests for zk_toolbox - let _dir_guard = shell.push_dir(link_to_code.join("zk_toolbox")); + // Run unit tests for ZK Stack CLI + let _dir_guard = shell.push_dir(link_to_code.join("zkstack_cli")); Cmd::new(cmd!(shell, "cargo nextest run --release")) .with_force_run() .run()?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs similarity index 91% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs index 9bd04b81ef3..707e0086ed1 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs @@ -3,7 +3,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::{args::upgrade::UpgradeArgs, utils::install_and_build_dependencies}; -use crate::messages::{MSG_UPGRADE_TEST_RUN_INFO, MSG_UPGRADE_TEST_RUN_SUCCESS}; +use crate::commands::dev::messages::{MSG_UPGRADE_TEST_RUN_INFO, MSG_UPGRADE_TEST_RUN_SUCCESS}; const UPGRADE_TESTS_PATH: &str = "core/tests/upgrade-test"; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs index d980490c3d5..bcd524bd2cb 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs +++ 
b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs @@ -10,7 +10,7 @@ use ethers::{ use serde::Deserialize; use xshell::{cmd, Shell}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS, MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs similarity index 96% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs index 62f32b50d55..6953014bf92 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs @@ -6,7 +6,7 @@ use config::EcosystemConfig; use xshell::Shell; use super::utils::{TestWallets, TEST_WALLETS_PATH}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_TEST_WALLETS_INFO, MSG_WALLETS_TEST_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/consts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/consts.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/consts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/consts.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zkstack_cli/crates/zkstack/src/commands/dev/dals.rs similarity index 99% rename from zk_toolbox/crates/zk_supervisor/src/dals.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/dals.rs index b998eb4301d..9626edfed73 100644 --- a/zk_toolbox/crates/zk_supervisor/src/dals.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/dals.rs @@ -3,7 +3,7 @@ use config::{EcosystemConfig, SecretsConfig}; use url::Url; use xshell::Shell; -use crate::{ +use super::{ commands::database::args::DalUrls, messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_DATABASE_MUST_BE_PRESENTED}, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/defaults.rs b/zkstack_cli/crates/zkstack/src/commands/dev/defaults.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/defaults.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/defaults.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/messages.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/messages.rs index 6f6deb22edb..00617e26064 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs @@ -1,12 +1,8 @@ -use crate::commands::lint_utils::Target; +use super::commands::lint_utils::Target; // Ecosystem related messages pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found"; -pub(super) fn msg_global_chain_does_not_exist(chain: &str, available_chains: &str) -> String { - format!("Chain with name {chain} doesnt exist, please choose one of: {available_chains}") -} - // Subcommands help pub(super) const MSG_PROVER_VERSION_ABOUT: &str = "Protocol version used by provers"; pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related commands"; diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs new file mode 100644 index 00000000000..e8d23f15b69 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs @@ -0,0 +1,61 @@ +use clap::Subcommand; +use xshell::Shell; + +use self::commands::{ + 
clean::CleanCommands, config_writer::ConfigWriterArgs, contracts::ContractsArgs, + database::DatabaseCommands, fmt::FmtArgs, lint::LintArgs, prover::ProverCommands, + send_transactions::args::SendTransactionsArgs, snapshot::SnapshotCommands, test::TestCommands, +}; +use crate::commands::dev::messages::{ + MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, + MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, + MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, +}; + +mod commands; +mod consts; +mod dals; +mod defaults; +mod messages; + +#[derive(Subcommand, Debug)] +pub enum DevCommands { + #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT, alias = "db")] + Database(DatabaseCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_TESTS_ABOUT, alias = "t")] + Test(TestCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_CLEAN)] + Clean(CleanCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT)] + Snapshot(SnapshotCommands), + #[command(about = MSG_SUBCOMMAND_LINT_ABOUT, alias = "l")] + Lint(LintArgs), + #[command(about = MSG_SUBCOMMAND_FMT_ABOUT)] + Fmt(FmtArgs), + #[command(subcommand, about = MSG_PROVER_VERSION_ABOUT)] + Prover(ProverCommands), + #[command(about = MSG_CONTRACTS_ABOUT)] + Contracts(ContractsArgs), + #[command(about = MSG_CONFIG_WRITER_ABOUT, alias = "o")] + ConfigWriter(ConfigWriterArgs), + #[command(about = MSG_SEND_TXNS_ABOUT)] + SendTransactions(SendTransactionsArgs), +} + +pub async fn run(shell: &Shell, args: DevCommands) -> anyhow::Result<()> { + match args { + DevCommands::Database(command) => commands::database::run(shell, command).await?, + DevCommands::Test(command) => commands::test::run(shell, command).await?, + DevCommands::Clean(command) => commands::clean::run(shell, command)?, + DevCommands::Snapshot(command) => commands::snapshot::run(shell, command).await?, + DevCommands::Lint(args) => commands::lint::run(shell, args)?, + DevCommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, + DevCommands::Prover(command) => commands::prover::run(shell, command).await?, + DevCommands::Contracts(args) => commands::contracts::run(shell, args)?, + DevCommands::ConfigWriter(args) => commands::config_writer::run(shell, args)?, + DevCommands::SendTransactions(args) => { + commands::send_transactions::run(shell, args).await? 
+ } + } + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/change_default.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/change_default.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/change_default.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/change_default.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/change_default.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/change_default.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/create_configs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/create_configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs similarity index 100% rename from 
zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/setup_observability.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/setup_observability.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/utils.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/utils.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/backend.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/backend.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/prepare_configs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/prepare_configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs 
rename to zkstack_cli/crates/zkstack/src/commands/external_node/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/mod.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/mod.rs index 78a46797602..c46400cc865 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/mod.rs @@ -3,6 +3,7 @@ pub mod chain; pub mod consensus; pub mod containers; pub mod contract_verifier; +pub mod dev; pub mod ecosystem; pub mod explorer; pub mod external_node; diff --git a/zk_toolbox/crates/zk_inception/src/commands/portal.rs b/zkstack_cli/crates/zkstack/src/commands/portal.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/portal.rs rename to zkstack_cli/crates/zkstack/src/commands/portal.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/compressor_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/compressor_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs diff --git 
a/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/setup_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/setup_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs b/zkstack_cli/crates/zkstack/src/commands/prover/gcs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/gcs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/init.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/init_bellman_cuda.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/init_bellman_cuda.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zkstack_cli/crates/zkstack/src/commands/prover/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/run.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/setup_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/setup_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/server.rs b/zkstack_cli/crates/zkstack/src/commands/server.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/server.rs rename to zkstack_cli/crates/zkstack/src/commands/server.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/update.rs b/zkstack_cli/crates/zkstack/src/commands/update.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/update.rs rename to zkstack_cli/crates/zkstack/src/commands/update.rs diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zkstack_cli/crates/zkstack/src/consts.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/consts.rs rename to zkstack_cli/crates/zkstack/src/consts.rs diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zkstack_cli/crates/zkstack/src/defaults.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/defaults.rs rename to zkstack_cli/crates/zkstack/src/defaults.rs diff --git a/zk_toolbox/crates/zk_inception/src/external_node.rs 
b/zkstack_cli/crates/zkstack/src/external_node.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/external_node.rs rename to zkstack_cli/crates/zkstack/src/external_node.rs diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zkstack_cli/crates/zkstack/src/main.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/main.rs rename to zkstack_cli/crates/zkstack/src/main.rs index a305ca053b7..987de555ecf 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zkstack_cli/crates/zkstack/src/main.rs @@ -2,6 +2,7 @@ use clap::{command, Parser, Subcommand}; use commands::{ args::{ContainersArgs, UpdateArgs}, contract_verifier::ContractVerifierCommands, + dev::DevCommands, }; use common::{ check_general_prerequisites, @@ -46,6 +47,9 @@ pub enum InceptionSubcommands { /// Chain related commands #[command(subcommand, alias = "c")] Chain(Box), + /// Chain related commands + #[command(subcommand)] + Dev(DevCommands), /// Prover related commands #[command(subcommand, alias = "p")] Prover(ProverCommands), @@ -123,6 +127,7 @@ async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Res match inception_args.command { InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, *args).await?, InceptionSubcommands::Chain(args) => commands::chain::run(shell, *args).await?, + InceptionSubcommands::Dev(args) => commands::dev::run(shell, args).await?, InceptionSubcommands::Prover(args) => commands::prover::run(shell, args).await?, InceptionSubcommands::Server(args) => commands::server::run(shell, args)?, InceptionSubcommands::Containers(args) => commands::containers::run(shell, args)?, diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs similarity index 99% rename from zk_toolbox/crates/zk_inception/src/messages.rs rename to zkstack_cli/crates/zkstack/src/messages.rs index ebdcf7378a4..d1d86db8398 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -296,7 +296,7 @@ pub(super) const MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR: &str = pub(super) const MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR: &str = "Failed to create explorer config"; pub(super) const MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR: &str = - "Failed to find any valid chain to run explorer for. Did you run `zk_inception explorer init`?"; + "Failed to find any valid chain to run explorer for. 
Did you run `zkstack explorer init`?"; pub(super) const MSG_EXPLORER_INITIALIZED: &str = "Explorer has been initialized successfully"; pub(super) fn msg_explorer_initializing_database_for(chain: &str) -> String { format!("Initializing explorer database for {chain} chain") @@ -311,7 +311,7 @@ pub(super) fn msg_explorer_starting_on(host: &str, port: u16) -> String { format!("Starting explorer on http://{host}:{port}") } pub(super) fn msg_explorer_chain_not_initialized(chain: &str) -> String { - format!("Chain {chain} is not initialized for explorer: run `zk_inception explorer init --chain {chain}` first") + format!("Chain {chain} is not initialized for explorer: run `zkstack explorer init --chain {chain}` first") } /// Forge utils related messages diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zkstack_cli/crates/zkstack/src/utils/consensus.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/consensus.rs rename to zkstack_cli/crates/zkstack/src/utils/consensus.rs diff --git a/zk_toolbox/crates/zk_inception/src/utils/forge.rs b/zkstack_cli/crates/zkstack/src/utils/forge.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/forge.rs rename to zkstack_cli/crates/zkstack/src/utils/forge.rs diff --git a/zk_toolbox/crates/zk_inception/src/utils/mod.rs b/zkstack_cli/crates/zkstack/src/utils/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/mod.rs rename to zkstack_cli/crates/zkstack/src/utils/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zkstack_cli/crates/zkstack/src/utils/ports.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/ports.rs rename to zkstack_cli/crates/zkstack/src/utils/ports.rs diff --git a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs b/zkstack_cli/crates/zkstack/src/utils/rocks_db.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs rename to zkstack_cli/crates/zkstack/src/utils/rocks_db.rs diff --git a/zk_toolbox/rust-toolchain b/zkstack_cli/rust-toolchain similarity index 100% rename from zk_toolbox/rust-toolchain rename to zkstack_cli/rust-toolchain diff --git a/zkstack_cli/zkstackup/README.md b/zkstack_cli/zkstackup/README.md new file mode 100644 index 00000000000..4977c4641e0 --- /dev/null +++ b/zkstack_cli/zkstackup/README.md @@ -0,0 +1,70 @@ +# zkstackup - ZK Stack CLI Installer + +`zkstackup` is a script designed to simplify the installation of +[ZK Stack CLI](https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli). It allows you to install the tool from +a local directory or directly from a GitHub repository. + +## Getting Started + +To install `zkstackup`, run the following command: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +After installing `zkstackup`, you can use it to install `zkstack_cli` with: + +```bash +zkstackup +``` + +## Usage + +The `zkstackup` script provides various options for installing ZK Stack CLI: + +### Options + +- `-p, --path ` + Specify a local path to install ZK Stack CLI from. This option is ignored if `--repo` is provided. + +- `-r, --repo ` + GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". + +- `-b, --branch ` + Git branch to use when installing from a repository. Ignored if `--commit` or `--version` is provided. + +- `-c, --commit ` + Git commit hash to use when installing from a repository. 
Ignored if `--branch` or `--version` is provided. + +- `-v, --version ` + Git tag to use when installing from a repository. Ignored if `--branch` or `--commit` is provided. + +### Local Installation + +If you provide a local path using the `-p` or `--path` option, `zkstackup` will install ZK Stack CLI from that +directory. Note that repository-specific arguments (`--repo`, `--branch`, `--commit`, `--version`) will be ignored in +this case to preserve git state. + +### Repository Installation + +By default, `zkstackup` installs ZK Stack CLI from the "matter-labs/zksync-era" GitHub repository. You can specify a +different repository, branch, commit, or version using the respective options. If multiple arguments are provided, +`zkstackup` will prioritize them as follows: + +- `--version` +- `--commit` +- `--branch` + +### Examples + +**Install from a GitHub repository with a specific version:** + +```bash +zkstackup --repo matter-labs/zksync-era --version 0.1.1 +``` + +**Install from a local path:** + +```bash +zkstackup --path /path/to/local/zkstack_cli +``` diff --git a/zkstack_cli/zkstackup/install b/zkstack_cli/zkstackup/install new file mode 100755 index 00000000000..f20ba4dd545 --- /dev/null +++ b/zkstack_cli/zkstackup/install @@ -0,0 +1,121 @@ +#!/usr/bin/env bash +set -eo pipefail + +BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/zkstackup" + +HOME_DIR=${XDG_CONFIG_HOME:-$HOME} +BIN_DIR="$HOME_DIR/.local/bin" +BIN_PATH="$BIN_DIR/zkstackup" + +main() { + parse_args "$@" + + mkdir -p "$BIN_DIR" + + if [ -n "$ZKSTACKUP_PATH" ]; then + cp -r "$ZKSTACKUP_PATH" "$BIN_DIR" + else + curl -sSfL "$BIN_URL" -o "$BIN_PATH" + fi + + chmod +x "$BIN_PATH" + echo "zkstackup: successfully installed in ${BIN_DIR}." + + add_bin_folder_to_path +} + +add_bin_folder_to_path() { + if [[ ":$PATH:" == *":${BIN_DIR}:"* ]]; then + echo "zkstackup: found ${BIN_DIR} in PATH" + exit 0 + fi + + case $SHELL in + */zsh) + PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" + ;; + */bash) + PROFILE="$HOME/.bashrc" + ;; + */fish) + PROFILE="$HOME/.config/fish/config.fish" + ;; + */ash) + PROFILE="$HOME/.profile" + ;; + *) + echo "zkstackup: could not detect shell, manually add ${BIN_DIR} to your PATH." + exit 1 + ;; + esac + + if [[ ! -f "$PROFILE" ]]; then + echo "zkstackup: Profile file $PROFILE does not exist, creating it." + touch "$PROFILE" + fi + + if [[ "$SHELL" == *"/fish"* ]]; then + echo -e "\n# Added by zkstackup\nfish_add_path -a $BIN_DIR" >>"$PROFILE" + echo "zkstackup: Added $BIN_DIR to PATH in $PROFILE using fish_add_path." + else + echo -e "\n# Added by zkstackup\nexport PATH=\"\$PATH:$BIN_DIR\"" >>"$PROFILE" + echo "zkstackup: Added $BIN_DIR to PATH in $PROFILE." + fi + + echo + echo "Added zkstackup to PATH." + echo "Run 'source $PROFILE' or start a new terminal session to use zkstackup." + echo "Then run 'zkstackup' to install ZK Stack CLI." +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + --) + shift + break + ;; + -p | --path) + shift + ZKSTACKUP_PATH=$1 + ;; + -l | --local) + ZKSTACKUP_PATH="./" + ;; + -g | --global) + BIN_DIR="/usr/local/bin" + BIN_PATH="$BIN_DIR/zkstackup" + ;; + -h | --help) + usage + exit 0 + ;; + *) + err "Unknown argument: $1" + usage + exit 1 + ;; + esac + shift + done +} + + +usage() { + cat < Specify a local path to install zkstackup from. + -l, --local Install zkstackup from the current directory. + -g, --global Install zkstackup for all users. + -h, --help Show this help message and exit. 
+ +Examples: + $(basename "$0") --path /path/to/zkstackup +EOF +} + +main "$@" diff --git a/zkstack_cli/zkstackup/zkstackup b/zkstack_cli/zkstackup/zkstackup new file mode 100755 index 00000000000..20a061620f9 --- /dev/null +++ b/zkstack_cli/zkstackup/zkstackup @@ -0,0 +1,273 @@ +#!/usr/bin/env bash +set -eo pipefail + +HOME_DIR=${XDG_CONFIG_HOME:-$HOME} +LOCAL_DIR=${LOCAL_DIR:-"$HOME_DIR/.local"} +BIN_DIR="$LOCAL_DIR/bin" + +BINS=() + +main() { + parse_args "$@" + + zkstack_banner + + check_prerequisites + mkdir -p "$BIN_DIR" + + BINS+=(zkstack) + + if [ -n "$ZKSTACKUP_PATH" ]; then + install_local + else + install_from_repo + fi + + zkstack_banner + + add_bin_folder_to_path + + for bin in "${BINS[@]}"; do + success "Installed $bin to $BIN_DIR/$bin" + done +} + +PREREQUISITES=(cargo git) + +check_prerequisites() { + say "Checking prerequisites" + + failed_prerequisites=() + for prerequisite in "${PREREQUISITES[@]}"; do + if ! check_prerequisite "$prerequisite"; then + failed_prerequisites+=("$prerequisite") + fi + done + if [ ${#failed_prerequisites[@]} -gt 0 ]; then + err "The following prerequisites are missing: ${failed_prerequisites[*]}" + exit 1 + fi +} + +check_prerequisite() { + command -v "$1" &>/dev/null +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + --) + shift + break + ;; + -p | --path) + shift + ZKSTACKUP_PATH=$1 + ;; + -l | --local) + ZKSTACKUP_PATH="./" + ;; + -g | --global) + LOCAL_DIR="/usr/local" + BIN_DIR="$LOCAL_DIR/bin" + ;; + -r | --repo) + shift + ZKSTACKUP_REPO=$1 + ;; + -b | --branch) + shift + ZKSTACKUP_BRANCH=$1 + ;; + -c | --commit) + shift + ZKSTACKUP_COMMIT=$1 + ;; + -v | --version) + shift + ZKSTACKUP_VERSION=$1 + ;; + -h | --help) + usage + exit 0 + ;; + *) + err "Unknown argument: $1" + usage + exit 1 + ;; + esac + shift + done +} + +usage() { + cat < Specify a local path to install ZK Stack CLI from. Ignored if --repo is provided. + -l, --local Install ZK Stack CLI from the current directory. Ignored if --repo is provided. + -g, --global Install ZK Stack CLI for all users. + -r, --repo GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". + -b, --branch Git branch to use when installing from a repository. Ignored if --commit or --version is provided. + -c, --commit Git commit hash to use when installing from a repository. Ignored if --branch or --version is provided. + -v, --version Git tag to use when installing from a repository. Ignored if --branch or --commit is provided. + -h, --help Show this help message and exit. + +Examples: + $(basename "$0") --repo matter-labs/zksync-era --version 0.1.1 +EOF +} + +install_local() { + if [ ! 
-d "$ZKSTACKUP_PATH/zkstack_cli" ]; then + err "Path $ZKSTACKUP_PATH does not contain zkstack_cli" + exit 1 + fi + + if [ "$ZKSTACKUP_PATH" = "./" ]; then + if git rev-parse --is-inside-work-tree >/dev/null 2>&1; then + git config --local core.hooksPath || + git config --local core.hooksPath ./.githooks + fi + fi + + if [ -n "$ZKSTACKUP_BRANCH" ] || [ -n "$ZKSTACKUP_COMMIT" ] || [ -n "$ZKSTACKUP_VERSION" ] || [ -n "$ZKSTACKUP_REPO" ]; then + warn "Ignoring --repo, --branch, --commit and --version arguments when installing from local path" + fi + + say "Installing ZK Stack CLI from $ZKSTACKUP_PATH" + ensure cd "$ZKSTACKUP_PATH"/zkstack_cli + + for bin in "${BINS[@]}"; do + say "Installing $bin" + ensure cargo install --root $LOCAL_DIR --path ./crates/$bin --force + chmod +x "$BIN_DIR/$bin" + done +} + +install_from_repo() { + if [ -n "$ZKSTACKUP_PATH" ]; then + warn "Ignoring --path argument when installing from repository" + fi + + ZKSTACKUP_REPO=${ZKSTACKUP_REPO:-"matter-labs/zksync-era"} + + say "Installing ZK Stack CLI from $ZKSTACKUP_REPO" + + if [ -n "$ZKSTACKUP_VERSION" ]; then + if [ -n "$ZKSTACKUP_COMMIT" ] || [ -n "$ZKSTACKUP_BRANCH" ]; then + warn "Ignoring --commit and --branch arguments when installing by version" + fi + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --tag "zkstack_cli-v$ZKSTACKUP_VERSION" --locked "${BINS[@]}" --force + elif [ -n "$ZKSTACKUP_COMMIT" ]; then + if [ -n "$ZKSTACKUP_BRANCH" ]; then + warn "Ignoring --branch argument when installing by commit" + fi + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --rev "$ZKSTACKUP_COMMIT" --locked "${BINS[@]}" --force + elif [ -n "$ZKSTACKUP_BRANCH" ]; then + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --branch "$ZKSTACKUP_BRANCH" --locked "${BINS[@]}" --force + else + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --locked "${BINS[@]}" --force + fi +} + +add_bin_folder_to_path() { + if [[ ":$PATH:" == *":${BIN_DIR}:"* ]]; then + echo "found ${BIN_DIR} in PATH" + exit 0 + fi + + case $SHELL in + */zsh) + PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" + ;; + */bash) + PROFILE="$HOME/.bashrc" + ;; + */fish) + PROFILE="$HOME/.config/fish/config.fish" + ;; + */ash) + PROFILE="$HOME/.profile" + ;; + *) + echo "could not detect shell, manually add ${BIN_DIR} to your PATH." + exit 1 + ;; + esac + + if [[ ! -f "$PROFILE" ]]; then + echo "Profile file $PROFILE does not exist, creating it." + touch "$PROFILE" + fi + + if [[ "$SHELL" == *"/fish"* ]]; then + echo -e "\n# Added by zkstackup\nfish_add_path -a $BIN_DIR" >>"$PROFILE" + echo "Added $BIN_DIR to PATH in $PROFILE using fish_add_path." + else + echo -e "\n# Added by zkstackup\nexport PATH=\"\$PATH:$BIN_DIR\"" >>"$PROFILE" + echo "Added $BIN_DIR to PATH in $PROFILE." + fi + + echo + echo "Added zkstack to PATH." + echo "Run 'source $PROFILE' or start a new terminal session to use zkstack." +} + +ensure() { + if ! 
"$@"; then + err "command failed: $*" + exit 1 + fi +} + +say() { + local action="${1%% *}" + local rest="${1#"$action" }" + + echo -e "\033[1;32m$action\033[0m $rest" +} + +success() { + echo -e "\033[1;32m$1\033[0m" +} + +warn() { + echo -e "\033[1;33mWARNING: $1\033[0m" +} + +err() { + echo -e "\033[1;31mERROR: $1\033[0m" >&2 +} + +zkstack_banner() { + printf ' + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + + ███████╗██╗ ██╗ ███████╗████████╗ █████╗ ██████╗██╗ ██╗ + ╚══███╔╝██║ ██╔╝ ██╔════╝╚══██╔══╝██╔══██╗██╔════╝██║ ██╔╝ + ███╔╝ █████╔╝ ███████╗ ██║ ███████║██║ █████╔╝ + ███╔╝ ██╔═██╗ ╚════██║ ██║ ██╔══██║██║ ██╔═██╗ + ███████╗██║ ██╗ ███████║ ██║ ██║ ██║╚██████╗██║ ██╗ + ╚══════╝╚═╝ ╚═╝ ╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝ + + + A Comprehensive Toolkit for Creating and Managing ZK Stack Chains + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +Repo : https://github.com/matter-labs/zksync-era/ +Docs : https://docs.zksync.io/ +Contribute : https://github.com/matter-labs/zksync-era/pulls + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +' +} + +main "$@" From bb5d1470d5e1e8e69d9b79c60284ea8adaee4038 Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Mon, 14 Oct 2024 23:38:58 +1100 Subject: [PATCH 055/140] fix(external-node): make fetcher rely on unsealed batches (#3088) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Originally though I should just make ENs ignore `OpenBatch` command when the batch is already present (see https://github.com/matter-labs/zksync-era/commit/19537f98a97df4539aaad96292db56739f9c9b75), but while writing a test I have realized that under normal execution EN cannot receive two `OpenBatch` commands for the same batch (even with re-execution/restart). Looking further I think fetcher logic is the actual culprit - the bug only reproduces when an unsealed batch is inserted but no associated L2 blocks have made it into the DB yet. ## Why ❔ Existing logic causes EN to panic sometimes ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --------- Co-authored-by: Danil --- core/lib/dal/src/blocks_dal.rs | 14 --------- core/node/consensus/src/testonly.rs | 9 +++--- core/node/node_sync/src/external_io.rs | 2 +- core/node/node_sync/src/fetcher.rs | 36 ++++++++++++++++++++++-- core/node/state_keeper/src/io/mempool.rs | 2 +- 5 files changed, 41 insertions(+), 22 deletions(-) diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index d59f95192c6..bf1b48130c4 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -2184,20 +2184,6 @@ impl BlocksDal<'_, '_> { Ok(Some((L2BlockNumber(min as u32), L2BlockNumber(max as u32)))) } - /// Returns `true` if there exists a non-sealed batch (i.e. there is one+ stored L2 block that isn't assigned - /// to any batch yet). - pub async fn pending_batch_exists(&mut self) -> DalResult { - let count = sqlx::query_scalar!( - "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL" - ) - .instrument("pending_batch_exists") - .fetch_one(self.storage) - .await? 
- .unwrap_or(0); - - Ok(count != 0) - } - // methods used for measuring Eth tx stage transition latencies // and emitting metrics base on these measured data pub async fn oldest_uncommitted_batch_timestamp(&mut self) -> DalResult> { diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 4ebcf5c9a61..98c0d6b0813 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -219,10 +219,11 @@ impl StateKeeper { .wait(IoCursor::for_fetcher(&mut conn.0)) .await? .context("IoCursor::new()")?; - let pending_batch = ctx - .wait(conn.0.blocks_dal().pending_batch_exists()) + let batch_sealed = ctx + .wait(conn.0.blocks_dal().get_unsealed_l1_batch()) .await? - .context("pending_batch_exists()")?; + .context("get_unsealed_l1_batch()")? + .is_none(); let (actions_sender, actions_queue) = ActionQueue::new(); let addr = sync::watch::channel(None).0; let sync_state = SyncState::default(); @@ -258,7 +259,7 @@ impl StateKeeper { last_batch: cursor.l1_batch, last_block: cursor.next_l2_block - 1, last_timestamp: cursor.prev_l2_block_timestamp, - batch_sealed: !pending_batch, + batch_sealed, next_priority_op: PriorityOpId(1), actions_sender, sync_state: sync_state.clone(), diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 9148f963868..10fb2925015 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -247,7 +247,7 @@ impl StateKeeperIO for ExternalIO { ); self.pool - .connection() + .connection_tagged("sync_layer") .await? .blocks_dal() .insert_l1_batch(UnsealedL1BatchHeader { diff --git a/core/node/node_sync/src/fetcher.rs b/core/node/node_sync/src/fetcher.rs index 51b9f7c7a06..3f8558ed0ac 100644 --- a/core/node/node_sync/src/fetcher.rs +++ b/core/node/node_sync/src/fetcher.rs @@ -114,8 +114,8 @@ impl IoCursorExt for IoCursor { let mut this = Self::new(storage).await?; // It's important to know whether we have opened a new batch already or just sealed the previous one. // Depending on it, we must either insert `OpenBatch` item into the queue, or not. 
- let was_new_batch_open = storage.blocks_dal().pending_batch_exists().await?; - if !was_new_batch_open { + let unsealed_batch = storage.blocks_dal().get_unsealed_l1_batch().await?; + if unsealed_batch.is_none() { this.l1_batch -= 1; // Should continue from the last L1 batch present in the storage } Ok(this) @@ -201,3 +201,35 @@ impl IoCursorExt for IoCursor { new_actions } } + +#[cfg(test)] +mod tests { + use zksync_dal::{ConnectionPool, Core, CoreDal}; + use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; + use zksync_state_keeper::io::IoCursor; + use zksync_types::{block::UnsealedL1BatchHeader, L1BatchNumber}; + + use crate::fetcher::IoCursorExt; + + #[tokio::test] + async fn io_cursor_recognizes_empty_unsealed_batch() -> anyhow::Result<()> { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + insert_genesis_batch(&mut conn, &GenesisParams::mock()) + .await + .unwrap(); + conn.blocks_dal() + .insert_l1_batch(UnsealedL1BatchHeader { + number: L1BatchNumber(1), + timestamp: 1, + protocol_version: None, + fee_address: Default::default(), + fee_input: Default::default(), + }) + .await?; + + let io_cursor = IoCursor::for_fetcher(&mut conn).await?; + assert_eq!(io_cursor.l1_batch, L1BatchNumber(1)); + Ok(()) + } +} diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 5a3fb8e4c4f..229f54132f7 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -225,7 +225,7 @@ impl StateKeeperIO for MempoolIO { } self.pool - .connection() + .connection_tagged("state_keeper") .await? .blocks_dal() .insert_l1_batch(UnsealedL1BatchHeader { From 4d527d4b44b6b083e2a813d48c79d8021ea6f843 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 14 Oct 2024 15:41:54 +0300 Subject: [PATCH 056/140] fix(api): Accept integer block count in `eth_feeHistory` (#3077) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ...besides hexadecimal count accepted previously. ## Why ❔ To be more compatible with Web3 tooling. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- core/lib/basic_types/src/web3/mod.rs | 29 +++++ core/lib/basic_types/src/web3/tests.rs | 10 ++ core/lib/eth_client/src/clients/http/query.rs | 16 +-- core/lib/web3_decl/src/namespaces/eth.rs | 5 +- core/lib/web3_decl/src/types.rs | 4 +- .../web3/backend_jsonrpsee/namespaces/eth.rs | 6 +- .../api_server/src/web3/namespaces/eth.rs | 7 +- core/node/api_server/src/web3/tests/mod.rs | 123 +++++++++++++++++- core/node/api_server/src/web3/tests/vm.rs | 30 +---- 9 files changed, 184 insertions(+), 46 deletions(-) diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index ecbe73f785b..aa7c4967033 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -21,6 +21,35 @@ mod tests; pub type Index = U64; +/// Number that can be either hex-encoded or decimal. 
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[serde(untagged)] +pub enum U64Number { + Hex(U64), + Number(u64), +} + +impl From for u64 { + fn from(value: U64Number) -> Self { + match value { + U64Number::Hex(number) => number.as_u64(), + U64Number::Number(number) => number, + } + } +} + +impl From for U64Number { + fn from(value: u64) -> Self { + Self::Number(value) + } +} + +impl From for U64Number { + fn from(value: U64) -> Self { + Self::Hex(value) + } +} + // `Signature`, `keccak256`: from `web3::signing` /// A struct that represents the components of a secp256k1 signature. diff --git a/core/lib/basic_types/src/web3/tests.rs b/core/lib/basic_types/src/web3/tests.rs index 7f85bf12eb8..70805ab8b39 100644 --- a/core/lib/basic_types/src/web3/tests.rs +++ b/core/lib/basic_types/src/web3/tests.rs @@ -128,3 +128,13 @@ fn test_bytes_serde_json() { let decoded: Bytes = serde_json::from_str(&encoded).unwrap(); assert_eq!(original, decoded); } + +#[test] +fn deserializing_u64_number() { + let number: U64Number = serde_json::from_value(serde_json::json!(123)).unwrap(); + assert_eq!(u64::from(number), 123); + let number: U64Number = serde_json::from_value(serde_json::json!("0x123")).unwrap(); + assert_eq!(u64::from(number), 0x123); + let number: U64Number = serde_json::from_value(serde_json::json!("123")).unwrap(); + assert_eq!(u64::from(number), 0x123); +} diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 54419f3b562..5e788509461 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -396,16 +396,12 @@ where let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); let chunk_size = chunk_end - chunk_start; - let fee_history = EthNamespaceClient::fee_history( - client, - U64::from(chunk_size), - zksync_types::api::BlockNumber::from(chunk_end), - vec![], - ) - .rpc_context("fee_history") - .with_arg("chunk_size", &chunk_size) - .with_arg("block", &chunk_end) - .await?; + let fee_history = client + .fee_history(U64::from(chunk_size).into(), chunk_end.into(), vec![]) + .rpc_context("fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("block", &chunk_end) + .await?; // Check that the lengths are the same. 
if fee_history.inner.base_fee_per_gas.len() != fee_history.l2_pubdata_price.len() { diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs index 9f271d80cbc..399773b845d 100644 --- a/core/lib/web3_decl/src/namespaces/eth.rs +++ b/core/lib/web3_decl/src/namespaces/eth.rs @@ -13,7 +13,8 @@ use zksync_types::{ use crate::{ client::{ForWeb3Network, L2}, types::{ - Block, Bytes, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, U256, U64, + Block, Bytes, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, U64Number, + U256, U64, }, }; @@ -180,7 +181,7 @@ pub trait EthNamespace { #[method(name = "feeHistory")] async fn fee_history( &self, - block_count: U64, + block_count: U64Number, newest_block: BlockNumber, reward_percentiles: Vec, ) -> RpcResult; diff --git a/core/lib/web3_decl/src/types.rs b/core/lib/web3_decl/src/types.rs index 9994d21107b..36ee48a54a1 100644 --- a/core/lib/web3_decl/src/types.rs +++ b/core/lib/web3_decl/src/types.rs @@ -16,7 +16,9 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; pub use zksync_types::{ api::{Block, BlockNumber, Log, TransactionReceipt, TransactionRequest}, ethabi, - web3::{BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, Work}, + web3::{ + BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, U64Number, Work, + }, Address, Transaction, H160, H256, H64, U256, U64, }; diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs index 15528c5b309..cc2209a35d3 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs @@ -4,7 +4,7 @@ use zksync_types::{ Log, Transaction, TransactionId, TransactionReceipt, TransactionVariant, }, transaction_request::CallRequest, - web3::{Bytes, Index, SyncState}, + web3::{Bytes, Index, SyncState, U64Number}, Address, H256, U256, U64, }; use zksync_web3_decl::{ @@ -260,11 +260,11 @@ impl EthNamespaceServer for EthNamespace { async fn fee_history( &self, - block_count: U64, + block_count: U64Number, newest_block: BlockNumber, reward_percentiles: Vec, ) -> RpcResult { - self.fee_history_impl(block_count, newest_block, reward_percentiles) + self.fee_history_impl(block_count.into(), newest_block, reward_percentiles) .await .map_err(|err| self.current_method().map_err(err)) } diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 008c529ec63..4439fc257cf 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -683,7 +683,7 @@ impl EthNamespace { pub async fn fee_history_impl( &self, - block_count: U64, + block_count: u64, newest_block: BlockNumber, reward_percentiles: Vec, ) -> Result { @@ -691,10 +691,7 @@ impl EthNamespace { .set_block_id(BlockId::Number(newest_block)); // Limit `block_count`. 
- let block_count = block_count - .as_u64() - .min(self.state.api_config.fee_history_limit) - .max(1); + let block_count = block_count.clamp(1, self.state.api_config.fee_history_limit); let mut connection = self.state.acquire_connection().await?; let newest_l2_block = self diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 77b0b1824c7..a8d90c281a7 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -21,6 +21,7 @@ use zksync_multivm::interface::{ TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionMetrics, }; +use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction, @@ -32,6 +33,7 @@ use zksync_system_constants::{ use zksync_types::{ api, block::{pack_block_info, L2BlockHasher, L2BlockHeader}, + fee_model::{BatchFeeInput, FeeParams}, get_nonce_key, l2::L2Tx, storage::get_code_key, @@ -54,7 +56,7 @@ use zksync_web3_decl::{ http_client::HttpClient, rpc_params, types::{ - error::{ErrorCode, OVERSIZED_RESPONSE_CODE}, + error::{ErrorCode, INVALID_PARAMS_CODE, OVERSIZED_RESPONSE_CODE}, ErrorObjectOwned, }, }, @@ -435,6 +437,14 @@ async fn store_events( Ok((tx_location, events)) } +fn scaled_sensible_fee_input(scale: f64) -> BatchFeeInput { + ::default_batch_fee_input_scaled( + FeeParams::sensible_v1_default(), + scale, + scale, + ) +} + #[derive(Debug)] struct HttpServerBasicsTest; @@ -1228,3 +1238,114 @@ impl HttpTest for GetBytecodeTest { async fn getting_bytecodes() { test_http_server(GetBytecodeTest).await; } + +#[derive(Debug)] +struct FeeHistoryTest; + +#[async_trait] +impl HttpTest for FeeHistoryTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut connection = pool.connection().await?; + let block1 = L2BlockHeader { + batch_fee_input: scaled_sensible_fee_input(1.0), + base_fee_per_gas: 100, + ..create_l2_block(1) + }; + store_custom_l2_block(&mut connection, &block1, &[]).await?; + let block2 = L2BlockHeader { + batch_fee_input: scaled_sensible_fee_input(2.0), + base_fee_per_gas: 200, + ..create_l2_block(2) + }; + store_custom_l2_block(&mut connection, &block2, &[]).await?; + + let all_pubdata_prices = [ + 0, + block1.batch_fee_input.fair_pubdata_price(), + block2.batch_fee_input.fair_pubdata_price(), + ] + .map(U256::from); + + let history = client + .fee_history(1_000.into(), api::BlockNumber::Latest, vec![]) + .await?; + assert_eq!(history.inner.oldest_block, 0.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [0, 100, 200, 200].map(U256::from) // The latest value is duplicated + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices); + // Values below are not filled. 
+ assert_eq!(history.inner.gas_used_ratio, [0.0; 3]); + assert_eq!(history.inner.base_fee_per_blob_gas, [U256::zero(); 4]); + assert_eq!(history.inner.blob_gas_used_ratio, [0.0; 3]); + + // Check supplying hexadecimal block count + let hex_history: api::FeeHistory = client + .request( + "eth_feeHistory", + rpc_params!["0xaa", "latest", [] as [f64; 0]], + ) + .await?; + assert_eq!(hex_history, history); + + // ...and explicitly decimal count (which should've been supplied in the first call) for exhaustiveness + let dec_history: api::FeeHistory = client + .request( + "eth_feeHistory", + rpc_params![1_000, "latest", [] as [f64; 0]], + ) + .await?; + assert_eq!(dec_history, history); + + // Check partial histories: blocks 0..=1 + let history = client + .fee_history(1_000.into(), api::BlockNumber::Number(1.into()), vec![]) + .await?; + assert_eq!(history.inner.oldest_block, 0.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [0, 100, 100].map(U256::from) + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[..2]); + + // Blocks 1..=2 + let history = client + .fee_history(2.into(), api::BlockNumber::Latest, vec![]) + .await?; + assert_eq!(history.inner.oldest_block, 1.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [100, 200, 200].map(U256::from) + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[1..]); + + // Blocks 1..=1 + let history = client + .fee_history(1.into(), api::BlockNumber::Number(1.into()), vec![]) + .await?; + assert_eq!(history.inner.oldest_block, 1.into()); + assert_eq!(history.inner.base_fee_per_gas, [100, 100].map(U256::from)); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[1..2]); + + // Non-existing newest block. + let err = client + .fee_history(1000.into(), api::BlockNumber::Number(100.into()), vec![]) + .await + .unwrap_err(); + assert_matches!( + err, + ClientError::Call(err) if err.code() == INVALID_PARAMS_CODE + ); + Ok(()) + } +} + +#[tokio::test] +async fn getting_fee_history() { + test_http_server(FeeHistoryTest).await; +} diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index e29ea246213..1f843e06fab 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -12,14 +12,10 @@ use api::state_override::{OverrideAccount, StateOverride}; use zksync_multivm::interface::{ ExecutionResult, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, }; -use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_types::{ - api::ApiStorageLog, - fee_model::{BatchFeeInput, FeeParams}, - get_intrinsic_constants, - transaction_request::CallRequest, - K256PrivateKey, L2ChainId, PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, - U256, + api::ApiStorageLog, fee_model::BatchFeeInput, get_intrinsic_constants, + transaction_request::CallRequest, K256PrivateKey, L2ChainId, PackedEthSignature, + StorageLogKind, StorageLogWithPreviousValue, U256, }; use zksync_utils::u256_to_h256; use zksync_vm_executor::oneshot::MockOneshotExecutor; @@ -42,11 +38,7 @@ impl ExpectedFeeInput { fn expect_for_block(&self, number: api::BlockNumber, scale: f64) { *self.0.lock().unwrap() = match number { api::BlockNumber::Number(number) => create_l2_block(number.as_u32()).batch_fee_input, - _ => ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - scale, - scale, - ), + _ => scaled_sensible_fee_input(scale), }; } @@ -165,12 +157,7 @@ impl HttpTest for CallTest { // Check that the method handler fetches fee inputs for 
recent blocks. To do that, we create a new block // with a large fee input; it should be loaded by `ApiFeeInputProvider` and override the input provided by the wrapped mock provider. let mut block_header = create_l2_block(2); - block_header.batch_fee_input = - ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - 2.5, - 2.5, - ); + block_header.batch_fee_input = scaled_sensible_fee_input(2.5); store_custom_l2_block(&mut connection, &block_header, &[]).await?; // Fee input is not scaled further as per `ApiFeeInputProvider` implementation self.fee_input.expect_custom(block_header.batch_fee_input); @@ -607,12 +594,7 @@ impl HttpTest for TraceCallTest { // Check that the method handler fetches fee inputs for recent blocks. To do that, we create a new block // with a large fee input; it should be loaded by `ApiFeeInputProvider` and override the input provided by the wrapped mock provider. let mut block_header = create_l2_block(2); - block_header.batch_fee_input = - ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - 3.0, - 3.0, - ); + block_header.batch_fee_input = scaled_sensible_fee_input(3.0); store_custom_l2_block(&mut connection, &block_header, &[]).await?; // Fee input is not scaled further as per `ApiFeeInputProvider` implementation self.fee_input.expect_custom(block_header.batch_fee_input); From bd4b9f719b26c587eb0d1d376425ee3bce1b0bbe Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Mon, 14 Oct 2024 15:12:12 +0200 Subject: [PATCH 057/140] ci: Fix new stage build CI (#3089) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Use correct matrix name. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .github/workflows/new-build-prover-template.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml index 944551c9871..53a9ed226a1 100644 --- a/.github/workflows/new-build-prover-template.yml +++ b/.github/workflows/new-build-prover-template.yml @@ -188,12 +188,12 @@ jobs: run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev docker buildx imagetools create \ - --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} - name: Login and push to Europe GAR run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev docker buildx imagetools create \ - --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} From a089f3feb916ccc9007d9c32ec909db694b7d9f4 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 14 Oct 2024 17:07:15 +0300 Subject: [PATCH 058/140] fix(vm): Check protocol version for fast VM (#3080) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixes a regression introduced by https://github.com/matter-labs/zksync-era/pull/2915: The protocol version is now not checked before instantiating a fast VM. ## Why ❔ Without this check, fast VM can be used with old protocol versions, which most likely will cause divergencies. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
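
For readers not familiar with the VM selection code, the behaviour this patch enforces can be summarised as: protocol versions older than VM 1.5.0 must never reach the fast VM. The `Vm::custom` constructor asserts this directly, and `BatchVm::new` silently falls back to the legacy VM for unsupported versions regardless of the configured `FastVmMode`. Below is a self-contained sketch of that gate using simplified stand-in types (the real `VmVersion`, `FastVmMode` and `BatchVm` live in `zksync_multivm`/`zksync_vm_executor`; the `select_vm` helper and the enum variants shown here are illustrative, not the actual API):

```rust
/// Simplified stand-ins for the real `VmVersion` / `FastVmMode` types.
#[derive(Clone, Copy, Debug, PartialEq)]
enum VmVersion {
    Vm1_4_2,
    Vm1_5_0IncreasedBootloaderMemory,
}

#[derive(Clone, Copy, Debug)]
enum FastVmMode {
    Old,
    New,
    Shadow,
}

#[derive(Debug, PartialEq)]
enum SelectedVm {
    Legacy,
    Fast,
    ShadowedFast,
}

/// Mirrors the check added to `vm_instance.rs` in this patch.
fn is_supported_by_fast_vm(version: VmVersion) -> bool {
    matches!(version, VmVersion::Vm1_5_0IncreasedBootloaderMemory)
}

/// Mirrors the fallback added to `BatchVm::new`: unsupported protocol
/// versions always get the legacy VM, whatever mode was requested.
fn select_vm(version: VmVersion, mode: FastVmMode) -> SelectedVm {
    if !is_supported_by_fast_vm(version) {
        return SelectedVm::Legacy;
    }
    match mode {
        FastVmMode::Old => SelectedVm::Legacy,
        FastVmMode::New => SelectedVm::Fast,
        FastVmMode::Shadow => SelectedVm::ShadowedFast,
    }
}

fn main() {
    // An old protocol version never reaches the fast VM, even in `New` mode.
    assert_eq!(select_vm(VmVersion::Vm1_4_2, FastVmMode::New), SelectedVm::Legacy);
    // The latest version honours the requested mode.
    assert_eq!(
        select_vm(VmVersion::Vm1_5_0IncreasedBootloaderMemory, FastVmMode::Shadow),
        SelectedVm::ShadowedFast,
    );
    println!("protocol version gate behaves as described");
}
```

The batch factory falls back rather than panicking, presumably so a node configured with `FastVmMode::New` or `FastVmMode::Shadow` can still replay batches produced under older protocol versions; only constructing a fast VM directly for such a version is treated as a programming error.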
--- Cargo.lock | 1 + core/lib/multivm/src/lib.rs | 2 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 7 +++ core/lib/multivm/src/vm_instance.rs | 10 +++- core/lib/vm_executor/Cargo.toml | 3 ++ core/lib/vm_executor/src/batch/factory.rs | 52 +++++++++++++++++++++ core/lib/vm_executor/src/lib.rs | 2 + core/lib/vm_executor/src/testonly.rs | 45 ++++++++++++++++++ 8 files changed, 120 insertions(+), 2 deletions(-) create mode 100644 core/lib/vm_executor/src/testonly.rs diff --git a/Cargo.lock b/Cargo.lock index f9f7a88764e..3913b27438e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11341,6 +11341,7 @@ name = "zksync_vm_executor" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "async-trait", "once_cell", "tokio", diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index e171a78e179..520274c14ae 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -16,7 +16,7 @@ pub use crate::{ vm_1_3_2, vm_1_4_1, vm_1_4_2, vm_boojum_integration, vm_fast, vm_latest, vm_m5, vm_m6, vm_refunds_enhancement, vm_virtual_blocks, }, - vm_instance::{FastVmInstance, LegacyVmInstance}, + vm_instance::{is_supported_by_fast_vm, FastVmInstance, LegacyVmInstance}, }; mod glue; diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 0c20af57e03..39c9b3c5656 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -40,6 +40,7 @@ use crate::{ VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, VmTrackingContracts, }, + is_supported_by_fast_vm, utils::events::extract_l2tol1logs_from_l1_messenger, vm_fast::{ bootloader_state::utils::{apply_l2_block, apply_pubdata_to_memory}, @@ -104,6 +105,12 @@ pub struct Vm { impl Vm { pub fn custom(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { + assert!( + is_supported_by_fast_vm(system_env.version), + "Protocol version {:?} is not supported by fast VM", + system_env.version + ); + let default_aa_code_hash = system_env .base_system_smart_contracts .default_aa diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index ac5693b6161..89707034523 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,6 +1,6 @@ use std::mem; -use zksync_types::{vm::VmVersion, Transaction}; +use zksync_types::{vm::VmVersion, ProtocolVersionId, Transaction}; use zksync_vm2::interface::Tracer; use crate::{ @@ -328,3 +328,11 @@ impl FastVmInstance { Self::Shadowed(ShadowedFastVm::new(l1_batch_env, system_env, storage_view)) } } + +/// Checks whether the protocol version is supported by the fast VM. 
+pub fn is_supported_by_fast_vm(protocol_version: ProtocolVersionId) -> bool { + matches!( + protocol_version.into(), + VmVersion::Vm1_5_0IncreasedBootloaderMemory + ) +} diff --git a/core/lib/vm_executor/Cargo.toml b/core/lib/vm_executor/Cargo.toml index 089c2a9bcca..a967aaa969a 100644 --- a/core/lib/vm_executor/Cargo.toml +++ b/core/lib/vm_executor/Cargo.toml @@ -23,3 +23,6 @@ tokio.workspace = true anyhow.workspace = true tracing.workspace = true vise.workspace = true + +[dev-dependencies] +assert_matches.workspace = true diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index 146f0bb4e5c..bc19086c969 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -12,6 +12,7 @@ use zksync_multivm::{ ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, + is_supported_by_fast_vm, tracers::CallTracer, vm_fast, vm_latest::HistoryEnabled, @@ -159,6 +160,10 @@ impl BatchVm { storage_ptr: StoragePtr>, mode: FastVmMode, ) -> Self { + if !is_supported_by_fast_vm(system_env.version) { + return Self::Legacy(LegacyVmInstance::new(l1_batch_env, system_env, storage_ptr)); + } + match mode { FastVmMode::Old => { Self::Legacy(LegacyVmInstance::new(l1_batch_env, system_env, storage_ptr)) @@ -443,3 +448,50 @@ impl CommandReceiver { } } } + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + use zksync_multivm::interface::{storage::InMemoryStorage, TxExecutionMode}; + use zksync_types::ProtocolVersionId; + + use super::*; + use crate::testonly::{default_l1_batch_env, default_system_env, FAST_VM_MODES}; + + #[test] + fn selecting_vm_for_execution() { + let l1_batch_env = default_l1_batch_env(1); + let mut system_env = SystemEnv { + version: ProtocolVersionId::Version22, + ..default_system_env(TxExecutionMode::VerifyExecute) + }; + let storage = StorageView::new(InMemoryStorage::default()).to_rc_ptr(); + for mode in FAST_VM_MODES { + let vm = BatchVm::<_, ()>::new( + l1_batch_env.clone(), + system_env.clone(), + storage.clone(), + mode, + ); + assert_matches!(vm, BatchVm::Legacy(_)); + } + + system_env.version = ProtocolVersionId::latest(); + let vm = BatchVm::<_, ()>::new( + l1_batch_env.clone(), + system_env.clone(), + storage.clone(), + FastVmMode::Old, + ); + assert_matches!(vm, BatchVm::Legacy(_)); + let vm = BatchVm::<_, ()>::new( + l1_batch_env.clone(), + system_env.clone(), + storage.clone(), + FastVmMode::New, + ); + assert_matches!(vm, BatchVm::Fast(FastVmInstance::Fast(_))); + let vm = BatchVm::<_, ()>::new(l1_batch_env, system_env, storage, FastVmMode::Shadow); + assert_matches!(vm, BatchVm::Fast(FastVmInstance::Shadowed(_))); + } +} diff --git a/core/lib/vm_executor/src/lib.rs b/core/lib/vm_executor/src/lib.rs index 1a0fbb002df..83edb77fd62 100644 --- a/core/lib/vm_executor/src/lib.rs +++ b/core/lib/vm_executor/src/lib.rs @@ -9,3 +9,5 @@ pub mod batch; pub mod oneshot; mod shared; pub mod storage; +#[cfg(test)] +mod testonly; diff --git a/core/lib/vm_executor/src/testonly.rs b/core/lib/vm_executor/src/testonly.rs new file mode 100644 index 00000000000..5bcd604a432 --- /dev/null +++ b/core/lib/vm_executor/src/testonly.rs @@ -0,0 +1,45 @@ +use once_cell::sync::Lazy; +use zksync_contracts::BaseSystemContracts; +use zksync_multivm::{ + interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; +use zksync_types::{ + block::L2BlockHasher, 
fee_model::BatchFeeInput, vm::FastVmMode, Address, L1BatchNumber, + L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, +}; + +static BASE_SYSTEM_CONTRACTS: Lazy = + Lazy::new(BaseSystemContracts::load_from_disk); + +pub(crate) const FAST_VM_MODES: [FastVmMode; 3] = + [FastVmMode::Old, FastVmMode::New, FastVmMode::Shadow]; + +pub(crate) fn default_system_env(execution_mode: TxExecutionMode) -> SystemEnv { + SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BASE_SYSTEM_CONTRACTS.clone(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::default(), + } +} + +pub(crate) fn default_l1_batch_env(number: u32) -> L1BatchEnv { + L1BatchEnv { + previous_batch_hash: Some(H256::zero()), + number: L1BatchNumber(number), + timestamp: number.into(), + fee_account: Address::repeat_byte(0x22), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number, + timestamp: number.into(), + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(number - 1)), + max_virtual_blocks_to_create: 1, + }, + fee_input: BatchFeeInput::sensible_l1_pegged_default(), + } +} From 0d63c0f2713c1c523ee153cc82673c92655c8973 Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Mon, 14 Oct 2024 16:19:16 +0200 Subject: [PATCH 059/140] chore(zkstack_cli): Remove temporary performance comparison code (#3090) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Remove temporary performance comparison code. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- .github/workflows/vm-perf-comparison.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index ccf8f370267..49830a30cc1 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -49,8 +49,8 @@ jobs: - name: run benchmarks on base branch shell: bash run: | - ci_run zkt || ci_run zkstackup -g --local # TODO remove zkt in an upcoming PR - ci_run zk_supervisor contracts --system-contracts || ci_run zkstack dev contracts --system-contracts # TODO remove zk_supervisor in an upcoming PR + ci_run zkstackup -g --local + ci_run zkstack dev contracts --system-contracts ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes From 602950c2428cb904b750933bfa98c2a7c7c87486 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Mon, 14 Oct 2024 16:27:46 +0200 Subject: [PATCH 060/140] ci: Revert back to old builder workflows (#3091) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Revert back to old builders workflows. Add correct protocol_version to WVGs tags instead of empty string. ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
- [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- .github/workflows/new-build-prover-template.yml | 8 ++++---- .github/workflows/release-test-stage.yml | 9 +++++---- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml index 53a9ed226a1..5d42696c0b2 100644 --- a/.github/workflows/new-build-prover-template.yml +++ b/.github/workflows/new-build-prover-template.yml @@ -188,12 +188,12 @@ jobs: run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev docker buildx imagetools create \ - --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} - name: Login and push to Europe GAR run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev docker buildx imagetools create \ - --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 5767584d5e1..18708420dab 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -61,7 +61,7 @@ jobs: build-push-core-images: name: Build and push images needs: [setup, changed_files] - uses: ./.github/workflows/new-build-core-template.yml + uses: ./.github/workflows/build-core-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} @@ -84,7 +84,7 @@ jobs: build-push-contract-verifier: name: Build and push images needs: [setup, changed_files] - uses: ./.github/workflows/new-build-contract-verifier-template.yml + uses: ./.github/workflows/build-contract-verifier-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} @@ -95,7 +95,7 @@ jobs: build-push-prover-images: name: Build and push images needs: [setup, changed_files] - uses: ./.github/workflows/new-build-prover-template.yml + uses: 
./.github/workflows/build-prover-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} @@ -108,10 +108,11 @@ jobs: build-push-witness-generator-image-avx512: name: Build and push prover images with avx512 instructions needs: [setup, changed_files] - uses: ./.github/workflows/new-build-witness-generator-template.yml + uses: ./.github/workflows/build-witness-generator-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 + ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: "60;70;75;80;89" WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl " secrets: From 977d157a77b61ddd86a3f6853698c8ddbaadf087 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Tue, 15 Oct 2024 11:35:51 +0400 Subject: [PATCH 061/140] chore(main): release core 24.29.0 (#3024) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit :robot: I have created a release *beep* *boop* --- ## [24.29.0](https://github.com/matter-labs/zksync-era/compare/core-v24.28.0...core-v24.29.0) (2024-10-14) ### Features * Add initial version prover_autoscaler ([#2993](https://github.com/matter-labs/zksync-era/issues/2993)) ([ebf9604](https://github.com/matter-labs/zksync-era/commit/ebf9604c5ab2a1cae1ffd2f9c922f35a1d0ad876)) * add metric to track current cbt ratio ([#3020](https://github.com/matter-labs/zksync-era/issues/3020)) ([3fd2fb1](https://github.com/matter-labs/zksync-era/commit/3fd2fb14e7283c6858731e162522e70051a8e162)) * **configs:** Add port parameter to ConsensusConfig ([#2986](https://github.com/matter-labs/zksync-era/issues/2986)) ([25112df](https://github.com/matter-labs/zksync-era/commit/25112df39d052f083bc45964f0298b3af5842cac)) * **configs:** Add port parameter to ConsensusConfig ([#3051](https://github.com/matter-labs/zksync-era/issues/3051)) ([038c397](https://github.com/matter-labs/zksync-era/commit/038c397ce842601da5109c460b09dbf9d51cf2fc)) * **consensus:** smooth transition to p2p syncing (BFT-515) ([#3075](https://github.com/matter-labs/zksync-era/issues/3075)) ([5d339b4](https://github.com/matter-labs/zksync-era/commit/5d339b46fee66bc3a45493586626d318380680dd)) * **consensus:** Support for syncing blocks before consensus genesis over p2p network ([#3040](https://github.com/matter-labs/zksync-era/issues/3040)) ([d3edc3d](https://github.com/matter-labs/zksync-era/commit/d3edc3d817c151ed00d4fa822fdae0a746e33356)) * **en:** periodically fetch bridge addresses ([#2949](https://github.com/matter-labs/zksync-era/issues/2949)) ([e984bfb](https://github.com/matter-labs/zksync-era/commit/e984bfb8a243bc746549ab9347dc0a367fe02790)) * **eth-sender:** add time_in_mempool_cap config ([#3018](https://github.com/matter-labs/zksync-era/issues/3018)) ([f6d86bd](https://github.com/matter-labs/zksync-era/commit/f6d86bd7935a1cdbb528b13437424031fda3cb8e)) * **eth-watch:** catch another reth error ([#3026](https://github.com/matter-labs/zksync-era/issues/3026)) ([4640c42](https://github.com/matter-labs/zksync-era/commit/4640c4233af46c97f207d2dbce5fedd1bcb66c43)) * Handle new yul compilation flow ([#3038](https://github.com/matter-labs/zksync-era/issues/3038)) 
([4035361](https://github.com/matter-labs/zksync-era/commit/40353616f278800dc80fcbe5f2a6483019033b20)) * **state-keeper:** pre-insert unsealed L1 batches ([#2846](https://github.com/matter-labs/zksync-era/issues/2846)) ([e5b5a3b](https://github.com/matter-labs/zksync-era/commit/e5b5a3b7b62e8d4035fe89c2a287bf3606d17bc5)) * **vm:** EVM emulator support – base ([#2979](https://github.com/matter-labs/zksync-era/issues/2979)) ([deafa46](https://github.com/matter-labs/zksync-era/commit/deafa460715334a77edf9fe8aa76fa90029342c4)) * **zk_toolbox:** added support for setting attester committee defined in a separate file ([#2992](https://github.com/matter-labs/zksync-era/issues/2992)) ([6105514](https://github.com/matter-labs/zksync-era/commit/610551427d5ab129f91e69b5efb318da917457d7)) * **zk_toolbox:** Redesign zk_toolbox commands ([#3003](https://github.com/matter-labs/zksync-era/issues/3003)) ([114834f](https://github.com/matter-labs/zksync-era/commit/114834f357421c62d596a1954fac8ce615cfde49)) * **zktoolbox:** added checking the contract owner in set-attester-committee command ([#3061](https://github.com/matter-labs/zksync-era/issues/3061)) ([9b0a606](https://github.com/matter-labs/zksync-era/commit/9b0a6067923c5276f560f3abccedc4e6a5167dda)) ### Bug Fixes * **api:** Accept integer block count in `eth_feeHistory` ([#3077](https://github.com/matter-labs/zksync-era/issues/3077)) ([4d527d4](https://github.com/matter-labs/zksync-era/commit/4d527d4b44b6b083e2a813d48c79d8021ea6f843)) * **api:** Adapt `eth_getCode` to EVM emulator ([#3073](https://github.com/matter-labs/zksync-era/issues/3073)) ([15fe5a6](https://github.com/matter-labs/zksync-era/commit/15fe5a62f03cd103afd7fa5eb03e27db25686ba9)) * bincode deserialization for VM run data ([#3044](https://github.com/matter-labs/zksync-era/issues/3044)) ([b0ec79f](https://github.com/matter-labs/zksync-era/commit/b0ec79fcb7fa120f095d987f53c67fdab92e2c79)) * bincode deserialize for WitnessInputData ([#3055](https://github.com/matter-labs/zksync-era/issues/3055)) ([91d0595](https://github.com/matter-labs/zksync-era/commit/91d0595631cc5f5bffc42a4b04d5015d2be659b1)) * **external-node:** make fetcher rely on unsealed batches ([#3088](https://github.com/matter-labs/zksync-era/issues/3088)) ([bb5d147](https://github.com/matter-labs/zksync-era/commit/bb5d1470d5e1e8e69d9b79c60284ea8adaee4038)) * **state-keeper:** ensure unsealed batch is present during IO init ([#3071](https://github.com/matter-labs/zksync-era/issues/3071)) ([bdeb411](https://github.com/matter-labs/zksync-era/commit/bdeb411c593ac3d5e16158e64c4210bb00edcb0c)) * **vm:** Check protocol version for fast VM ([#3080](https://github.com/matter-labs/zksync-era/issues/3080)) ([a089f3f](https://github.com/matter-labs/zksync-era/commit/a089f3feb916ccc9007d9c32ec909db694b7d9f4)) * **vm:** Prepare new VM for use in API server and fix divergences ([#2994](https://github.com/matter-labs/zksync-era/issues/2994)) ([741b77e](https://github.com/matter-labs/zksync-era/commit/741b77e080f75c6a93d3ee779b1c9ce4297618f9)) ### Reverts * **configs:** Add port parameter to ConsensusConfig ([#2986](https://github.com/matter-labs/zksync-era/issues/2986)) ([#3046](https://github.com/matter-labs/zksync-era/issues/3046)) ([abe35bf](https://github.com/matter-labs/zksync-era/commit/abe35bf7aea1120b77fdbd413d927e45da48d26c)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
--------- Co-authored-by: zksync-era-bot --- .github/release-please/manifest.json | 2 +- Cargo.lock | 2 +- core/CHANGELOG.md | 38 ++++++++++++++++++++++++++++ core/bin/external_node/Cargo.toml | 2 +- 4 files changed, 41 insertions(+), 3 deletions(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index ca19e91219d..a56866a8bd7 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.28.0", + "core": "24.29.0", "prover": "16.5.0", "zkstack_cli": "0.1.2" } diff --git a/Cargo.lock b/Cargo.lock index 3913b27438e..327b16eeaa4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10201,7 +10201,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.28.0" +version = "24.29.0" dependencies = [ "anyhow", "assert_matches", diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index b2f27a6630c..59b49af1554 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,43 @@ # Changelog +## [24.29.0](https://github.com/matter-labs/zksync-era/compare/core-v24.28.0...core-v24.29.0) (2024-10-14) + + +### Features + +* Add initial version prover_autoscaler ([#2993](https://github.com/matter-labs/zksync-era/issues/2993)) ([ebf9604](https://github.com/matter-labs/zksync-era/commit/ebf9604c5ab2a1cae1ffd2f9c922f35a1d0ad876)) +* add metric to track current cbt ratio ([#3020](https://github.com/matter-labs/zksync-era/issues/3020)) ([3fd2fb1](https://github.com/matter-labs/zksync-era/commit/3fd2fb14e7283c6858731e162522e70051a8e162)) +* **configs:** Add port parameter to ConsensusConfig ([#2986](https://github.com/matter-labs/zksync-era/issues/2986)) ([25112df](https://github.com/matter-labs/zksync-era/commit/25112df39d052f083bc45964f0298b3af5842cac)) +* **configs:** Add port parameter to ConsensusConfig ([#3051](https://github.com/matter-labs/zksync-era/issues/3051)) ([038c397](https://github.com/matter-labs/zksync-era/commit/038c397ce842601da5109c460b09dbf9d51cf2fc)) +* **consensus:** smooth transition to p2p syncing (BFT-515) ([#3075](https://github.com/matter-labs/zksync-era/issues/3075)) ([5d339b4](https://github.com/matter-labs/zksync-era/commit/5d339b46fee66bc3a45493586626d318380680dd)) +* **consensus:** Support for syncing blocks before consensus genesis over p2p network ([#3040](https://github.com/matter-labs/zksync-era/issues/3040)) ([d3edc3d](https://github.com/matter-labs/zksync-era/commit/d3edc3d817c151ed00d4fa822fdae0a746e33356)) +* **en:** periodically fetch bridge addresses ([#2949](https://github.com/matter-labs/zksync-era/issues/2949)) ([e984bfb](https://github.com/matter-labs/zksync-era/commit/e984bfb8a243bc746549ab9347dc0a367fe02790)) +* **eth-sender:** add time_in_mempool_cap config ([#3018](https://github.com/matter-labs/zksync-era/issues/3018)) ([f6d86bd](https://github.com/matter-labs/zksync-era/commit/f6d86bd7935a1cdbb528b13437424031fda3cb8e)) +* **eth-watch:** catch another reth error ([#3026](https://github.com/matter-labs/zksync-era/issues/3026)) ([4640c42](https://github.com/matter-labs/zksync-era/commit/4640c4233af46c97f207d2dbce5fedd1bcb66c43)) +* Handle new yul compilation flow ([#3038](https://github.com/matter-labs/zksync-era/issues/3038)) ([4035361](https://github.com/matter-labs/zksync-era/commit/40353616f278800dc80fcbe5f2a6483019033b20)) +* **state-keeper:** pre-insert unsealed L1 batches ([#2846](https://github.com/matter-labs/zksync-era/issues/2846)) ([e5b5a3b](https://github.com/matter-labs/zksync-era/commit/e5b5a3b7b62e8d4035fe89c2a287bf3606d17bc5)) 
+* **vm:** EVM emulator support – base ([#2979](https://github.com/matter-labs/zksync-era/issues/2979)) ([deafa46](https://github.com/matter-labs/zksync-era/commit/deafa460715334a77edf9fe8aa76fa90029342c4)) +* **zk_toolbox:** added support for setting attester committee defined in a separate file ([#2992](https://github.com/matter-labs/zksync-era/issues/2992)) ([6105514](https://github.com/matter-labs/zksync-era/commit/610551427d5ab129f91e69b5efb318da917457d7)) +* **zk_toolbox:** Redesign zk_toolbox commands ([#3003](https://github.com/matter-labs/zksync-era/issues/3003)) ([114834f](https://github.com/matter-labs/zksync-era/commit/114834f357421c62d596a1954fac8ce615cfde49)) +* **zktoolbox:** added checking the contract owner in set-attester-committee command ([#3061](https://github.com/matter-labs/zksync-era/issues/3061)) ([9b0a606](https://github.com/matter-labs/zksync-era/commit/9b0a6067923c5276f560f3abccedc4e6a5167dda)) + + +### Bug Fixes + +* **api:** Accept integer block count in `eth_feeHistory` ([#3077](https://github.com/matter-labs/zksync-era/issues/3077)) ([4d527d4](https://github.com/matter-labs/zksync-era/commit/4d527d4b44b6b083e2a813d48c79d8021ea6f843)) +* **api:** Adapt `eth_getCode` to EVM emulator ([#3073](https://github.com/matter-labs/zksync-era/issues/3073)) ([15fe5a6](https://github.com/matter-labs/zksync-era/commit/15fe5a62f03cd103afd7fa5eb03e27db25686ba9)) +* bincode deserialization for VM run data ([#3044](https://github.com/matter-labs/zksync-era/issues/3044)) ([b0ec79f](https://github.com/matter-labs/zksync-era/commit/b0ec79fcb7fa120f095d987f53c67fdab92e2c79)) +* bincode deserialize for WitnessInputData ([#3055](https://github.com/matter-labs/zksync-era/issues/3055)) ([91d0595](https://github.com/matter-labs/zksync-era/commit/91d0595631cc5f5bffc42a4b04d5015d2be659b1)) +* **external-node:** make fetcher rely on unsealed batches ([#3088](https://github.com/matter-labs/zksync-era/issues/3088)) ([bb5d147](https://github.com/matter-labs/zksync-era/commit/bb5d1470d5e1e8e69d9b79c60284ea8adaee4038)) +* **state-keeper:** ensure unsealed batch is present during IO init ([#3071](https://github.com/matter-labs/zksync-era/issues/3071)) ([bdeb411](https://github.com/matter-labs/zksync-era/commit/bdeb411c593ac3d5e16158e64c4210bb00edcb0c)) +* **vm:** Check protocol version for fast VM ([#3080](https://github.com/matter-labs/zksync-era/issues/3080)) ([a089f3f](https://github.com/matter-labs/zksync-era/commit/a089f3feb916ccc9007d9c32ec909db694b7d9f4)) +* **vm:** Prepare new VM for use in API server and fix divergences ([#2994](https://github.com/matter-labs/zksync-era/issues/2994)) ([741b77e](https://github.com/matter-labs/zksync-era/commit/741b77e080f75c6a93d3ee779b1c9ce4297618f9)) + + +### Reverts + +* **configs:** Add port parameter to ConsensusConfig ([#2986](https://github.com/matter-labs/zksync-era/issues/2986)) ([#3046](https://github.com/matter-labs/zksync-era/issues/3046)) ([abe35bf](https://github.com/matter-labs/zksync-era/commit/abe35bf7aea1120b77fdbd413d927e45da48d26c)) + ## [24.28.0](https://github.com/matter-labs/zksync-era/compare/core-v24.27.0...core-v24.28.0) (2024-10-02) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 086d381ecc3..25f2400c79b 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.28.0" # x-release-please-version +version = "24.29.0" # x-release-please-version 
edition.workspace = true authors.workspace = true homepage.workspace = true From c1cb30e59ca1d0b5fea5fe0980082aea0eb04aa2 Mon Sep 17 00:00:00 2001 From: Jacob Lindahl Date: Tue, 15 Oct 2024 04:27:38 -0500 Subject: [PATCH 062/140] feat: Add CoinMarketCap external API (#2971) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds support for CoinMarketCap as a price API for base token prices. ## Why ❔ Currently, the only supported API is CoinGecko. It's not ideal to only support a single 3rd party API here, so this provides some more degrees of freedom, and makes base token less reliant on a single third-party provider. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. (See #2970) - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 2 + core/bin/zksync_server/src/node_builder.rs | 24 +- core/lib/external_price_api/Cargo.toml | 4 + core/lib/external_price_api/src/cmc_api.rs | 357 ++++++++++++++++++ core/lib/external_price_api/src/lib.rs | 1 + core/lib/external_price_api/src/tests.rs | 8 +- .../layers/base_token/coingecko_client.rs | 55 --- .../layers/base_token/forced_price_client.rs | 52 --- .../implementations/layers/base_token/mod.rs | 93 ++++- .../no_op_external_price_api_client.rs | 45 --- 10 files changed, 463 insertions(+), 178 deletions(-) create mode 100644 core/lib/external_price_api/src/cmc_api.rs delete mode 100644 core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs delete mode 100644 core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs delete mode 100644 core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs diff --git a/Cargo.lock b/Cargo.lock index 327b16eeaa4..11c37bda57f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10268,7 +10268,9 @@ dependencies = [ "rand 0.8.5", "reqwest 0.12.7", "serde", + "serde_json", "tokio", + "tracing", "url", "zksync_config", "zksync_types", diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 4600b0f9e54..9fdbc129b19 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -19,9 +19,7 @@ use zksync_node_framework::{ implementations::layers::{ base_token::{ base_token_ratio_persister::BaseTokenRatioPersisterLayer, - base_token_ratio_provider::BaseTokenRatioProviderLayer, - coingecko_client::CoingeckoClientLayer, forced_price_client::ForcedPriceClientLayer, - no_op_external_price_api_client::NoOpExternalPriceApiClientLayer, + base_token_ratio_provider::BaseTokenRatioProviderLayer, ExternalPriceApiLayer, }, circuit_breaker_checker::CircuitBreakerCheckerLayer, commitment_generator::CommitmentGeneratorLayer, @@ -557,24 +555,8 @@ impl MainNodeBuilder { fn add_external_api_client_layer(mut self) -> anyhow::Result { let config = try_load_config!(self.configs.external_price_api_client_config); - match config.source.as_str() { - CoingeckoClientLayer::CLIENT_NAME => { - self.node.add_layer(CoingeckoClientLayer::new(config)); - } - NoOpExternalPriceApiClientLayer::CLIENT_NAME => { - self.node.add_layer(NoOpExternalPriceApiClientLayer); - } - ForcedPriceClientLayer::CLIENT_NAME => { - self.node.add_layer(ForcedPriceClientLayer::new(config)); - } - _ => { - anyhow::bail!( - "Unknown external price API client source: {}", - 
config.source - ); - } - } - + self.node + .add_layer(ExternalPriceApiLayer::try_from(config)?); Ok(self) } diff --git a/core/lib/external_price_api/Cargo.toml b/core/lib/external_price_api/Cargo.toml index 3eee675b4e6..1e849f60006 100644 --- a/core/lib/external_price_api/Cargo.toml +++ b/core/lib/external_price_api/Cargo.toml @@ -20,8 +20,12 @@ serde.workspace = true reqwest = { workspace = true, features = ["json"] } fraction.workspace = true rand.workspace = true +tracing.workspace = true zksync_config.workspace = true zksync_types.workspace = true tokio.workspace = true + +[dev-dependencies] httpmock.workspace = true +serde_json.workspace = true diff --git a/core/lib/external_price_api/src/cmc_api.rs b/core/lib/external_price_api/src/cmc_api.rs new file mode 100644 index 00000000000..05cb5e4d728 --- /dev/null +++ b/core/lib/external_price_api/src/cmc_api.rs @@ -0,0 +1,357 @@ +use std::{collections::HashMap, str::FromStr}; + +use async_trait::async_trait; +use chrono::Utc; +use serde::Deserialize; +use tokio::sync::RwLock; +use url::Url; +use zksync_config::configs::ExternalPriceApiClientConfig; +use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address}; + +use crate::{address_to_string, utils::get_fraction, PriceAPIClient}; + +const AUTH_HEADER: &str = "x-cmc_pro_api_key"; +const DEFAULT_API_URL: &str = "https://pro-api.coinmarketcap.com"; +const ALLOW_TOKENS_ONLY_ON_PLATFORM_ID: i32 = 1; // 1 = Ethereum +const REQUEST_QUOTE_IN_CURRENCY_ID: &str = "1027"; // 1027 = ETH + +#[derive(Debug)] +pub struct CmcPriceApiClient { + base_url: Url, + client: reqwest::Client, + cache_token_id_by_address: RwLock>, +} + +impl CmcPriceApiClient { + pub fn new(config: ExternalPriceApiClientConfig) -> Self { + let client = if let Some(api_key) = &config.api_key { + use reqwest::header::{HeaderMap, HeaderName, HeaderValue}; + + let default_headers = HeaderMap::from_iter([( + HeaderName::from_static(AUTH_HEADER), + HeaderValue::from_str(api_key).expect("Failed to create header value"), + )]); + + reqwest::Client::builder().default_headers(default_headers) + } else { + reqwest::Client::builder() + } + .timeout(config.client_timeout()) + .build() + .expect("Failed to build reqwest client"); + + let base_url = config.base_url.unwrap_or(DEFAULT_API_URL.to_string()); + let base_url = Url::parse(&base_url).expect("Failed to parse CoinMarketCap API URL"); + + Self { + base_url, + client, + cache_token_id_by_address: RwLock::default(), + } + } + + fn get(&self, path: &str) -> reqwest::RequestBuilder { + self.client + .get(self.base_url.join(path).expect("Failed to join URL path")) + } + + async fn get_token_id(&self, address: Address) -> anyhow::Result { + if let Some(x) = self.cache_token_id_by_address.read().await.get(&address) { + return Ok(*x); + } + + let response = self.get("/v1/cryptocurrency/map").send().await?; + let status = response.status(); + if !status.is_success() { + return Err(anyhow::anyhow!( + "Http error while fetching token id. 
Status: {status}, token: {address}, msg: {}", + response.text().await.unwrap_or_default(), + )); + } + + let parsed = response.json::().await?; + for token_info in parsed.data { + if let Some(platform) = token_info.platform { + if platform.id == ALLOW_TOKENS_ONLY_ON_PLATFORM_ID + && Address::from_str(&platform.token_address).is_ok_and(|a| a == address) + { + if token_info.is_active != 1 { + tracing::warn!( + "CoinMarketCap API reports token {} ({}) on platform {} ({}) is not active", + address_to_string(&address), + token_info.name, + platform.id, + platform.name, + ); + } + + self.cache_token_id_by_address + .write() + .await + .insert(address, token_info.id); + return Ok(token_info.id); + } + } + } + + Err(anyhow::anyhow!("Token ID not found for address {address}")) + } + + async fn get_token_price_by_address(&self, address: Address) -> anyhow::Result { + let id = self.get_token_id(address).await?; + self.get_token_price_by_id(id).await + } + + async fn get_token_price_by_id(&self, id: i32) -> anyhow::Result { + let response = self + .get("/v2/cryptocurrency/quotes/latest") + .query(&[("id", id)]) + .query(&[("convert_id", REQUEST_QUOTE_IN_CURRENCY_ID)]) + .send() + .await?; + + let status = response.status(); + if !status.is_success() { + return Err(anyhow::anyhow!( + "Http error while fetching token price. Status: {status}, token: {id}, msg: {}", + response.text().await.unwrap_or_default(), + )); + } + + response + .json::() + .await? + .data + .get(&id) + .and_then(|data| data.quote.get(REQUEST_QUOTE_IN_CURRENCY_ID)) + .map(|mq| mq.price) + .ok_or_else(|| anyhow::anyhow!("Price not found for token: {id}")) + } +} + +#[derive(Debug, Deserialize)] +struct V2CryptocurrencyQuotesLatestResponse { + data: HashMap, +} + +#[derive(Debug, Deserialize)] +struct CryptocurrencyQuoteObject { + quote: HashMap, +} + +#[derive(Debug, Deserialize)] +struct MarketQuote { + price: f64, +} + +#[derive(Debug, Deserialize)] +struct V1CryptocurrencyMapResponse { + data: Vec, +} + +#[derive(Debug, Deserialize)] +struct CryptocurrencyObject { + id: i32, + name: String, + is_active: u8, + platform: Option, +} + +#[derive(Debug, Deserialize)] +struct CryptocurrencyPlatform { + id: i32, + name: String, + token_address: String, +} + +#[async_trait] +impl PriceAPIClient for CmcPriceApiClient { + async fn fetch_ratio(&self, token_address: Address) -> anyhow::Result { + let base_token_in_eth = self.get_token_price_by_address(token_address).await?; + let (term_ether, term_base_token) = get_fraction(base_token_in_eth)?; + + return Ok(BaseTokenAPIRatio { + numerator: term_base_token, + denominator: term_ether, + ratio_timestamp: Utc::now(), + }); + } +} + +#[cfg(test)] +mod tests { + use httpmock::prelude::*; + use serde_json::json; + + use super::*; + use crate::tests::*; + + fn make_client(server: &MockServer, api_key: Option) -> Box { + Box::new(CmcPriceApiClient::new(ExternalPriceApiClientConfig { + source: "coinmarketcap".to_string(), + base_url: Some(server.base_url()), + api_key, + client_timeout_ms: 5000, + forced: None, + })) + } + + fn make_mock_server() -> MockServer { + let mock_server = MockServer::start(); + // cryptocurrency map + mock_server.mock(|when, then| { + when.method(GET) + .header_exists(AUTH_HEADER) + .path("/v1/cryptocurrency/map"); + then.status(200) + .header("content-type", "application/json") + .json_body(json!({ + "status": { + "timestamp": "2024-09-25T11:29:38.440Z", + "error_code": 0, + "error_message": null, + "elapsed": 351, + "credit_count": 1, + "notice": null + }, + "data": [ + { 
+ "id": 7083, + "rank": 26, + "name": "Uniswap", + "symbol": "UNI", + "slug": "uniswap", + "is_active": 1, + "first_historical_data": "2020-09-17T01:10:00.000Z", + "last_historical_data": "2024-09-25T11:25:00.000Z", + "platform": { + "id": 1, + "name": "Ethereum", + "symbol": "ETH", + "slug": "ethereum", + "token_address": "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984" + } + } + ] + })); + }); + + // cryptocurrency quote + mock_server.mock(|when, then| { + // TODO: check for api authentication header + when.method(GET) + .header_exists(AUTH_HEADER) + .path("/v2/cryptocurrency/quotes/latest") + .query_param("id", "7083") // Uniswap + .query_param("convert_id", "1027"); // Ether + then.status(200) + .header("content-type", "application/json") + .json_body(json!({ + "status": { + "timestamp": "2024-10-02T14:15:07.189Z", + "error_code": 0, + "error_message": null, + "elapsed": 39, + "credit_count": 1, + "notice": null + }, + "data": { + "7083": { + "id": 7083, + "name": "Uniswap", + "symbol": "UNI", + "slug": "uniswap", + "date_added": "2020-09-17T00:00:00.000Z", + "tags": [], + "max_supply": null, + "circulating_supply": 600294743.71, + "total_supply": 1000000000, + "platform": { + "id": 1027, + "name": "Ethereum", + "symbol": "ETH", + "slug": "ethereum", + "token_address": "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984" + }, + "is_active": 1, + "infinite_supply": false, + "cmc_rank": 22, + "is_fiat": 0, + "last_updated": "2024-10-02T14:13:00.000Z", + "quote": { + "1027": { + "price": 0.0028306661720164175, + "last_updated": "2024-10-02T14:12:00.000Z" + } + } + } + } + })); + }); + + mock_server + } + + #[tokio::test] + async fn mock_happy() { + let server = make_mock_server(); + let client = make_client( + &server, + Some("00000000-0000-0000-0000-000000000000".to_string()), + ); + + let token_address: Address = TEST_TOKEN_ADDRESS.parse().unwrap(); + + let api_price = client.fetch_ratio(token_address).await.unwrap(); + + const REPORTED_PRICE: f64 = 1_f64 / 0.0028306661720164175_f64; + const EPSILON: f64 = 0.000001_f64 * REPORTED_PRICE; + + assert!((approximate_value(&api_price) - REPORTED_PRICE).abs() < EPSILON); + } + + #[tokio::test] + #[should_panic = "Request did not match any route or mock"] + async fn mock_fail_no_api_key() { + let server = make_mock_server(); + let client = make_client(&server, None); + + let token_address: Address = TEST_TOKEN_ADDRESS.parse().unwrap(); + + client.fetch_ratio(token_address).await.unwrap(); + } + + #[tokio::test] + #[should_panic = "Token ID not found for address"] + async fn mock_fail_not_found() { + let server = make_mock_server(); + let client = make_client( + &server, + Some("00000000-0000-0000-0000-000000000000".to_string()), + ); + + let token_address: Address = Address::random(); + + client.fetch_ratio(token_address).await.unwrap(); + } + + #[tokio::test] + #[ignore = "run manually (accesses network); specify CoinMarketCap API key in env var CMC_API_KEY"] + async fn real_cmc_tether() { + let client = CmcPriceApiClient::new(ExternalPriceApiClientConfig { + api_key: Some(std::env::var("CMC_API_KEY").unwrap()), + base_url: None, + client_timeout_ms: 5000, + source: "coinmarketcap".to_string(), + forced: None, + }); + + let tether: Address = "0xdac17f958d2ee523a2206206994597c13d831ec7" + .parse() + .unwrap(); + + let r = client.get_token_price_by_address(tether).await.unwrap(); + + println!("{r}"); + } +} diff --git a/core/lib/external_price_api/src/lib.rs b/core/lib/external_price_api/src/lib.rs index 7a068f9b1cb..01fc433802b 100644 --- 
a/core/lib/external_price_api/src/lib.rs +++ b/core/lib/external_price_api/src/lib.rs @@ -1,3 +1,4 @@ +pub mod cmc_api; pub mod coingecko_api; pub mod forced_price_client; #[cfg(test)] diff --git a/core/lib/external_price_api/src/tests.rs b/core/lib/external_price_api/src/tests.rs index bb2af866cf5..fd6a8b9928f 100644 --- a/core/lib/external_price_api/src/tests.rs +++ b/core/lib/external_price_api/src/tests.rs @@ -2,13 +2,13 @@ use std::str::FromStr; use chrono::Utc; use httpmock::MockServer; -use zksync_types::Address; +use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address}; use crate::PriceAPIClient; const TIME_TOLERANCE_MS: i64 = 100; /// Uniswap (UNI) -const TEST_TOKEN_ADDRESS: &str = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"; +pub const TEST_TOKEN_ADDRESS: &str = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"; /// 1UNI = 0.00269ETH const TEST_TOKEN_PRICE_ETH: f64 = 0.00269; /// 1ETH = 371.74UNI; When converting gas price from ETH to UNI @@ -16,6 +16,10 @@ const TEST_TOKEN_PRICE_ETH: f64 = 0.00269; const TEST_BASE_PRICE: f64 = 371.74; const PRICE_FLOAT_COMPARE_TOLERANCE: f64 = 0.1; +pub(crate) fn approximate_value(api_price: &BaseTokenAPIRatio) -> f64 { + api_price.numerator.get() as f64 / api_price.denominator.get() as f64 +} + pub(crate) struct SetupResult { pub(crate) client: Box, } diff --git a/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs b/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs deleted file mode 100644 index 14ab568c2f3..00000000000 --- a/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::sync::Arc; - -use zksync_config::configs::ExternalPriceApiClientConfig; -use zksync_external_price_api::coingecko_api::CoinGeckoPriceAPIClient; - -use crate::{ - implementations::resources::price_api_client::PriceAPIClientResource, - wiring_layer::{WiringError, WiringLayer}, - IntoContext, -}; - -/// Wiring layer for `CoingeckoApiClient` -/// -/// Responsible for inserting a resource with a client to get base token prices from CoinGecko to be -/// used by the `BaseTokenRatioPersister`. -#[derive(Debug)] -pub struct CoingeckoClientLayer { - config: ExternalPriceApiClientConfig, -} - -impl CoingeckoClientLayer { - /// Identifier of used client type. - /// Can be used to choose the layer for the client based on configuration variables. 
- pub const CLIENT_NAME: &'static str = "coingecko"; -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub price_api_client: PriceAPIClientResource, -} - -impl CoingeckoClientLayer { - pub fn new(config: ExternalPriceApiClientConfig) -> Self { - Self { config } - } -} - -#[async_trait::async_trait] -impl WiringLayer for CoingeckoClientLayer { - type Input = (); - type Output = Output; - - fn layer_name(&self) -> &'static str { - "coingecko_api_client" - } - - async fn wire(self, _input: Self::Input) -> Result { - let cg_client = Arc::new(CoinGeckoPriceAPIClient::new(self.config)); - - Ok(Output { - price_api_client: cg_client.into(), - }) - } -} diff --git a/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs b/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs deleted file mode 100644 index 67785dc26ed..00000000000 --- a/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::sync::Arc; - -use zksync_config::configs::ExternalPriceApiClientConfig; -use zksync_external_price_api::forced_price_client::ForcedPriceClient; - -use crate::{ - implementations::resources::price_api_client::PriceAPIClientResource, - wiring_layer::{WiringError, WiringLayer}, - IntoContext, -}; - -/// Wiring layer for `ForcedPriceClient` -/// -/// Inserts a resource with a forced configured price to be used by the `BaseTokenRatioPersister`. -#[derive(Debug)] -pub struct ForcedPriceClientLayer { - config: ExternalPriceApiClientConfig, -} - -impl ForcedPriceClientLayer { - pub fn new(config: ExternalPriceApiClientConfig) -> Self { - Self { config } - } - - /// Identifier of used client type. - /// Can be used to choose the layer for the client based on configuration variables. 
- pub const CLIENT_NAME: &'static str = "forced"; -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub price_api_client: PriceAPIClientResource, -} - -#[async_trait::async_trait] -impl WiringLayer for ForcedPriceClientLayer { - type Input = (); - type Output = Output; - - fn layer_name(&self) -> &'static str { - "forced_price_client" - } - - async fn wire(self, _input: Self::Input) -> Result { - let forced_client = Arc::new(ForcedPriceClient::new(self.config)); - - Ok(Output { - price_api_client: forced_client.into(), - }) - } -} diff --git a/core/node/node_framework/src/implementations/layers/base_token/mod.rs b/core/node/node_framework/src/implementations/layers/base_token/mod.rs index 5b58527a3d8..7a63b573d78 100644 --- a/core/node/node_framework/src/implementations/layers/base_token/mod.rs +++ b/core/node/node_framework/src/implementations/layers/base_token/mod.rs @@ -1,5 +1,92 @@ +use std::{str::FromStr, sync::Arc}; + +use zksync_config::configs::ExternalPriceApiClientConfig; +use zksync_external_price_api::{ + cmc_api::CmcPriceApiClient, coingecko_api::CoinGeckoPriceAPIClient, + forced_price_client::ForcedPriceClient, NoOpPriceAPIClient, +}; + +use crate::{ + implementations::resources::price_api_client::PriceAPIClientResource, IntoContext, WiringError, + WiringLayer, +}; + pub mod base_token_ratio_persister; pub mod base_token_ratio_provider; -pub mod coingecko_client; -pub mod forced_price_client; -pub mod no_op_external_price_api_client; + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Default)] +enum ExternalPriceApiKind { + #[default] + NoOp, + Forced, + CoinGecko, + CoinMarketCap, +} + +#[derive(Debug, thiserror::Error)] +#[error("Unknown external price API client source: \"{0}\"")] +pub struct UnknownExternalPriceApiClientSourceError(String); + +impl FromStr for ExternalPriceApiKind { + type Err = UnknownExternalPriceApiClientSourceError; + + fn from_str(s: &str) -> Result { + Ok(match &s.to_lowercase()[..] 
{ + "no-op" | "noop" => Self::NoOp, + "forced" => Self::Forced, + "coingecko" => Self::CoinGecko, + "coinmarketcap" => Self::CoinMarketCap, + _ => return Err(UnknownExternalPriceApiClientSourceError(s.to_owned())), + }) + } +} + +impl ExternalPriceApiKind { + fn instantiate(&self, config: ExternalPriceApiClientConfig) -> PriceAPIClientResource { + PriceAPIClientResource(match self { + Self::NoOp => Arc::new(NoOpPriceAPIClient {}), + Self::Forced => Arc::new(ForcedPriceClient::new(config)), + Self::CoinGecko => Arc::new(CoinGeckoPriceAPIClient::new(config)), + Self::CoinMarketCap => Arc::new(CmcPriceApiClient::new(config)), + }) + } +} + +#[derive(Debug)] +pub struct ExternalPriceApiLayer { + kind: ExternalPriceApiKind, + config: ExternalPriceApiClientConfig, +} + +impl TryFrom for ExternalPriceApiLayer { + type Error = UnknownExternalPriceApiClientSourceError; + + fn try_from(config: ExternalPriceApiClientConfig) -> Result { + Ok(Self { + kind: config.source.parse()?, + config, + }) + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub price_api_client: PriceAPIClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for ExternalPriceApiLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "external_price_api" + } + + async fn wire(self, _input: Self::Input) -> Result { + Ok(Output { + price_api_client: self.kind.instantiate(self.config), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs b/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs deleted file mode 100644 index 2bf5eda798f..00000000000 --- a/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::sync::Arc; - -use zksync_external_price_api::NoOpPriceAPIClient; - -use crate::{ - implementations::resources::price_api_client::PriceAPIClientResource, - wiring_layer::{WiringError, WiringLayer}, - IntoContext, -}; - -/// Wiring layer for `NoOpExternalPriceApiClient` -/// -/// Inserts a resource with a no-op client to get base token prices to be used by the `BaseTokenRatioPersister`. -#[derive(Debug)] -pub struct NoOpExternalPriceApiClientLayer; - -impl NoOpExternalPriceApiClientLayer { - /// Identifier of used client type. - /// Can be used to choose the layer for the client based on configuration variables. - pub const CLIENT_NAME: &'static str = "no-op"; -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub price_api_client: PriceAPIClientResource, -} - -#[async_trait::async_trait] -impl WiringLayer for NoOpExternalPriceApiClientLayer { - type Input = (); - type Output = Output; - - fn layer_name(&self) -> &'static str { - "no_op_external_price_api_client" - } - - async fn wire(self, _input: Self::Input) -> Result { - let no_op_client = Arc::new(NoOpPriceAPIClient {}); - - Ok(Output { - price_api_client: no_op_client.into(), - }) - } -} From c60a3482ee09b3e371163e62f49e83bc6d6f4548 Mon Sep 17 00:00:00 2001 From: koloz193 Date: Tue, 15 Oct 2024 08:18:03 -0400 Subject: [PATCH 063/140] feat(contracts)!: integrate protocol defense changes (#2737) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ The work done in the protocol defense project introduced a number of changes, namely custom errors in our solidity contracts. 
We need to update the server code to handle these new errors. ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --------- Signed-off-by: Danil Co-authored-by: Danil Co-authored-by: Stanislav Breadless --- .github/workflows/build-core-template.yml | 2 +- .../new-build-contract-verifier-template.yml | 11 +- .github/workflows/new-build-core-template.yml | 13 +- contracts | 2 +- .../system-constants-generator/src/utils.rs | 13 +- core/lib/basic_types/src/protocol_version.rs | 7 +- core/lib/contracts/src/lib.rs | 133 ++-- .../versions/vm_fast/tests/l1_tx_execution.rs | 7 +- .../versions/vm_fast/tests/nonce_holder.rs | 11 +- .../src/versions/vm_fast/tests/rollbacks.rs | 47 +- .../tests/tester/transaction_test_info.rs | 77 +-- .../src/versions/vm_fast/tests/utils.rs | 5 +- .../vm_latest/tests/l1_tx_execution.rs | 7 +- .../src/versions/vm_latest/tests/migration.rs | 51 -- .../src/versions/vm_latest/tests/mod.rs | 1 - .../versions/vm_latest/tests/nonce_holder.rs | 11 +- .../src/versions/vm_latest/tests/rollbacks.rs | 47 +- .../tests/tester/transaction_test_info.rs | 72 +-- .../src/versions/vm_latest/tests/utils.rs | 5 +- core/lib/vm_executor/src/oneshot/contracts.rs | 9 +- .../api_server/src/execution_sandbox/tests.rs | 5 +- core/node/api_server/src/testonly.rs | 2 +- core/node/consensus/src/registry/abi.rs | 3 +- .../ts-integration/tests/base-token.test.ts | 2 +- .../ts-integration/tests/contracts.test.ts | 2 +- .../tests/ts-integration/tests/system.test.ts | 2 +- core/tests/upgrade-test/tests/upgrade.test.ts | 4 +- core/tests/upgrade-test/tests/utils.ts | 4 +- docker/contract-verifier/Dockerfile | 3 +- docker/external-node/Dockerfile | 9 +- docker/server-v2/Dockerfile | 9 +- etc/env/base/chain.toml | 4 +- etc/env/base/contracts.toml | 8 +- etc/env/file_based/genesis.yaml | 13 +- etc/lint-config/ignore.yaml | 3 +- .../fee_estimate.yul/fee_estimate.yul.zbin | Bin 0 -> 75168 bytes .../gas_test.yul/gas_test.yul.zbin | Bin 0 -> 71264 bytes .../playground_batch.yul.zbin | Bin 0 -> 75360 bytes .../proved_batch.yul/proved_batch.yul.zbin | Bin 0 -> 71776 bytes .../1728066632-protocol-defense/common.json | 5 + .../stage/crypto.json | 6 + .../stage/facetCuts.json | 198 ++++++ .../stage/facets.json | 18 + .../stage/l2Upgrade.json | 394 ++++++++++++ .../stage/transactions.json | 253 ++++++++ etc/utils/src/index.ts | 1 + yarn.lock | 582 +++++++++++++++--- zkstack_cli/crates/common/src/contracts.rs | 42 ++ zkstack_cli/crates/common/src/lib.rs | 1 + .../crates/common/src/prerequisites.rs | 84 ++- .../src/commands/chain/deploy_l2_contracts.rs | 11 +- .../src/commands/dev/commands/contracts.rs | 57 +- .../src/commands/dev/commands/genesis.rs | 26 + .../zkstack/src/commands/dev/commands/mod.rs | 1 + .../src/commands/dev/commands/test/build.rs | 2 +- .../commands/dev/commands/test/integration.rs | 2 +- .../zkstack/src/commands/dev/messages.rs | 6 +- .../crates/zkstack/src/commands/dev/mod.rs | 10 +- .../zkstack/src/commands/ecosystem/init.rs | 5 +- 59 files changed, 1856 insertions(+), 452 deletions(-) delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/migration.rs create mode 100644 etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin create mode 100644 
etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin create mode 100644 etc/upgrades/1728066632-protocol-defense/common.json create mode 100644 etc/upgrades/1728066632-protocol-defense/stage/crypto.json create mode 100644 etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json create mode 100644 etc/upgrades/1728066632-protocol-defense/stage/facets.json create mode 100644 etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json create mode 100644 etc/upgrades/1728066632-protocol-defense/stage/transactions.json create mode 100644 zkstack_cli/crates/common/src/contracts.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index 33053b6a400..18b444a99ed 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -128,7 +128,7 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run ./bin/zk || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key - + - name: Install zkstack run: | ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml index 3fc83cc62eb..b5286782fad 100644 --- a/.github/workflows/new-build-contract-verifier-template.yml +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -103,8 +103,13 @@ jobs: crate: sqlx-cli tag: 0.8.1 - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + - name: Install foundry-zksync + run: | + mkdir ./foundry-zksync + curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz + tar zxf foundry_nightly_linux_amd64.tar.gz -C ./foundry-zksync + chmod +x ./foundry-zksync/forge ./foundry-zksync/cast + echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers shell: bash @@ -131,7 +136,7 @@ jobs: docker compose up -d postgres - name: Install zkstack - run: | + run: | ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup zkstackup --local || true diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml index 392acbc9f8f..e8a41a7e064 100644 --- a/.github/workflows/new-build-core-template.yml +++ b/.github/workflows/new-build-core-template.yml @@ -108,8 +108,13 @@ jobs: crate: sqlx-cli tag: 0.8.1 - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + - name: Install foundry-zksync + run: | + mkdir ./foundry-zksync + curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz + tar zxf foundry_nightly_linux_amd64.tar.gz -C ./foundry-zksync + chmod +x ./foundry-zksync/forge ./foundry-zksync/cast + echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers shell: bash @@ -136,10 +141,10 @@ jobs: docker compose up -d postgres - name: Install zkstack - run: | + run: | ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup zkstackup --local || 
true - + - name: build contracts shell: bash run: | diff --git a/contracts b/contracts index aafee035db8..84d5e3716f6 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit aafee035db892689df3f7afe4b89fd6467a39313 +Subproject commit 84d5e3716f645909e8144c7d50af9dd6dd9ded62 diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 8d36f734467..ce7182a3aa4 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -3,7 +3,7 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; use zksync_contracts::{ load_sys_contract, read_bootloader_code, read_bytecode_from_path, read_sys_contract_bytecode, - BaseSystemContracts, ContractLanguage, SystemContractCode, + read_zbin_bytecode, BaseSystemContracts, ContractLanguage, SystemContractCode, }; use zksync_multivm::{ interface::{ @@ -171,9 +171,16 @@ pub(super) fn get_l1_txs(number_of_txs: usize) -> (Vec, Vec Vec { - read_bytecode_from_path(format!( + if let Some(contract) = read_bytecode_from_path(format!( "contracts/system-contracts/zkout/{test}.yul/contracts-preprocessed/bootloader/{test}.yul.json", - )) + )){ + contract + } else { + read_zbin_bytecode(format!( + "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", + test + )) + } } fn default_l1_batch() -> L1BatchEnv { diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index 640a92c00da..e01586cdad7 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -68,15 +68,16 @@ pub enum ProtocolVersionId { Version23, Version24, Version25, + Version26, } impl ProtocolVersionId { pub const fn latest() -> Self { - Self::Version24 + Self::Version25 } pub const fn next() -> Self { - Self::Version25 + Self::Version26 } pub fn try_from_packed_semver(packed_semver: U256) -> Result { @@ -120,6 +121,7 @@ impl ProtocolVersionId { ProtocolVersionId::Version23 => VmVersion::Vm1_5_0SmallBootloaderMemory, ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, } } @@ -275,6 +277,7 @@ impl From for VmVersion { ProtocolVersionId::Version23 => VmVersion::Vm1_5_0SmallBootloaderMemory, ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, } } } diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 7e133f8dee3..a9e7324d5af 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -69,20 +69,21 @@ fn home_path() -> PathBuf { Workspace::locate().core() } -fn read_file_to_json_value(path: impl AsRef + std::fmt::Debug) -> serde_json::Value { +fn read_file_to_json_value(path: impl AsRef + std::fmt::Debug) -> Option { let zksync_home = home_path(); let path = Path::new(&zksync_home).join(path); - let file = - File::open(&path).unwrap_or_else(|e| panic!("Failed to open file {:?}: {}", path, e)); - serde_json::from_reader(BufReader::new(file)) - .unwrap_or_else(|e| panic!("Failed to parse file {:?}: {}", path, e)) + let file = File::open(&path).ok()?; + Some( + serde_json::from_reader(BufReader::new(file)) + .unwrap_or_else(|e| panic!("Failed to parse file {:?}: {}", 
path, e)), + ) } fn load_contract_if_present + std::fmt::Debug>(path: P) -> Option { let zksync_home = home_path(); let path = Path::new(&zksync_home).join(path); path.exists().then(|| { - serde_json::from_value(read_file_to_json_value(&path)["abi"].take()) + serde_json::from_value(read_file_to_json_value(&path).unwrap()["abi"].take()) .unwrap_or_else(|e| panic!("Failed to parse contract abi from file {:?}: {}", path, e)) }) } @@ -114,17 +115,26 @@ pub fn load_contract + std::fmt::Debug>(path: P) -> Contract { } pub fn load_sys_contract(contract_name: &str) -> Contract { - load_contract(format!( + if let Some(contract) = load_contract_if_present(format!( "contracts/system-contracts/artifacts-zk/contracts-preprocessed/{0}.sol/{0}.json", contract_name - )) + )) { + contract + } else { + load_contract(format!( + "contracts/system-contracts/zkout/{0}.sol/{0}.json", + contract_name + )) + } } -pub fn read_contract_abi(path: impl AsRef + std::fmt::Debug) -> String { - read_file_to_json_value(path)["abi"] - .as_str() - .expect("Failed to parse abi") - .to_string() +pub fn read_contract_abi(path: impl AsRef + std::fmt::Debug) -> Option { + Some( + read_file_to_json_value(path)?["abi"] + .as_str() + .expect("Failed to parse abi") + .to_string(), + ) } pub fn bridgehub_contract() -> Contract { @@ -200,7 +210,7 @@ pub fn l1_messenger_contract() -> Contract { /// Reads bytecode from the path RELATIVE to the Cargo workspace location. pub fn read_bytecode(relative_path: impl AsRef + std::fmt::Debug) -> Vec { - read_bytecode_from_path(relative_path) + read_bytecode_from_path(relative_path).expect("Exists") } pub fn eth_contract() -> Contract { @@ -212,17 +222,25 @@ pub fn known_codes_contract() -> Contract { } /// Reads bytecode from a given path. -pub fn read_bytecode_from_path(artifact_path: impl AsRef + std::fmt::Debug) -> Vec { - let artifact = read_file_to_json_value(&artifact_path); - - let bytecode = artifact["bytecode"] - .as_str() - .unwrap_or_else(|| panic!("Bytecode not found in {:?}", artifact_path)) - .strip_prefix("0x") - .unwrap_or_else(|| panic!("Bytecode in {:?} is not hex", artifact_path)); +pub fn read_bytecode_from_path( + artifact_path: impl AsRef + std::fmt::Debug, +) -> Option> { + let artifact = read_file_to_json_value(&artifact_path)?; + + let bytecode = if let Some(bytecode) = artifact["bytecode"].as_str() { + bytecode + .strip_prefix("0x") + .unwrap_or_else(|| panic!("Bytecode in {:?} is not hex", artifact_path)) + } else { + artifact["bytecode"]["object"] + .as_str() + .unwrap_or_else(|| panic!("Bytecode not found in {:?}", artifact_path)) + }; - hex::decode(bytecode) - .unwrap_or_else(|err| panic!("Can't decode bytecode in {:?}: {}", artifact_path, err)) + Some( + hex::decode(bytecode) + .unwrap_or_else(|err| panic!("Can't decode bytecode in {:?}: {}", artifact_path, err)), + ) } pub fn read_sys_contract_bytecode(directory: &str, name: &str, lang: ContractLanguage) -> Vec { @@ -230,7 +248,7 @@ pub fn read_sys_contract_bytecode(directory: &str, name: &str, lang: ContractLan } static DEFAULT_SYSTEM_CONTRACTS_REPO: Lazy = - Lazy::new(SystemContractsRepo::from_env); + Lazy::new(SystemContractsRepo::default); /// Structure representing a system contract repository - that allows /// fetching contracts that are located there. @@ -240,14 +258,16 @@ pub struct SystemContractsRepo { pub root: PathBuf, } -impl SystemContractsRepo { +impl Default for SystemContractsRepo { /// Returns the default system contracts repository with directory based on the Cargo workspace location. 
- pub fn from_env() -> Self { + fn default() -> Self { SystemContractsRepo { root: home_path().join("contracts/system-contracts"), } } +} +impl SystemContractsRepo { pub fn read_sys_contract_bytecode( &self, directory: &str, @@ -255,23 +275,48 @@ impl SystemContractsRepo { lang: ContractLanguage, ) -> Vec { match lang { - ContractLanguage::Sol => read_bytecode_from_path(self.root.join(format!( - "artifacts-zk/contracts-preprocessed/{0}{1}.sol/{1}.json", - directory, name - ))), + ContractLanguage::Sol => { + if let Some(contracts) = read_bytecode_from_path( + self.root + .join(format!("zkout/{0}{1}.sol/{1}.json", directory, name)), + ) { + contracts + } else { + read_bytecode_from_path(self.root.join(format!( + "artifacts-zk/contracts-preprocessed/{0}{1}.sol/{1}.json", + directory, name + ))) + .expect("One of the outputs should exists") + } + } ContractLanguage::Yul => { - let artifacts_path = self - .root - .join(format!("contracts-preprocessed/{}artifacts/", directory)); - read_yul_bytecode_by_path(artifacts_path, name) + if let Some(contract) = read_bytecode_from_path(self.root.join(format!( + "zkout/{name}.yul/contracts-preprocessed/{directory}/{name}.yul.json", + ))) { + contract + } else { + read_zbin_bytecode_from_path(self.root.join(format!( + "contracts-preprocessed/{0}artifacts/{1}.yul.zbin", + directory, name + ))) + } } } } } pub fn read_bootloader_code(bootloader_type: &str) -> Vec { - let artifacts_path = "contracts/system-contracts/bootloader/build/artifacts/"; - read_yul_bytecode(artifacts_path, bootloader_type) + if let Some(contract) = + read_bytecode_from_path(home_path().join("contracts/system-contracts").join(format!( + "zkout/{bootloader_type}.yul/contracts-preprocessed/bootloader/{bootloader_type}.yul.json", + ))) + { + return contract; + }; + read_zbin_bytecode(format!( + "contracts/system-contracts/bootloader/build/artifacts/{}.yul.zbin", + bootloader_type + )) } fn read_proved_batch_bootloader_bytecode() -> Vec { @@ -463,6 +508,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn playground_post_protocol_defense() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn estimate_gas_pre_virtual_blocks() -> Self { let bootloader_bytecode = read_zbin_bytecode( "etc/multivm_bootloaders/vm_1_3_2/fee_estimate.yul/fee_estimate.yul.zbin", @@ -526,6 +578,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn estimate_gas_post_protocol_defense() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn hashes(&self) -> BaseSystemContractsHashes { BaseSystemContractsHashes { bootloader: self.bootloader.hash, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 1abb1e39e19..5897ec5f266 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -117,9 +117,8 @@ fn test_l1_tx_execution() { let res = vm.vm.execute(VmExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = 
StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. However, the rewrite of the `basePubdataSpent` didn't happen, since it was the same - // as the start of the previous tx. Thus we have `+1` slot for the changed counter and `-1` slot for base pubdata spent - assert_eq!(res.initial_storage_writes, basic_initial_writes); + // We changed one slot inside contract. + assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); // No repeated writes let repeated_writes = res.repeated_storage_writes; @@ -146,7 +145,7 @@ fn test_l1_tx_execution() { assert!(result.result.is_failed(), "The transaction should fail"); let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - assert_eq!(res.initial_storage_writes, basic_initial_writes); + assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); assert_eq!(res.repeated_storage_writes, 1); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index f72e95da9f8..6d1e0f016e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -37,6 +37,7 @@ impl From for u8 { #[test] fn test_nonce_holder() { let mut account = Account::random(); + let hex_addr = hex::encode(account.address.to_fixed_bytes()); let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() @@ -92,7 +93,7 @@ fn test_nonce_holder() { run_nonce_test( 1u32, NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), + Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), "Allowed to set value under non sequential value", ); @@ -133,7 +134,7 @@ fn test_nonce_holder() { run_nonce_test( 10u32, NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), + Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), "Allowed to reuse nonce below the minimal one", ); @@ -149,7 +150,7 @@ fn test_nonce_holder() { run_nonce_test( 13u32, NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), + Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), "Allowed to reuse the same nonce twice", ); @@ -165,7 +166,7 @@ fn test_nonce_holder() { run_nonce_test( 16u32, NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), + Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), "Allowed for incrementing min nonce too much", ); @@ -173,7 +174,7 @@ fn test_nonce_holder() { run_nonce_test( 16u32, NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), + Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), "Allowed to leave nonce as unused", ); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index cff72d8ec5a..1ac14e01f8b 100644 --- 
a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -1,7 +1,7 @@ use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{Address, Execute, U256}; +use zksync_types::{Address, Execute, Nonce, U256}; use zksync_vm_interface::VmInterfaceExt; use crate::{ @@ -41,22 +41,40 @@ fn test_vm_rollbacks() { TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_0.clone(), false), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_1, false), // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_2.clone(), false), // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), + ), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), ]); pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks); @@ -134,12 +152,23 @@ fn test_vm_loadnext_rollbacks() { TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), TransactionTestInfo::new_rejected( loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), ), TransactionTestInfo::new_processed(loadnext_tx_1, false), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), + ), TransactionTestInfo::new_processed(loadnext_tx_2, false), ]); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index e6506ff225b..6b1395f6634 100644 --- 
a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -1,6 +1,4 @@ -use std::fmt; - -use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; +use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160, U256}; use zksync_vm2::interface::{Event, StateInterface}; use super::VmTester; @@ -18,8 +16,8 @@ pub(crate) enum TxModifier { WrongSignatureLength, WrongSignature, WrongMagicValue, - WrongNonce, - NonceReused, + WrongNonce(Nonce, Nonce), + NonceReused(H160, Nonce), } #[derive(Debug, Clone)] @@ -44,15 +42,9 @@ impl From for ExpectedError { fn from(value: TxModifier) -> Self { let revert_reason = match value { TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector: vec![144, 240, 73, 201], + data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45], }) } TxModifier::WrongSignature => { @@ -62,38 +54,35 @@ impl From for ExpectedError { }) } TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector: vec![144, 240, 73, 201], + data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], }) } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], + TxModifier::WrongNonce(expected, actual) => { + let function_selector = vec![98, 106, 222, 48]; + let expected_nonce_bytes = expected.0.to_be_bytes().to_vec(); + let actual_nonce_bytes = actual.0.to_be_bytes().to_vec(); + // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field + let nonce_padding = vec![0u8; 28]; + let data = [function_selector.clone(), nonce_padding.clone(), expected_nonce_bytes, nonce_padding.clone(), actual_nonce_bytes].concat(); + Halt::ValidationFailed(VmRevertReason::Unknown { + 
function_selector, + data }) } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], + TxModifier::NonceReused(addr, nonce) => { + let function_selector = vec![233, 10, 222, 212]; + let addr = addr.as_bytes().to_vec(); + // padding is 12 because an address takes up 20 bytes and we need it to fill a 32 byte field + let addr_padding = vec![0u8; 12]; + // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field + let nonce_padding = vec![0u8; 28]; + let data = [function_selector.clone(), addr_padding, addr, nonce_padding, nonce.0.to_be_bytes().to_vec()].concat(); + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector, + data, }) } }; @@ -119,10 +108,10 @@ impl TransactionTestInfo { } TxModifier::WrongSignature => data.signature = vec![27u8; 65], TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { + TxModifier::WrongNonce(_, _) => { // Do not need to modify signature for nonce error } - TxModifier::NonceReused => { + TxModifier::NonceReused(_, _) => { // Do not need to modify signature for nonce error } } @@ -203,7 +192,7 @@ impl PartialEq for VmStateDump { } impl Vm { - fn dump_state(&self) -> VmStateDump { + fn dump_state(&self) -> VmStateDump { VmStateDump { state: self.inner.dump_state(), storage_writes: self.inner.get_storage_state().collect(), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs index 76ca9bc5dd3..eebd825c045 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use ethabi::Contract; use once_cell::sync::Lazy; use zksync_contracts::{ - load_contract, read_bytecode, read_yul_bytecode, BaseSystemContracts, SystemContractCode, + load_contract, read_bootloader_code, read_bytecode, BaseSystemContracts, SystemContractCode, }; use zksync_types::{ utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H160, H256, @@ -64,8 +64,7 @@ pub(crate) fn read_test_contract() -> Vec { } pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let artifacts_path = "contracts/system-contracts/bootloader/tests/artifacts/"; - let bootloader_code = read_yul_bytecode(artifacts_path, test); + let bootloader_code = read_bootloader_code(test); let bootloader_hash = hash_bytecode(&bootloader_code); SystemContractCode { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 4bb32cdf7ae..e0e4e8228f9 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -112,9 +112,8 @@ fn test_l1_tx_execution() { let res = vm.vm.execute(VmExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. 
However, the rewrite of the `basePubdataSpent` didn't happen, since it was the same - // as the start of the previous tx. Thus we have `+1` slot for the changed counter and `-1` slot for base pubdata spent - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); + // We changed one slot inside contract. + assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); // No repeated writes let repeated_writes = res.repeated_storage_writes; @@ -142,7 +141,7 @@ fn test_l1_tx_execution() { let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); + assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); } #[test] diff --git a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs b/core/lib/multivm/src/versions/vm_latest/tests/migration.rs deleted file mode 100644 index 5b8da255180..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs +++ /dev/null @@ -1,51 +0,0 @@ -use zksync_types::{get_code_key, H256, SYSTEM_CONTEXT_ADDRESS}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -/// This test checks that the new bootloader will work fine even if the previous system context contract is not -/// compatible with it, i.e. the bootloader will upgrade it before starting any transaction. -#[test] -fn test_migration_for_system_context_aa_interaction() { - let mut storage = get_empty_storage(); - // We will set the system context bytecode to zero. - storage.set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::zero()); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Now, we will just proceed with standard transaction execution. - // The bootloader should be able to update system context regardless of whether - // the upgrade transaction is there or not. - let account = &mut vm.rich_accounts[0]; - let counter = read_test_contract(); - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful {:#?}", - result.result - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!( - !batch_result.result.is_failed(), - "Batch transaction wasn't successful {:#?}", - batch_result.result - ); -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index 112be637fe0..fadb05cc4d1 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -14,7 +14,6 @@ mod get_used_contracts; mod is_write_initial; mod l1_tx_execution; mod l2_blocks; -mod migration; mod nonce_holder; mod precompiles; mod prestate_tracer; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 6be49367d39..397790a7c95 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -40,6 +40,7 @@ impl From for u8 { #[test] fn test_nonce_holder() { let mut account = Account::random(); + let hex_addr = hex::encode(account.address.to_fixed_bytes()); let mut vm = VmTesterBuilder::new(HistoryEnabled) .with_empty_in_memory_storage() @@ -99,7 +100,7 @@ fn test_nonce_holder() { run_nonce_test( 1u32, NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), + Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), "Allowed to set value under non sequential value", ); @@ -140,7 +141,7 @@ fn test_nonce_holder() { run_nonce_test( 10u32, NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), + Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), "Allowed to reuse nonce below the minimal one", ); @@ -156,7 +157,7 @@ fn test_nonce_holder() { run_nonce_test( 13u32, NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), + Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), "Allowed to reuse the same nonce twice", ); @@ -172,7 +173,7 @@ fn test_nonce_holder() { run_nonce_test( 16u32, NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), + Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), "Allowed for incrementing min nonce too much", ); @@ -180,7 +181,7 @@ fn test_nonce_holder() { run_nonce_test( 16u32, NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), + Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), "Allowed to leave nonce as unused", ); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 00a5d6494fe..2e854cfc784 100644 --- 
a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -1,7 +1,7 @@ use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{get_nonce_key, Address, Execute, U256}; +use zksync_types::{get_nonce_key, Address, Execute, Nonce, U256}; use crate::{ interface::{ @@ -49,22 +49,40 @@ fn test_vm_rollbacks() { TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_0.clone(), false), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_1, false), // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_2.clone(), false), // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), + ), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), ]); assert_eq!(result_without_rollbacks, result_with_rollbacks); @@ -142,12 +160,23 @@ fn test_vm_loadnext_rollbacks() { TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), TransactionTestInfo::new_rejected( loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), ), TransactionTestInfo::new_processed(loadnext_tx_1, false), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), + ), TransactionTestInfo::new_processed(loadnext_tx_2, false), ]); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs index 08667ccc625..e2155c02b7e 100644 --- 
a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs @@ -1,4 +1,4 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; +use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160}; use crate::{ interface::{ @@ -14,8 +14,8 @@ pub(crate) enum TxModifier { WrongSignatureLength, WrongSignature, WrongMagicValue, - WrongNonce, - NonceReused, + WrongNonce(Nonce, Nonce), + NonceReused(H160, Nonce), } #[derive(Debug, Clone)] @@ -40,14 +40,11 @@ impl From for ExpectedError { fn from(value: TxModifier) -> Self { let revert_reason = match value { TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector: vec![144, 240, 73, 201], data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, + 144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45 ], }) } @@ -58,38 +55,35 @@ impl From for ExpectedError { }) } TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector: vec![144, 240, 73, 201], + data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], }) } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], + TxModifier::WrongNonce(expected, actual) => { + let function_selector = vec![98, 106, 222, 48]; + let expected_nonce_bytes = expected.0.to_be_bytes().to_vec(); + let actual_nonce_bytes = actual.0.to_be_bytes().to_vec(); + // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field + let nonce_padding = vec![0u8; 28]; + let data = [function_selector.clone(), nonce_padding.clone(), expected_nonce_bytes, nonce_padding.clone(), actual_nonce_bytes].concat(); + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector, + data }) } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the 
same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], + TxModifier::NonceReused(addr, nonce) => { + let function_selector = vec![233, 10, 222, 212]; + let addr = addr.as_bytes().to_vec(); + // padding is 12 because an address takes up 20 bytes and we need it to fill a 32 byte field + let addr_padding = vec![0u8; 12]; + // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field + let nonce_padding = vec![0u8; 28]; + let data = [function_selector.clone(), addr_padding, addr, nonce_padding, nonce.0.to_be_bytes().to_vec()].concat(); + Halt::ValidationFailed(VmRevertReason::Unknown { + function_selector, + data, }) } }; @@ -115,10 +109,10 @@ impl TransactionTestInfo { } TxModifier::WrongSignature => data.signature = vec![27u8; 65], TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { + TxModifier::WrongNonce(_, _) => { // Do not need to modify signature for nonce error } - TxModifier::NonceReused => { + TxModifier::NonceReused(_, _) => { // Do not need to modify signature for nonce error } } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs index 4d728962feb..34582fb9dde 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs @@ -1,7 +1,7 @@ use ethabi::Contract; use once_cell::sync::Lazy; use zksync_contracts::{ - load_contract, read_bytecode, read_yul_bytecode, read_zbin_bytecode, BaseSystemContracts, + load_contract, read_bootloader_code, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, }; use zksync_types::{ @@ -60,8 +60,7 @@ pub(crate) fn read_test_contract() -> Vec { } pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let artifacts_path = "contracts/system-contracts/bootloader/tests/artifacts/"; - let bootloader_code = read_yul_bytecode(artifacts_path, test); + let bootloader_code = read_bootloader_code(test); let bootloader_hash = hash_bytecode(&bootloader_code); SystemContractCode { diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index 0e1fb9b2762..bc433a070b3 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -22,6 +22,8 @@ pub(super) struct MultiVMBaseSystemContracts { vm_1_5_0_small_memory: BaseSystemContracts, /// Contracts to be used after the 1.5.0 upgrade vm_1_5_0_increased_memory: BaseSystemContracts, + /// Contracts to be used after the protocol defense upgrade + vm_protocol_defense: BaseSystemContracts, } impl MultiVMBaseSystemContracts { @@ -55,8 +57,9 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version20 => &self.post_1_4_1, ProtocolVersionId::Version21 | ProtocolVersionId::Version22 => &self.post_1_4_2, ProtocolVersionId::Version23 => &self.vm_1_5_0_small_memory, - ProtocolVersionId::Version24 | ProtocolVersionId::Version25 => { - &self.vm_1_5_0_increased_memory + ProtocolVersionId::Version24 => &self.vm_1_5_0_increased_memory, + ProtocolVersionId::Version25 | ProtocolVersionId::Version26 => { + &self.vm_protocol_defense } }; 
let base = base.clone(); @@ -82,6 +85,7 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), vm_1_5_0_increased_memory: BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), + vm_protocol_defense: BaseSystemContracts::estimate_gas_post_protocol_defense(), } } @@ -98,6 +102,7 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( ), + vm_protocol_defense: BaseSystemContracts::playground_post_protocol_defense(), } } } diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 75788d48058..e342f2d73de 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -307,9 +307,6 @@ async fn validating_transaction(set_balance: bool) { if set_balance { assert_matches!(result, ExecutionResult::Success { .. }); } else { - assert_matches!( - result, - ExecutionResult::Halt { reason } if reason.to_string().contains("Not enough balance") - ); + assert_matches!(result, ExecutionResult::Halt { .. }); } } diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 45ed802d68f..13e5ecc08ea 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -60,7 +60,7 @@ const COUNTER_CONTRACT_PATH: &str = const INFINITE_LOOP_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/infinite/infinite.sol/InfiniteLoop.json"; const MULTICALL3_CONTRACT_PATH: &str = - "contracts/l2-contracts/artifacts-zk/contracts/dev-contracts/Multicall3.sol/Multicall3.json"; + "contracts/l2-contracts/zkout/Multicall3.sol/Multicall3.json"; /// Inflates the provided bytecode by appending the specified amount of NOP instructions at the end. fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { diff --git a/core/node/consensus/src/registry/abi.rs b/core/node/consensus/src/registry/abi.rs index 55cc7f9264f..d9e2996effe 100644 --- a/core/node/consensus/src/registry/abi.rs +++ b/core/node/consensus/src/registry/abi.rs @@ -19,7 +19,8 @@ impl AsRef for ConsensusRegistry { } impl ConsensusRegistry { - const FILE: &'static str = "contracts/l2-contracts/artifacts-zk/contracts/ConsensusRegistry.sol/ConsensusRegistry.json"; + const FILE: &'static str = + "contracts/l2-contracts/zkout/ConsensusRegistry.sol/ConsensusRegistry.json"; /// Loads bytecode of the contract. 
#[cfg(test)] diff --git a/core/tests/ts-integration/tests/base-token.test.ts b/core/tests/ts-integration/tests/base-token.test.ts index 70df1dff928..8ecc9de3ddb 100644 --- a/core/tests/ts-integration/tests/base-token.test.ts +++ b/core/tests/ts-integration/tests/base-token.test.ts @@ -9,7 +9,7 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { scaledGasPrice } from '../src/helpers'; -const SECONDS = 1000; +const SECONDS = 2000; jest.setTimeout(100 * SECONDS); describe('base ERC20 contract checks', () => { diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts index cb1bec35b51..b17c2b33598 100644 --- a/core/tests/ts-integration/tests/contracts.test.ts +++ b/core/tests/ts-integration/tests/contracts.test.ts @@ -16,7 +16,7 @@ import * as elliptic from 'elliptic'; import { RetryProvider } from '../src/retry-provider'; const SECONDS = 1000; -jest.setTimeout(300 * SECONDS); +jest.setTimeout(400 * SECONDS); // TODO: Leave only important ones. const contracts = { diff --git a/core/tests/ts-integration/tests/system.test.ts b/core/tests/ts-integration/tests/system.test.ts index 3c09bcb7b46..7ce2f69acd6 100644 --- a/core/tests/ts-integration/tests/system.test.ts +++ b/core/tests/ts-integration/tests/system.test.ts @@ -373,7 +373,7 @@ describe('System behavior checks', () => { const BOOTLOADER_UTILS = new ethers.Interface( require(`${ testMaster.environment().pathToHome - }/contracts/system-contracts/artifacts-zk/contracts-preprocessed/BootloaderUtilities.sol/BootloaderUtilities.json`).abi + }/contracts/system-contracts/zkout/BootloaderUtilities.sol/BootloaderUtilities.json`).abi ); return new ethers.Contract(BOOTLOADER_UTILS_ADDRESS, BOOTLOADER_UTILS, alice); diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 79a690a1580..4065480b121 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -236,10 +236,10 @@ describe('Upgrade test', function () { }); step('Send l1 tx for saving new bootloader', async () => { - const path = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul/playground_batch.yul.zbin`; + const path = `${pathToHome}/contracts/system-contracts/zkout/playground_batch.yul/contracts-preprocessed/bootloader/playground_batch.yul.json`; let bootloaderCode; if (fs.existsSync(path)) { - bootloaderCode = '0x'.concat(fs.readFileSync(path).toString()); + bootloaderCode = '0x'.concat(require(path).bytecode.object); } else { const legacyPath = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin`; bootloaderCode = ethers.hexlify(fs.readFileSync(legacyPath)); diff --git a/core/tests/upgrade-test/tests/utils.ts b/core/tests/upgrade-test/tests/utils.ts index 9f130c1e556..2972f8411f5 100644 --- a/core/tests/upgrade-test/tests/utils.ts +++ b/core/tests/upgrade-test/tests/utils.ts @@ -88,10 +88,10 @@ export function initContracts(pathToHome: string, zkStack: boolean): Contracts { require(`${CONTRACTS_FOLDER}/l1-contracts/out/ChainAdmin.sol/ChainAdmin.json`).abi ), l2ForceDeployUpgraderAbi: new ethers.Interface( - require(`${CONTRACTS_FOLDER}/l2-contracts/artifacts-zk/contracts/ForceDeployUpgrader.sol/ForceDeployUpgrader.json`).abi + require(`${CONTRACTS_FOLDER}/l2-contracts/zkout/ForceDeployUpgrader.sol/ForceDeployUpgrader.json`).abi ), complexUpgraderAbi: new ethers.Interface( - 
require(`${CONTRACTS_FOLDER}/system-contracts/artifacts-zk/contracts-preprocessed/ComplexUpgrader.sol/ComplexUpgrader.json`).abi + require(`${CONTRACTS_FOLDER}/system-contracts/zkout/ComplexUpgrader.sol/ComplexUpgrader.json`).abi ), counterBytecode: require(`${pathToHome}/core/tests/ts-integration/artifacts-zk/contracts/counter/counter.sol/Counter.json`) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 5688db2e3f5..d87a0dea1e0 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -97,8 +97,7 @@ RUN mkdir -p /etc/vyper-bin/0.4.0 \ && chmod +x /etc/vyper-bin/0.4.0/vyper COPY --from=builder /usr/src/zksync/target/release/zksync_contract_verifier /usr/bin/ -COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ -COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/system-contracts/zkout/ /contracts/system-contracts/zkout/ # CMD tail -f /dev/null ENTRYPOINT ["zksync_contract_verifier"] diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index 1012eecfc16..f5c55860740 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -23,14 +23,9 @@ COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin COPY --from=builder /usr/local/cargo/bin/sqlx /usr/bin COPY --from=builder /usr/src/zksync/docker/external-node/entrypoint.sh /usr/bin -COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/system-contracts/contracts-preprocessed/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ -COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/system-contracts/zkout/ /contracts/system-contracts/zkout/ COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ -COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ -# TODO Remove once we use foundry inside contracts repo -COPY contracts/l1-contracts/artifacts/ /contracts/l1-contracts/artifacts/ +COPY contracts/l2-contracts/zkout/ /contracts/l2-contracts/zkout/ COPY etc/tokens/ /etc/tokens/ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index 13a39133327..319d0cefbe3 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -31,14 +31,9 @@ EXPOSE 3030 COPY --from=builder /usr/src/zksync/target/release/zksync_server /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin COPY --from=builder /usr/src/zksync/target/release/merkle_tree_consistency_checker /usr/bin -COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/system-contracts/contracts-preprocessed/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ -COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY 
contracts/system-contracts/zkout/ /contracts/system-contracts/zkout/ COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ -COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ -# TODO Remove once we use foundry inside contracts repo -COPY contracts/l1-contracts/artifacts/ /contracts/l1-contracts/artifacts/ +COPY contracts/l2-contracts/zkout/ /contracts/l2-contracts/zkout/ COPY etc/tokens/ /etc/tokens/ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 6cfacb3c72c..903696e3a81 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -90,8 +90,8 @@ fee_model_version = "V2" validation_computational_gas_limit = 300000 save_call_traces = true -bootloader_hash = "0x010008e742608b21bf7eb23c1a9d0602047e3618b464c9b59c0fba3b3d7ab66e" -default_aa_hash = "0x01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d497bfb50b9e32" +bootloader_hash = "0x010008c37ecadea8b003884eb9d81fdfb7161b3b309504e5318f15da19c500d8" +default_aa_hash = "0x0100055da70d970f98ca4677a4b2fcecef5354f345cc5c6d13a78339e5fd87a9" protective_reads_persistence_enabled = false diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index daa317a8bc9..dbadbbc2c77 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -26,13 +26,13 @@ RECURSION_NODE_LEVEL_VK_HASH = "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a2 RECURSION_LEAF_LEVEL_VK_HASH = "0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c" GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" -GENESIS_ROOT = "0xabdb766b18a479a5c783a4b80e12686bc8ea3cc2d8a3050491b701d72370ebb5" -GENESIS_BATCH_COMMITMENT = "0x2d00e5f8d77afcebf58a6b82ae56ba967566fe7dfbcb6760319fb0d215d18ffd" +GENESIS_ROOT = "0x28a7e67393021f957572495f8fdadc2c477ae3f4f413ae18c16cff6ee65680e2" +GENESIS_BATCH_COMMITMENT = "0xc57085380434970021d87774b377ce1bb12f5b6064af11595e70011965747def" PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 GENESIS_ROLLUP_LEAF_INDEX = "54" -GENESIS_PROTOCOL_VERSION = "24" -GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.24.2" +GENESIS_PROTOCOL_VERSION = "25" +GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.25.0" L1_WETH_BRIDGE_IMPL_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L1_WETH_BRIDGE_PROXY_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L1_WETH_TOKEN_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index b7d4ffebcf9..1b154b9e9ea 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -1,17 +1,16 @@ -genesis_root: 0xabdb766b18a479a5c783a4b80e12686bc8ea3cc2d8a3050491b701d72370ebb5 +genesis_root: 0x9b30c35100835c0d811c9d385cc9804816dbceb4461b8fe4cbb8d0d5ecdacdec genesis_rollup_leaf_index: 54 -genesis_batch_commitment: 0x2d00e5f8d77afcebf58a6b82ae56ba967566fe7dfbcb6760319fb0d215d18ffd -genesis_protocol_semantic_version: '0.24.2' -# deprecated -genesis_protocol_version: 24 -default_aa_hash: 0x01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d497bfb50b9e32 -bootloader_hash: 0x010008e742608b21bf7eb23c1a9d0602047e3618b464c9b59c0fba3b3d7ab66e +genesis_batch_commitment: 0x043d432c1b668e54ada198d683516109e45e4f7f81f216ff4c4f469117732e50 +genesis_protocol_version: 25 +default_aa_hash: 
0x01000523eadd3061f8e701acda503defb7ac3734ae3371e4daf7494651d8b523 +bootloader_hash: 0x010008e15394cd83a8d463d61e00b4361afbc27c932b07a9d2100861b7d05e78 l1_chain_id: 9 l2_chain_id: 270 fee_account: '0x0000000000000000000000000000000000000001' prover: recursion_scheduler_level_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 dummy_verifier: true +genesis_protocol_semantic_version: 0.25.0 l1_batch_commit_data_generator_mode: Rollup # Uncomment to enable EVM emulation (requires to run genesis) # evm_emulator_hash: 0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91 diff --git a/etc/lint-config/ignore.yaml b/etc/lint-config/ignore.yaml index 3d0c4869df8..b4456a6c3fd 100644 --- a/etc/lint-config/ignore.yaml +++ b/etc/lint-config/ignore.yaml @@ -2,7 +2,8 @@ files: [ "KeysWithPlonkVerifier.sol", "TokenInit.sol", ".tslintrc.js", - ".prettierrc.js" + ".prettierrc.js", + "era-observability/README.md" ] dirs: [ "target", diff --git a/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin new file mode 100644 index 0000000000000000000000000000000000000000..f1b46172d6db15bff4ca6894b7f0d73830430003 GIT binary patch literal 75168 zcmeHw3w&Hhb@$wT>8||HTDD|KmaMD9dARvfWD|$TvB_@y$ODu3m86tHw6SGJu`S7x z?AXv2*#y#rw4^`@C4o4Uwost7yb3gFWYRVS+R_9l4doF?fRASa6vC^x|8vgFeazn7 z)oNEszkVqB`R<)Fb7tnudCi%}RS89Z3DvEicqARwiN|2FKQX41+mh^e?obX=Rwb{( zzmDTMw<|TjS*erj@jjhaDS8g39&}1Xzvp4w@2*(ahxfl$F$=ma!7A=a?XS3&ODHApmno% zwo<7U)rA~RatHqHa#ZqKZnwkq8gz0(N0oEl!gzC_r%y+9oaspYp#CU#F}0(0op(Cp zRqhhhBmHSbf0XI~d}RpUX2H{d4}{?DQFvASo%P{I$9TNmrJxh~>2?Uc&~h3NrMl6c zm3a0#p7UBf(+Mvzj^~SI{D=%Zy6IFu(OY$;^PneXOs6iy!%5|x`{`ZnWPDDtUk%V} z>U8jhQW%2*_&{}~XdLhdex~$&8b8%XcvW&m@=6Bh_}xi(f;W|5egzjepQ3!JC5h+I zGnu6N9p&WIE&RS%(w)VU&nY-}Q@Y}DT6hlf5QO=idJU)r^o#j> zxzNMywfNH0>3vrEX_8LmG2WdnyRo=!ga$FX_qVnKr)g|NA{8c?J ztRJY~35WT3S(6#hvuM2Lzgz2V)z=iFi&7rO={CS0*6_SDhH)dRGQIF%eC~4~Ezpbe z-=OvPAha9HUxMWZaMqjizo_9;3|~kM;&D+rUry%}eGfaZP~%B1q;|XgiGHrkh+?(pIT>#E0`NNLdn`kE zmLeb7A$nw(p52*2l8?kxlddOonCW`L&^6P<`FhI@g8e_W)4n9f!Og2rC-f|S$aSOpg8kW1JBchL* zZgwt0y5|l`y6bYg6yT_rvKN78KIF(De4Z)qzMA{fuj#o=_xFTuziN{HuG4x}^H+k} zF`(LkN$jm^5;}Av9rQx-Uc$TJ3`3_Tp=Ynl{c_GUbkcMU=yXSvPEEr?r_3Efr;Mgk zM$;+7bf53jDWmCx_DATHxnAg$d9|j~Sd>nH6P?br=+x)a>1IQxKB6Ddg~rR%?N=G@ z?|h$5Xea29@R*Km$QjF5NM`yjSY_xW{N%2|I1-4f&*LU2PT_g0~k`wmT~ z=_s85Cpw`GMevoc5Bf~`=65n3`eD(D?^N@fxEE4AJ_^%1n(`FNl3fJUuikZ@V)q<&Y8duCew4Ms+TtNE5%cUP~9{7lV z3xI=vbI@<$_B-FU@bn5ED&I%=mpw!LHH~(XJ^(Co4sQQ^^%LbKj(`sKJaj(cX$}0-8vR%Ih0*%r@uR&7p)2~Q^`D~# z2WDTd>XLB*zUce~WE&h%Ph!IV^hJL9 zHuQt>SajdcbhxDKJVfUE0X&}00ovwJs-fzE93hi0&4sBOjb^Ay^oFC$E@IUlLAph4O zU!gT$g%1OGJf8$`n-71Z3Vxcvtqj#)BYvI_2YOAZQ=sl`isX{x0ZB+=k@WH}MW3Ya z(fS+lI+{nfAa-+-;2z#dpKAS$UlMm)YV?N8UsZQn#$6@0i=QUx$=;;sV-NXnMQN2h z-5G^)IF(|5Wd5M`gW6NNd@3b+JGE2r==a`KUi5lu1(gS$^E7+|*)a+q@jKWe&;B6d zVz3SAh^{8t26PjMh zi-lfEO|PV;7vkDPubqs4k@D~#e72Uq5k7H$6MRqz=|$WAN&IH^dh^9q@YkuA)gs|n z=o^k}xGkVxfyOtHn(D-3&|&-L^LgIHMV(fOW0u6hrQEciuaEM%zoqq84)qPRUg%-I zjCeSiSC8o1dWDud(mxcxT|oKpB)RH=e1Z&WJ>8&;@wO zEfIMZ`UUa~IJZmd9~GsCyC%uw9LTdjPOZA0zKZqu@@&=94Y`qeS8F^HAMe%lhyK(3 znOQx)JX`gsL}T!aJcBPlZ^T7fKBoDvN$PX2MY{{2v-oqqe};FCkG8xEVzOR=sgw)8 znU5#Mv&etW;0a~~jQqpq6*=_~Jy+))TK!Q@E77;*UqoM>m2>jMA7|xK>coJWhYTqy z=N7>yh1N^U+F9|d19-e04B)n%y{-!WdBmro`fJ6%s|tS6TS5D3#ILmf0>8^suw|rH z3b4!WZmJhl?C|+f)+aJrKg`$mIE|qKzb5}gkNa=*_47EX)vI_t;x{n<7?+kEGEVLu z@Cp1fgSX{cv4`&UR58-QNxaS^>ozDTd<1{cWB*VkH>=AiTVj_jUmnYQLeDdjYPys8 
zfb|jd^Hy~Ol}oltdFTVS>*y!z0Y^!BOiVbP%M(9Jc__|`e&uk)ugsVDm6jg~_00Nu z;nlvrCj2BHJRfrRpO_!hHFu?w_bwvy#duNSC3rvFk-r{;)W z_VtppO7xPXCyky>8M}dXIrvXW#r`1cYvLzG^^0E9>wRVQ&&4(AH}LF9!cYCLfZyGp z^!0Tg<@eVmp+{)^RbsC6b#48rU0>IRU-b1-<-r$#|Fv*L_)q&^fgXwXzXEtXzXfoM z-(2y(0=SKz`EB-kQfWWt8TP*}De=E9_vM-VFIvwMc^;`E&(i-uUgPN-z-{`DGX6QE zFIJ6=ewKB0qaRW;oOd6rq8}9&U|A0J{Cm*Znizu}B0uph^h`$faV$Kn=?wo(k>2O| zfui*k=&{y(?-uRf;uU(eThj3_*|~k-`}d+d&;#ydbX=6^xprvcesvIVn@`)3e`QE7 z*EmkfrSfP8;tV_w_5I(p@qg1{yOes3;itZx>YY?>^)|Mv3jQeUR15k7{YUFtj8 z#CT#pEq2?o@?QMq#I45*>Z;aO0@NpIYfch!&euV2QFOM>x4dC&76u@nHKUxL9 z@M%!KM*ONBlS=TZ`!eS#^4D6vr2Vv)*0COo2>7XtIfPZ%17g3IcRR^aYC%;Co}2~?f;mA3?zXG*G$6?7{4TSJpa)fmo))NaJ!@Q2{Fzh(nwRUGK%_miRTAiT&x8WV~ zox|L{K>eh4FkPJOpbN#t#Xew;RunpH$2_e=E%b8gKEOKqa_XmeE<`=R+wxvhNs#{MMbH~ywSAJy>(w_6y4{%?a3dbkGXJ$U@n)QO%xFmL8`|At^ zYUi`P&agjNXXvB$r2oHOXHbZ9c;UFDdoA=w0d!GG<8MEY`Se}Pe>7jCcn|s06ptkT zgyNB;XEJyq{qMKxAnB_PYvLf9(Jz-e^=69 zqUVG1wc_7Z1%I9MSG82;E7)(}&vpXhTOj^5cA`C{?Yy%>>?qp9cG!;FrO3|Hc*#7d z8~C7e&i^suQMU&3acCjdPk5HWlOQ}a{(8Q{{uYgA*uM|#sy&{zU5%%AP~IL-@$Un; zjbHdBfY*p$bzA;DboDXN+pXr`Ydw7a(~zUvV9a2zd7O-&->U6^_J`C0(xc5^?7o`) zebxLvt^aeNJM2&HOk+RrF8+YXrSJbGc)ccv{a;*(}tnMx7_gt=f-b(nn{}Q)Z zf_I{`@jHxNHFRLU0(|2P{Sb35!o|49`5}mN)Uuyq;ivU@YW^U8IZ1rkXZoSz(b|vl z{Q_@>?fw1Q)=%9&QbbHY-k0E>X5M)}2bdXh!2BluvhcURzS`l>$4*fDEBL2%>IpjRN`$rP5_xEW@TzozXu-q=)8m{kG0HjK4mb zb2#3bBlSdnlOJbheHp~#GSnZyMIX>QyZAHrAf4oy^eXAIMM;T&n038*iM+&F9KF8> z`ANPpkM$@&{eTZaYGb-n(fG0cfZs*u(}*vKPw?mHI7r3v#7|tFnujs;HTUCDEtz@ZuymdJeK4g`rrLiVi?qp?#^o6H-VuQe)`qAFZMa9 zV4u$aOg+Cs{FOkSFn&D#PHKwenc_wyy=FgVUiLM)e`fpuhxeh8c#2#?Pe3m5F6E~s zKiHY`O_3cLfIlShDdbDWop)C%FL4x@jU?-pd|FMBUP-66CdOf0zHj6hDvR2gZW`ov zD)*rHmpI1)`Of<1!PL#n&rW7g{IjObq@S9;iSCkq>`iZ_{c@xji}dctNiqdFr<|sp z<-nK9>+(7d)O3wL?{|%yN5(lt)a$79Rz3%$GFMA_`fACSJ|z45^!aJhgT2U(B67Z8 z;%GUnHF7)h&`UJ0dawAsSr7Vey}wSP+))&*w4X`lg@HYb*AD^Q(hnu;yk#b`mS;u-Vft~mM1YD?l|}Jg0lNxq@MvizMlcy>Ze|33g}?# zS+bJ@q5TMUc})j&Cx^&{a<8ZMqIYCJG0)@!vZNOc-DVr#m3t42{GgJ`@!j0(jGXW| z`Sf;=Q2RptvHM#u-&9+VtkX~P_t(DDVSUpAS?(h~c{$#h?q${^Q|LFw;rt&%@7>1n zA6{p0U#Zuj|AOff*h`x(((V8r&u0PL<`WYa@%PzL+@<+*tvfj$66_CoSCw|u8($NC z4fx^R(fa&-4=rj=*5Bbzil5-e#}ga$KD?RugVb*E&xj8DuwL+apU=-Dy-x4P*0)~Z z^+DuXRJ%X#mUYnc%IXoBcL(tJ@eSbC`091g0B-Y-tQ!UJ8u6V95V)A7(8_JpzieGl_T zdHpE-7{KHE6~HaJ`S)Ao&~9o!?l0kWAdF*y>@nN~*j<_jz+Qy>Zwrt6m)y?4|F-Fm z;*oX1_;v>6E&NnH>>b6E#cs)aAh*@@YMpd)tLfD`^|m?313jwOMat+=qsPp;T~6~Q z$9F*;jwd&NzT+N=^Ai7&pTGkN`OKas{Ob0iT!GTnVz!g*4$U*$@GkU4JQ4D3_`IDx zAI#~p*NDE>1ja9)aGn$AbtO-VYIvKPYeG|7*fed?5bp z6v8{u$F%=X6 zM|rH(^74WbWJJNy9Z{}M!DxZTc7^a}9}&l5u|fzCNRS##XY|ChI;5>jjsg zxSG+tWEnD&zaRJs@$whIZTXgS?EyS~KVty50ZV;5*#AnWwmD?k%#fLd~vfUxa6^>-e&taK`Z_Gk?haC-oyVPqW9Z-u;)N zcY|@W$Gyt_C|pY@bn(wS>-7$b4-K@g_2UCJKY)ZO$d7w3@(JA~z9)97{JPYAk#(uH zC4Nhd`yv+nLipq?ns%fXhshAFha&<+}&x1;$xjEuRNr;@(mQ{s%5x6ODXu# z>P~b6`RV>bXMO91J&3E*`SIozk#{ryQrR8x=@dT;m)G+Jp64Nbm5=v*k$l9*?B}E& zr*zg6N!0`TTK*=fE#hBV?-RPqd=lq?%z8ugp4e#Id1DE^Xri^@sXn%WI?i` z(%x^d-H==*t=No!S3n+9T^+=qwEI zRrt3I--*5ioEUJRbz@omaXQ;Kl3Sklx!cj+N_-=FI*@}J<)kWpj%$SYw$b=mug)RqX`CtovobhhdWa#~Y#l2oK#q0sM6SSoS$Mi7Dc8EaOYu3i}rB;&(}(PS8C# zqW9^k9J#mWF>SYGpKSg7`9a^#$-EsErO3Y{yNZ1%{(bPjCiMcqeye+q?kD_4)(@Iq z*sUk;vhJeP8CoA)DEV`zfEn`HaQtfGO#>#{W=*@$-BqM4tl< z#&6T-&N;?^cZ2cU^m%K8@!RxyOM~&-?Z2xE{@)Y*kb8H7F+Dxxx5*Ed2k}VEne7{^T6ve{O^E z(|T5z{^vCqzilVZpJV(VZ!rFZMV|*7jGyMMVfsJ6!T4$YKaBr*4aQIN@G$-hqWIlv z_KV`_D`#(C+`p&#Lgg*{=>CHRuhBl%ZNJR?;~yh*p?P!)^LeuK5;wkz+quP`*IE70 z-|q_G@%;$k_Po3rezGGgTZ8hA#xLiO<_P&!VqK#XqiViGrqY{bU;oU$L$=@vurtq3p*RsIUGQ^_*WB~BEa~z0$&&hfgM)qd z_oF`5(C?3x^5@?%f37aepAtU`;PLzuz%BkPq5u80(SIQ%iQ-%&PW8}#A$IhH=}+Hd 
zW4zB0{avyL1$0C2pIG!YP$n*3ak9bl9ovt&MB34?c!`bw(gx$V?c58Z_}v@mz4iHJ z`IqC#A$UCh2IZ^qZ>{q|+YjdUBG@(Cc$GdM*>WHA57`+y$4cuZWOqou#qNwVU7lB# zE;9cM;PG?`;5J>Pd_XtbuF3h70B)DJ2gY?23QN6GT>%I}a z&^5Pund*zx1J(FU^Fi6T=o_PQQlf`z#c%t|f7D?7ww$~ng1?l%U&H);d0GA@y|J=2 zfXDN90Jr$lt%hItKPX=#{*v*ytw#Qb{$M_>*DgLC<^LAk^Y;G)|J&;VwjH`G!aq=p zb*?+u^Px=*#&5TO^Bm*f&|v(w{9fJ|{4M4Dq-y%h<_}wc4K=2HzW%lG@2wMmHT`~Q zj`6>Gj`6>$3jU8Z*blbjVH>N|Un_sv@yHj=G5$Zc@nioPud8C-2%pCNH9Q}L+Ea@6 zVY=t&^=1d}7a_IWdg>iE-a7TB9VdMA9OECXg8w6R@>Mmte0`PrtMQeM-;N``uEF^2 z@i|^6ep~O7pA?QGzNt?9)#&qw4aRTV{o5Oi-(Kf=ZG-XKo;5PU({gywtQY(8GqSz?2p=d$_gGNMTZ?^B{nY(B?W|dkct@3X zRI@*JKk0seu>SsVgYnzz^ba%`Kd*m)yoNp>h~mdSH0>{0<8zMW!?48fL$meP6Ai*k zd=aMi<59eI^9iljgzLzqm;Eqv zho9(P&%Wns?I%B?2K=g1;+*z(Axg&IUe7Y;hkC)+Y#*SfVILN4GWMaqb(BR|vna@V z46kD?+G*^AxmPQBrrb|8H|to&KA8KMwVep!-m;z*@QKZ*5a_zk$%OZ>+Ur?m;?|1} zm7#xkI?wBVYG&(*iwf&qwvOe;=I9 z_it|y-R~+x_YXYFbpOgTO!u$OknW#u5ZymnhVJ${jY`fbzx#Sef49o*#QAsd1j!%o zw;yzPUp@RfK9|%mzs~kw-cRH0uOHI-pV()LKUK2NNPTeY5eG*tHIe z&&)Y9OTJsZGJMumn_g|j&G~HCx}tiuWS_ts^BL9Wz`nNbpeOvTGqJFX^RN2-dC_;I zZ`b)R;QY@of#bX(EJ&n^&GSV_t>Jnmq+2) zcNA^^71sM!>wUZRzQerhdfM=$9n^I1Rzk^lFy!3dUlac`l+Te&;rt=|Tsq;xaC81_ z#^<#n=XKkEGSl$d;6-1?-nF9eTWKMcXG^-1cM++U^dcYGw;e|$#}@{Zo8 zv|ms2tOC)emUGHA^!ttBbGxB-+x{1~D+I5%-(RDCwLe{s&O|5G8G^^x7nHZ@+=lW* z$2_06LJ(c^lalWcxnHOHiLRjY8oqys_>$<(arT<|jnAq1bO$2%cb#|);``-ee9o(% zzYpOdYJhuKY1;?Gy_DQX+ndhoxKN(_{?-SKe#Ctygs05Cw1C9kN85TYy2t*0hQIsR z`mn~Al>28x_tNV4Pe$J>YwnqCl6aAl_)~BXZIiyAHm&cgO%Lff&`yp^kzEVlLkoiP zce@!H_y0!53AelHaTWQ%q=@xN_+Ei z{S(jk0lZfHOutb5Rq*S3>6G%){S<#FvH$cP&|B#~W9YT@{8dGfBd{>>LpIOppdZnf zzawhkr!&8JTz;NEj=CK3GhX*wL@xzPs>fNwoyTyW@qk)D``xZo4b^sBeI8wb1P|-y2J8XZe$J?B*VB>`DYFC~y=AY?x+J;C=^?Zb6(=o9NLvpz@q7jn^#b)u8d%T@F*n>pr#6*}H3{+oY4 z(2VV9Wj&1U+V)iS>+d&Bn~C0^+kQ-s$7iTtj7`3$!Tb2} zPWEsE@s)vB@fRcaoq*5I1q#x#hK^6`c9LGb5c0xw!oS8&t4O=aulMyZ>7ONk=$DWB zaTXtHyP=f%?y7A!%>CBJE*QG`-v?6seF^t_y5DrJp2tIfXO!2O&^luue7)urM6WU3 zmH+)BU#~fKugQD*uP@`DtIzV|^5QrSiQ!Ni>`EI;p(FG5 zx#G(o{)#XEckyMy==+lIHpu)XqVM@$Z}H!a-UlBFf3p9+MEfu0{r9x!^E|!-M1J}1 z1kJ-p-$(uOLF1PvF0P{Q<@-hAw^!5q)c%?2ee!pp_uJr$oYe9~^IZQPo*!B9cG!jF zUXnx1-yoORZ=~Z&NscSE!N((eNYhK^pLEUpjnT&ZDx#Sqp2mU4P`%)>EhxWfws*mz{Ddtn8^Y=;obmjEPelgrD%jGa5Mf@ezm(KHi z?xQ+hfH)(M*AjfE>J@-H*hhx=O{x!54?HoBLp+Z6>GAb+ew8>w>lxzrcUwucgy@;e*UJX`g)8Dox+DV!lyB90v;ci2K!sm|KDRi*vNhZ#Put ztF%gTd%K#QQSeUGK04?VIT1*cs^)X?uM^t;mdH`cg5VdnRIpQRtyfO6wF ztFq#&NOdHx`hXsH_IHsRkU6=JOyBp^j`)Mzx>j8{zi9rNQ`Z`rOrE{C4~AjN*5fCh~mV;v0#`c?)}gZrhKHKAW|@XWbb7a4kK`5H6)^UTT@s8pZbu=Un| zM)g(+{@=3c-{AVcTc!RrmcFPm|7gFv?(ydNM>}|n0+OLV5R>6=?_9bYI$?NFYi#mat~{x&nMKj?;OprgETwukDXo^8eH`EB{7c&TM}n;uXb1BnT0Fb`H2AEf8tAy&Uzbu)jPNyPyPsw$=4wT@r*8g zo+ELGe1hX1Sr7MZGhe9HdOwP!=dJWX$?yIQA_>~#Px22EpYr<|RNi?E`S{&mA2jlxPksp+0ToK?5C zvHdxu`!i_c$9I#MUb4Q6`Abgfclv~0nN=FzF7yIFQhvyT&gbJT*o2cV>{<6ILS?=oOl0%YG3WKkj!2eiboJ%FM%4PA%VKMrpmDq$#?eqyy{7JpS)tx&?aF z?w37&@pKEyTXduSXWXv@+D-jyf*kILTtNnoQNJub?f#tGQ{Hb$^Ed|Z`1S;FyFFZ< z>XkU6Z4YPduh2df@>d#c2eteK^o{Rt0JrAQ3nI&A1&Ph8WUpX#`2$D102-;N9F zxapkX*KyN^;kW6d? 
zdA?OY@7B)~@+5JFJU@r`(US87VT2_2fcc~ICgt`5MD@vj-YoqnRpTef zeq!9~Dt;rZF|`{G0CTwJW29s zl~d~U1XlL2aT-l$dr)xrT{(-%PwBuqAj*+_p#Ej>r1mTK7s${3;&~U@O`0!Ku6|y} z(Ru)J3icx?ZZSpqwIAUce;&(iYM|?3t;YyWm3bEyqM*yMKU~f+q_Lik^9vpSq3sqm zk>EAXw>mz9?ht)?u}(>T*f%x4XKB9G@uVIvO-GEbHQ!3;`4;>=j9ZuaeyIm~lAoXT z?K^)zx9e5HC+;u(apQY5)H=NLyg$TcWu58wb6M%*HLZ%Pr+}-T43kX#vZ5A%-_8ko-cHIn(h~+@=|Zp3J3mV*D^D{scRU& zYMK;38mvj;>EDKF_be*#&7 z1GJawRax*8=-UZ=Mes*gi;=Soe?PeE&;5RE;&ynZ-I<--K3%@+{hDr5XvYBIZ{mH1 zT}4ArSqJaJu$?46%%(*i2hF;5hU1W3cl!B-ZYsG(=#kzb^gubLM_SWE__^yc!JAbw zFChB&Q9U$|gPi;In)_LLlPm|FI5$ZBRu`FeB+jtLL+?jTr=%ST{at17A-5xX9v~PO z`M#8Y-;0BeGan{SmvO;9e;$`4&yUegz0M9e#-;N`)_C-3zNdWy)E|tC^e?fK+u@}> zp?iY)RF6j%J>zo^S%rDD=1VT;%MbPm5x%Po-SG}un(t_t`4+e$HAVf+%YID2&AuK< zA2j=V^nQD&QOvLjPc+|jG~X2MJ09qI#L|yVG;pd1{|=Tr?-j2nJEx2s%}!5t9@TWv zdb0BmG`?p`PeQIvfG?fwcqgYq@vR|((#(Rm2+n|OZP`MBm+Kc3>$ zGQR~LJ->CM^V_bI!iO%;Z@WHSmToSMM+hF@ub{j|H{;L2E%bO?U76o@{>a!ND-P-V zbu$0taff~L_w&`d&&0atE=&C>KtKMS<~QQ2dhjm?z5$Gt-p7)qcyS}~w?^^10}aM+ z&u1>2WBhBQ_-8$?*8N>s>PI0M*IDDGbKIfvbpI=gw{Cv4=$)l_OQ_v-$6dK*XOMFN2Bk4xi6HY zf066^ax}f`E7tp0%{vhS@gVZ2$khVT*guEUg|{C3JDt$+9A2lxB#qNCSw>!hW8NV1 z6OQ+2zYKou0-}HSj$G3V>(|}e%kqKrGl0j_F@RfqKx|0!Q9L=$ zlqI`eiLP~yoBIgir~3514YN-2Owo0(rt7D~&7K}E?p$bKn*-A~DW{1q4<#9_?7Gg4bt+(P!BO*ty{h@W`kbWfn$$B$O_lp!N$c6Cf?8lG%1)e{%UiRsk zT~ei9E*EN76~4hX3#rFTzHEC+(s3@Tct75k>i1CGmh*KabiP!c@&W&Ov@ZbRL2l<%Cdub~ z$jpWDBOmWCp}+&jQIm)_;#@R^*(6_DdD7l9cwYqgl=vpwiSecWQalUqXz*gL5BOOx zP5bGqINeF}{%}|Cm!6_??1a}}*MtP%U6-@zOtr=VLO(?1oBFLiG%OHZgNj9UPYr)L1S=ppf?P}0qG0z-)Czlc^^m5 zQ!}-b-A`!#GUxvDj>mdW>;j}ww`VYtuH_JVmgrpQ`CeJMW4Q>yWBDfpxA-U9T?Iee z#ZdVg@v9Vu3UY;gz*Dqd)BQ_yhy0k*eZW&S`7LHWn)d;N56)$M1#+UnWJky@!LDMx zrlx!Ky8lv-7flTbPmSx+-B`z>{=tx?X&uV+zxzIo_gPwZ>3*Ps-befwY{-5ljn7FM zpT8!)pmgo;;~K(QjgQF7lQWfs|J~z-QWTPBz$YHh!}0KjXYK`^PR+KJ=uMPxgy_Fze@O?N94_ zIdOj>=P&0^8@-hxKN@;5>PNG^!S`gLA0zvIUE}9L4X_{GgL_|#tjAbRv_0zij<&Z6 zlV8>yd%jgxPTB7W!QyoB@%J@;e;8W~< z;`_}oW-2A;jeB5!g>S*KY`p5lo`tG0WaUs4WJ7m`5&AofPPLA~6L{7#9`XxLrkIryhl#+Xb z5$_N^DEruY9`VP6&vEvAL*q;8{Wr4T5)IIC425$upTaAie7{F3m6Pvr;C#%5L?zgx zTL^CM|48WjK(LRA;wDP*y@A+&DRM;m5^)Z10Q09`haTj43;5!@L>KTszr#?wiX8Gp0SZ13WzNfZD8l=|vIjXn$^vgN_O(|3UJbvGe z^D)G?O1+=n(_Zp+dWUu8c-QiWG=F(1PFH`Q<@{;YEN~qsz`Jhmye#V%_3mcbe>?A! 
zDco^}iz?TWzF2;a&#!Nge92dG`e#M|JC7xWPX0bQz(HRMh*2C4=fL`gq-uZ`{(VB3!U_PuMZ#BbXu+9ybqV$RDOp)nkPI?g8if3 zZpIHI1Ba^z|Ek2NrCd_qPX-W;r<2s>Fb};Q>hWx87u}Dc$5-rM3j1skcU^vq=ryxX z(QU~@&vj}45$O`|Rf*IT)q{SLenN$OKQQ)j_tW@*lLS63`!@|7?My@e4z6 z8$bJd{EmRiPwsMbpxoP5`84!VSe#6-$}ZoraK8w{CTV1 z^Mo&CKU(kRvUKBpb|H9tzk>1>-Td=bDM$Q5ecnp3AC3Nf-1vvGZiN1jCZNB@#0|k_ z%=c!$SRdXCkc*2@0sC7~zprNh#o+C2FkY+Ou0Ge)uy#`%kNCyR@AbK+Im55dH8l*s zO&@)(sbTnS`rI|g<9p}a;-`D98_M@({Py_x5cD%qEN4CRN%Z|v>)rqUkIw%B z182Pu+((hdJH_WY&OrJydm8oATV7V5k%{RH!DIDV2yW{$y>5doJlf8i^;YO(vWrl7 zysp^?{}1-H_W{vw+0$X43uxbG`^4@eRDf1ka zTt)i57juo1q`%Tho+n^lA%29H%ClUnG@rBR{SQ>jdM=|Ov&XjXsxPMXn%*@4H?-2`MCW5 zh+gbZfDW>60RFM~5t45Fh(VbTAwT;Y*)#P%0veA2&-fJ>T=pO2xr2y{0S8}L@h6ZA zpp{6cdqKyTe~6E$wCDVh^5ad}vw$kZPjvkw%B6tSb6$_Qr|k#u`bda>YUVH02fpb2 zn6}TnuI%-c&=Y$~I&j z(27fE_5RV`AD{x}^E&N$0`w&QH+FpmjX(O;DEyY)${K&9QTXlluax%3>t*zl#wiq+ z%bp#L-(cH_$cY{QawPsm)A1tHUF$LWjyTT?YP)Z*_mt5qAQ1GM*Y5khGS7fqay$j+ zUm3rHdC~pU{*>rjaGSngb&1@sig_)^UBH)^<3P_Y*7vS1mVENv69D+TKz{s7p9`5; z{0nlycFgP5PfwmyANU<}c&6`SdK&(RpNqcXRo}y6(lJh*lrH7Ga}W>eqc_l>^|8r^ zcaslmkX}FXonAekQY{OpoOeF*lisU__X4&kI{t)m{`bXEw&LF@{FTXoF3MlxXV)Ax zzi0ToDV%R}ad`jGZf|nbn;4q(Muu+mt}9ONx_+$}&Q#`i^5ZA+Wk8$^_+Srx?;fgwfTyjS3gasIecHOg ze|TiG^XjKhU-!D-{hjxH;lbqe*VKo0eB$_77ry*w|DJlutKL3VQYM^meBEI9wdD8l z%JnC^wwx#_6G}*~XsZnW_=)MsBU>lOLuKrrLq~>o9hw}nll}KyhsMW;MkY5M85`O) zIkbD{kt_G?0jK)#$s-$!lSM1hhvH*Bq2F;o-3N}Ih)XwVJCAs~QGqwg)q2Ad-tfq- zQM74S&<2xNKO>#lgxtG_NA_w2R@TUCgfAnI_?Ni+C24;B)QcEAK73+$??`d-(0HWv zSfn=X-&5Q*IXZrHHIri)??u{^fxbyfuGI`2Oejr~l z=fV$oRp1rDYT^4I+c7jbN$Q6z`pOV_gE;B!86KY?EplmL$EDX^xq0)BO*=!Sf}a};I}6ut*>uGvJ1-3qL%-l7&i*g^ z@MDzwhyVp$>J8x&cDhiZ)eER2l}jj)3N#)TJdq{rmSd}G_o&F>jLSA_G855cC7kZK-_^}{;C znPYx~K37eEeMZXAXKb{x{X}`N)F$@w;uvZdlxN|(;h35w3FXhGNmjW(z;}*n1rnF1 z;_y9n-*j9VKdt5By!fYRIoa<+`zP0W6Nh%~8k#URp%fma^L53EAzOl;u5pG)CSS<}_i21z((&HV#cog?S_d|YGMKfL9^k%G!mH!LdFJRiT>V?2H zG_n^i&6d?$M)!7?FUpV@`;ZS8IebHT-llw41!k*6HxYcxs%)juPMkj_b^YY;7fWV{P{?V|? 
zAFVw6>PVIGKR9%_OwE#frM6FJO?WiHCKJZ3xPEvqD^+8+JW9ZzOH+E zOk7_%d@?yU9PVyOTPK34NC}J*4&HdMq5u>pj>MKTiKIVR?oERA9Qna-t^L5y@A~MW zQ=JDN|3NV|f8ek7etGG+pY7Os^DnO2@uwdGf7&dX3ZpRmM0P)}C4h^4yt=r57-l3g zISC_*`TgxD{^WCUGL5ufnCy=EQRX>OJUq1X$fYo$LlYzk#+qR|bC_CG2EQafb>ID* z3C%8Cd|7M_!xP-GSQsUu|78C_o_GRGkWMD$Tt+GD`pJOo86Fu%H=-^WeAVGSM|VVQ zO1LqBeTglu-m&d3$aE`AWAKp70^6PHcS6P7f?B3(!iu-ZRA8+@^G7cWp zFIvpbBdM2-TmtQexg)vmhlWQc&p%IR48u#cA3kDbBedSJDYfF?f{IY3`oH#1_f^Sa zgCp`4^q)K(Z^u!DI1YFlMn@)xj*z!b3uUwYORmTbE~!#DT?KyC$i9)$8%Jo`G=%^Y%@fuHCWa%AHk^Sz!1dUYYovufr!Qhi6oAqN4g##mert8mnTU{P>a| z2#9vVhaV^&!NhcQe0cKc=Aj|XNx`wm8it3{4Hh2`MG{P8FGj>l%y<|cG3*+F51+IG zGd7$8Nv6`!^Zwo^ywGdO&wxK^{*Fd^V>JEt^WF8})9!o!@kJYt-}#xFKQeE8$G*;O zufFpBlUKA}`_TBGZGFW+jD3mBKh9tC!>{|uVtznv?uS;S5XIy${J6>Dfia1=iZl1y zcRu{Xm|}jCZvU8|3E|MbV3(gW{zs^OUib*t9bY0yz3w%;LYg#4_kSH9}M+?fAOK{NLdp$JgC!UW4&7Bw)97g8P9?%S^^HPs}&>6o{cX#?!fU6&=9C*Oibg(xssrE6*+odd`QPCKmHQXspH*@ETgEK=Eg`ocVsL$k{D4(v^$PzC zzDLA$=L`m3YPqF+Fn<`HoEX|)GL~iB3cnRleurvSth)3|E;!4_6Zd1?=byVRF5RSk z=cg~e)%=co-u1#AFOEw`nrG4@;}av_G(Kzn+@@6UoE)tLRq8iiu;WEnY`AtqVdsWR zuf1YZVLN_q*%FjL_VT3_^#`IWlUI%*d^0)j^{s~ljtpTUzR_RPzG1ks{OsCSSw+C1 z68jup2n(85@z>Yd4=*qJk`|w}N__mk;!9$`Qsl6$}Rkt@!5{%_v>NbiZw=YQhA E0X?I^w*UYD literal 0 HcmV?d00001 diff --git a/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin new file mode 100644 index 0000000000000000000000000000000000000000..34d17f1752fae7f38ab1584743ce245159ff2159 GIT binary patch literal 71264 zcmeHw34C2gdGDONog>TE;zgEZ$v(nPf?r>VZ4#)AL+-ULC!vPOv9qy7H`cYS*p_5T zcI*H}ae)vBC6pzBB*lT0K*J8Nk5ZDzO>b*JTqu=V` z{w==KNzc>kR4*0IJRSA$=cZ2({x_#bshm<9HmH9AK1ccLLY1Z`jJFFv*V0Xj>$SW^ z#_iq<(iP~r@-r8x6}aZOd`1mqE>hHDR`;tP_eyP5EqonR>*(sQ{ZtE{|CQ6?d|au?Mzq(vIJ5F%jG$TQo!*IZU5-j$%T#fAeg>QtnK#wq+|77f zczlk`TgyX2C-<+VefR!g9E&jy<-S`#e~9*L(oX`EG`ECc4`NaJo;cNZ*%FR3;<=z=@PjUSS{4NWgMg86g{O&m1{ZJMB zpqpA7sb3?0qPyCU(R2!bEI6e3ubuD`5mFBLT!BLR-Z_W(ZXxFWB=c{N8=(uh$Z1ix z0zRnWd1n;-nga%rD+nIpb3X#EfSfEmN6X0oW}EpsmC|sQqlG6le3Idds6)(;N|(C> zazc3Ct;?O@ato)G``fgXL)$k|zo*jF-U9qe3%*R6+x=*o<#Zu*2A4b4p!=0=(C~h4 z@5KTK*Ag7WF?iB~=OejwT<(>++^HOuTZVEXUn-X)e5xxqKy*$`y1Jd*5vK1On0{H5 zXpsKpTrR)L8z8-E0O7DW~a#{>SK)+b49& zy+YGzG)^bLiB4a)=+qO?>1IQx9-;%$h5F&^{;M31H>4B#2|A>FrsHyyOyn;-7QXt3 zp_A~F`=B?#eENu{*?lJ6!kF9BE}i;c@Is%x}Qw=qJtJ z0Q%9ZmVi#4Vm{Y$)rIGPC||m z(8;}D=!AYVoyzJ1O(*c*Ex_v-I=RQRKDb@e>HTp!0Zw$x!H!?@;*ZD1ev%{SMH*i^+fV&PJ9)mUTt@6c?nCL;$zM}S*0K2^g#*;V*0JTH)_lQd5`pkO}G z{>PV~SwQ!u!^Yl#&VV}ujxYQta;Wtq`&WSSc8#H{X4E&hD)$({h z;h(j{`u%0$m!@&yUyM@h>>>C)nE%v(o|oV0doxd`<=*h|>C?bVp|^T+4N&r=pZfAIXPC0&Frpqmi^U1^gGM`Pq(|XN~ z!+}ru_yJx2zpFf4S%r@;|NY`)SQD{3v((Ee*zN-9H?5abPkCbbICcIE`FLj;KHd@X z@l7bcFdps4fM2Mbp&QwOd!lq3a1!a( zg4p0B%9XQ+&A-y{6J_de)9|S>@D~c){a4VXQho*cQgzHB|IsP@fss%i4u6D?E{+=L zh0jS3y79m3_~hTXU&H?e+)MtUiGC2={eQ9NL`Hf3Z?Wg(DarG%V;&g~`x#+A=@i;8 zwEnVeK8f=(5qNUGC#-Mhld9owLHldx5r6YM<8PWH{zr-am?el1`=2HcX8d-h51?zb zAJ5~#y~&F9YaNc%P7=e%ab-&Ycs z!K=j|bxi~jUcp5yWKYG(vW;IzbR3lX2(L+vch==vEQKV^0a9(}(mljnY@%o?hX z5uC5#TSRZ6KI^TF&Ua+;6sI+QS&w6RW&G}gSqDT|LZk~Y?=8y5zh{CEJ|%qO{yX@fl3tcP ze5iMm`89;w^6(#3@Xt{%s}|u`&jyTnF>DShv$t zv7JD!t#-oo#0?q`uiGdy572{pJTq%2kZY?Qm8uJVE!W@==zI9CO<&b~=t_I;e}aEl z9?~?9W>A{>1ah;E?ZI~-E*B~=7q3I)aLs{_z>IhXeo_n8w5U(gbwi%lX(2a6*QOtf zzB;GH$y2`NoR*9_(W}UB%rGDR2>elKeY~ul6}b=L$$BM(+j`}(D)_};3hUR3|G%o> z7k_Wo_*IkUhi1oD2auOSyn}gA?vJTmP_dk!X8bVYry(uMa+1S*B9{w6+8}%nJS<<1 z@=3mQTn4?UraCAn#}ArcD5i?}+MmSywx3P&+bll~&9FXPbTZI~grE3=;|=#`=vSfj z7iIZE<}HLL&szw$_(J)hD~)#r+F8qdt;eZ)*zaA&ɤrviC*){DH$^R&@l8Kak- 
z0?B<^LED1gNe|L|siiZP>&-RkjljRDjNkooI?#hX)Sv7b(}T5*r*=J96Mn4+mn$DO z3;xZbPsjY5P)_ZA4C3E}@ML}q;TFHS;@^aD8$aj0BYLn#{A#(({~7jgei^LG82<+I zS0w%t|7JP;ApPO~yox-__(OS39)Ae8>H7=DKX3GPe@XahTski1{^Bk(!?@P0XYu|Y z_;(kxe+Q%{uEj{qB2V(*+h z?49@-Lu5c|c_ZkxmQ zo4Sd1X+Ag(&wfPb<9}<-Lksu6O|@Yjuzp$Zd2oLhZ?E3?N$g?IX`CYer@6iI@;OKS z>Y@Kc0tbeur+K zKcn%`J}bysB!6%FSuzh{9?F&zYkk45{QaoSex2OjIYwu}M zkK?)s?ND5JjOj(=mv~$`S*#0%5EG7*Bt!-kL0a@-fDkp66;M&FXK;{ zbq5o_Vp?Yue-gh^-p%%}%EX@wS8Mu`e#3k;4C;HcZlU!4-*(3rzMam8d75YK|2Dl= z^X(EXH#!dverLPTgq#}Bx8~~x%$-AiL%S}&813L$)0e1RkOyoJ^w^V$ZWZR$;@@d@ zfb?J`{;f6c^`VD2-zagfGV6&FUjiuplKcIXFY}$1{mnl}_4B^3A3z=oC`EZt)(bhw zqp8%DTEBpg0avM9Uh<6WPyHdj20e-IRH~-h!P*msiag|PcG z&pXn*vz(JTucY_A|6%1Fx3k|D=;PMk8v6-7{tRGk(*0TBa9+FlbxQSI$nQaz_Si3J zej`6u-XGC;LVc^6xqcJ;@Z|Vd@{*z5wf&1~`d7wF{Lk@%_dmJ)(66`cxcIFhJh{CP zZu=`H9-(ys=yC5{h?>mb?DC)~sC?Grf5?OKKHP9U==W1kufe=SPTkv)uSQ%Ou8TUE zNwT}-H<9HUN5(>2~;{XZxB zw)H;m20xP*yX)$G3a;L_ZT4~Fy(Z|HaDR7B@Bhx~{ovWdmG^hcI&m{1fFkuP4?T4& z{CoI~Cvo54Lmx)o({!ulJ?9v=Ps@8UKJ?4@>7uV9{8MHhe{$Y4$PY1}Me?1>ad>^m zMed8n?LO|19{%yl&7$9@**_U)2v637A>7u3Hl1ueOYwbgBtKx+*L1+l^FI7(>vO!H zCqlQ`#y81y6gf?xUo&XOd?ft~=i~^q{G2()|A8MC?3YSykvz|g)}@J$#a|;jAZ}mug@Dh`r?`>sZ@LirBG3Aw zEr_2b5BS5fdPL&q5T2|@Lb#;|bRIB-+x#PR3*j~5S1^}RzV+ZKlYUffu&d;EQJ#YR z?CE(5A9fyOi|79qW8cQ#vK&Z$Zt;e^`W2OvJYds@1iyDa^GBE;x5p*?7{Zgs6~ZmL zsV?3Bg^thTSPsABfXrio#z*7sh1X4t8_n-oc)Sa^pP~P4_Y>7)d9mbvhV?D{T5fcn z5xo&P3got`UY#SIs;+a`b}V^(73Kl-0Uu#q!|d~Bzs~SOu&!bBn8{l$)N$2f?f-YN zUfbXPJl1QVlhMbRNXVymjmWEeKk5~Tj%o?-yJ&ML&rIui9RKk9U!k6E;}s>We|Uec z_l-Wm^jFJx+{@9g9Wmag$+{ESxgN;F4A-4PKCL)k3rcMtj@zF=pDe?C za6HHJNBDZw4z&Mtmb3D9QpO#^lld@&$K`Api7Bst#qlg-&PZDS;=E;KeamiduIpcO zoJX(P`j_-SluNt+k~aw9ae685k9j^qy;Wzu=k*eP%zivyPwU|#rzRdScBl*S;Siu* zj$ZdheTvhN=NCJKAJ7A&7p6MQz6H9kobL(V3l=~4mov@tdq2^3Y$MYtq@zWrNpvgb z=hrw7rvJFg`?&GlsCmH<$53f9Ijhh#E(m@CO$KM9B=R^eZExAU4Z-@Ww})8<06l+XDp}j z_1Kmpx?d`z*JGVEQeOH!U9K~-zHHW$R%rijIonJ4sn`cY{aB3r4cnJyPjxKV-L=N} z3!(?`USJFRvrCbM;(j|b(I><&93PPUIOIRldp`S-NBJi`jwQqwc#e*9o-SBlPw~F< z<}X3rQ9XtIo|n=!CGvV)%c~xbGYLJ{%Wz(|@+jY3pnIGTbpiOh3HhAcahFkk<|16r z)Ox0Q->FN}T`A7zdbIu>$!C|zql9)OSzbc8Eicrrj9=a#({diiwE zN9Qfp`F>iLPg>_S`{!`H74tFWFC%#odmmjB=BOCqj|E$VKiv12acKU~b}u-ez@K4% zm(}(@h&%1PE{v1u>BzdM19@P6ll=(PU*>PpQz1VaJJZIN_1yo?dK2*m^i{(TjNa7z zKD#&=`Y1X64gBGSkoUD5g-E=toXs`tPgwy@r}#DWdz0-+2)E@=-U|%iW%j28aC@H@ z#Dn=H+@GTNVXkoUygy|H-p?RD3if3h{&$v=zHj~@6baM@u4wDWNwfdo z=tsII{cY*TnetuWUzz^Qa37uMN3=`(5qiO|<@8OL?jLO!g{%_Y*iS<^JkT4SD_Z8f z31}+u>#g%9I)mr&^CrmNavb2s&zm5*<-CP^stVtTUJm7;Mmed9pYw7NzO6TY)}!-? 
zU(R=(J$)XH$Pw8c6AzetlF3_|xJm3n7%!N0)Isf!bAN#6WpY1LzIS8iP6X%k62Cco zJ}>araz1bC0+Czqh4Jytv_EnC8uW+xb~fh)w_dCJb8QuVXL+o~FMLii`LW@=TlN8` zw8reg6E<$F6OjI#7SHMU9rH`?-7ELKroY6_K(9e*-z%Zr0(wRL zB6;xf9R2Y2SkEb+(Yx3wr)Ryrf)DH~`Hig-H)5X;7{CB`plf>mJt@rO>-wi)c z%k!xeof{zg3+SpG-pAwZi^kz~;LisFJ16!44Q0rm13yxJ#(gW=LmtIFA?*UdeujIG z9w+=n&aY^CVb`eK%etXbXK8)#h?H-2fm!ToeepkBXZ#fJ5g(fV|9zeDTlDcRpJ)6# z>Wn`Vr;m5`{Ni{2rOxJLT=HGk#0X+`p?c zevVs1JL_FtXZ)0(i_Y&=b;eKYED`+f19ir4>3R2|`Ni+`$ML(>^c&@0*Un!5c-yJH zNPSCxde64tXIj5$|Id3)oGx?Ln-nic`R3zw#&78b_hWU&pNjY2yR6RmX}vN^pQ$?I zr~E|}|H&AB%3oy=?~|S9yt4aEg17!Qi2E!))%zVncp{%h>f3g{8h&|ydj9asdu;QE zU*4CRHU4Sx_N{n48th|T<@gTPH29n)yu+YYrW_%g_ zer;x{d~YDLRK7oe@Ae?S>>!^n-z7*dk>}15DTnvD-pc*qyacs}_<0(CdW9r$p6nOPeSXJ zScho(TylMH+Z^LCjX%iq*mi*Y-2%?rlspgVPkN7-)(J?T65onGeVpm_SXp{W{y&5# z(<_8q`c(4StpPo!tsBdGMIqd-4|$2@zkX%$`wHaqXuW5Z6W2GLqHmfO)ra4kuwAPV z(KoH%xAaZt{OTKOZ!UZo@QLNedMjgm0zz8!u9k1K`OuEzh<`#poW_41?L#ZH{<`x6MtS4=g+3mCx$mx7jnOA_aFOS zc>P88vAQR4{WzvYDc+yu-lNy09Nb@or)Ab%Jj!EMpu6|1Ir;-sWX17|Lzat_yaj^s1qO9`J+$A+e0g}(h~tc*!aI<}xAd6%xn%q; zJLCR|eLw4cJr@7E|CHSRtnu6Q`{z32x7QUuRcHKm|NqZCi9XGv=`Wx8M4hk4lH;DBPdLG$dYOf2@e6Eea z6X_Iz+jOdipXPH8_^0`K_r?7}^Zt*W=Waosq{#7`wVu|M&GY(~n%R0<%cCG4;pxio zIvvGf6d$%cZsI|6j$L{U$CvZ8u4dvv)q(Lqe$BehH2TyUmyy1R%G)z>U%+_#^uE}) z%f0ry={+dnt8dct2w&KI#qtq>*UDGb@XwWRtKpv`-@4WClb=+BT&NCNx0&|drn2%c z_PVoq@6TEXYJGMY`KWQdxwTKrhn9<0tbpL!Ay4&l{>I~Uq{XHSkJFSncgg&JAis`sB z2Yy5AkL615YvTRjpH73rkKl5rbvaBQ-~U=)e@j=a^TzOEpG^Gzbo?HEZ48e0F5~wZ za}Rs&WX|K$vuMY7EACUOJ3MF2eqVW#{MO*SJ(Sk(qLb=q3*pJ_g>akBv>rfo%=7zaxBbm`>trG0b0bVkmX_A_dd{Nq*Gyk2L@(>l}Q38T+&PEiNoWzLxf zcg+?b*ZA~#(%Ao(mvyV0UQf#DbE3_;(XOmNDc6mk zD{ao3&g%20vn9Q*w2S*e{`)GLJoA0I46onebw57WV%gnhTsS8O{aj}3nVv7lm-hvO zc^Zw)6VBzpeJCGT1lIGmM&=FY`P6jYLGa#r8mM}M`7`~`@;H!7R0FZ|d~)U-FvQt1 zF0&8P=39#|y+bsfh#c8+CiO%8lg#%ayjJ{7zexL4@auCX6~6Dhj`(X@d_dp1!&w&k zeDH$l^&9PaD9?DkF`^fL7ueL}eW1=MoKN1X zu*(btW4YGvn<{4q%L^uk_79}ZcTyo|(eK5iRup|}zGIK*-bcF);rO}T#kkksQ;N55zAL>9@_iH0HHd?7ZZ7Ew zmFgk6BC+6m9Cwf>{bJ} zt@a1Fee9OB+Ar5`t?j)o)?OO=!B>BQxdU7A`7UUdz8|2L=<#IIeD3`c@J+$&=-*RXAJcwN(yOF@ zZ^g0B5SNq8Uwr-mH7DnQOkfn)FM!X9iQiCJKVw!kUUQCu_;^J!d!41NvC zy~)>;AH9Y6*T9AE!QX}tW&g*N;}f68&xZjYTIa*GeL(k9_=5S7`fvKp`}wJT88`X; zfu1J)v~*igKknyQe5&mVeaA=BQO+j_?27rGrM5SweREDf+LiMMZ_wj5=kS~IwC^vR zA9`x;3!+agI}+$UXMOa0+hj)yG;ij7eWOR~rN@d|9vciD0{axx&*ppEWFLB{KeM$D zbj~5uMYj{sWuEMTYqeLFZfR|Aa!d65JWcE!=8^O)no9gY46`-z^{?PStE z>A5BUDSTMFp3e=yZbP2317Z(yTF++(M9*jY$S;PTkK2K%zz$$P7W4|@FU0fcy!Myl z`rPP!(W5ia=WScHT(SO&=&kvZtE1?80loi&T+Phy`tK@Nt#P?p`bDj;6ztr3==<-3 z!ATzxt4HJd`Qpnz{R3Z$ew<-E@O#gfDWmW8_j`$th@BGkJ)h$*{=L!r+TYUt{nB?= z)B9P`=lP86?;T80oJRWI_)A*f8^1lZxr)B$^NZwM(W-i%`cM9Tb-ho1BKh%W8+pTc z5xbcE7asgay>DL5M+Cjndr1!A=RhtI&*{8Vn)6OeA2EIzTsop}w&wlK7qEYaybk+A z*tbk{BS(aux9!pRv-s|s#6#<4{Z7*n@0-fL>1>+DfqJvu=gi;rp!P`bl(+BpJkOI| zS2K)HUajeKk$iux4Rd=B>CIr?HanS3@Xv1)K5QE{^PbA^`@1T`^3irk`+t6h`4;od z-zN&5E2l^HtK%HrXAqqc!Crt;J=r|Rf5<=HMC~Fk$@8}~+wZ&-aN5U?d{3q)r}HV< z63?r7FWA@RRq8CQf22RhbbnT*`X$eq?x#HGGGsc6Y!|?f%x_tfm)s%!lkeJPmDxY9 zN|Nsk_=?B14Cmez1P}AsvYyI(mQ`uaXJI6o&oXQDd&z5NxS!s7ZdHSfF5=>D30 z<7eZ(0DU=6_|17@b;A$!#`HG*KWl#b@2#mbe!Ksx{nTVFzd(T@+-%A z%U4RjWxrngRpt0|lHw6TBdJN+E*6LSs+V3(>yfF` zvcGIO@Dv7!C2Q()X)O9tHUi)CZpPHGB*C)ij}ekL_A{>JObm0KRE& z6#A$9%J|&}vux+umz(`6#=n|jJF7K+YPzS8j}^Mqc|7Oo@eJ7b@%<^Lm#hOLu4$3> zogSfAZoP)L3BAC&R33aM^n%?bdLhRKIm?o|jOC@xx~E=`T>d7l4@^F0EAmvBU#ufC zzaam-PW*;+W|G>;>pbdUZh-m~S&tO>KvwIg>;;maPMh^eflo!(Bd5t?pXK$H)4E`b+x5eH;TTv|m|H4)I+T$V~*EEQeuzTMnguD5tg^qWvqq5jl_7htT4$ zoBm^Yrdfb}Lx1wDZ-V`N9=%UqXuqQDI4DoEHUdu`M_51J4<+AekJL{Z#~kphH1vw> 
zXP|v0G=Dtq30hKrvAH-!vjh!|Gd^KGOqyW??Szd=drxs!siA={F3DUg!S$Im~#ivAbl^wwuexg zd@j-NiyhSR7t%L*ydm5kucq&E$dChhC-I4{GtD_})%n!+%j<-nd}`hB+w@sO@&Y?mFZ$d4UtMSX_Iu)_U(o;L_e!DXIX;4ZlIzv_dX2t5 zSFXg@{Ju&9|H7tVTxp*#kjLUC<=zhvwI}<$s653}Y2_>aZXlvn<0r^>aPZ!)_>G7b zh%ezF@cL$!?XQOii`hPT4u5C31HRZzkhAvjxI7@g$$C%eJZlH`FP$WLwCX9fI)xQK zY|KX2Nnfgh!}rQrLVij+)(26K>;sJt!3_6bc`rwK9v8=5WH%{Zq*CiL7vZY)0P+{? zN05G-r1IL22;!~`=+oQzYVFUX02qS&m9lXc7N`jBXXSl}EY{tT^Jssgwp-Lig4g&S zL_5C2R3Q4S!ulooVQ<&?o}hTE{at##G#x=t`CdESBM*vc(D4@hJBj;2$v9-1|A z0Ov)B0hs<)9MFkApQQF*3BMWpf;&E7>~SW`<5`uHc;Ty>?iZPO!(HRRpX~UencvJ+ zj9-~6e_y<_7>uKj`{A2@=XP=bbp6hQnr@TmM=$Uym-i!f zt}yhJ_3@4ebiW%QQfOBVnDy)&=O;U_2+9lHRQdv;M|P*s1NE35Sxpb&=T5K#)BhHU z3yA(b7!SvBkn^BjyznFQk!CsQSZL)=D~NBq5la9-rQ zUBNkY*xoGkPpy`D!TSO{FKLdC(NDep4mk0{5^FwsG~d&H0*r^|MaGxfCG(Q?h3+Zl zQ#~IIAOOEV;oX8bTJt5>3*-m;i3ne_p*!v`rT9kTBNK0dD>9Qb-n_&~k$jUpA29oW z^uB$lQAF5;$Ltf5a#QhglT8>`Z|AL+ek6S?`nE- zTz0(V7p{%KlgAs@xB4sbZKS?EUZH1LzefBj71%$DZ^>`s__pJ2&96Z|#i=E}1s)yW zdhz(S^F^8u&u2b-O5YJVZ{>m>f< zd4~*}eSbB_W7fH9-s@<*(2qxfnfQ(PYA*O!0w38&vp?ijb;fU>tM&JF#!vDV>A(BP zJmY^jj(^tkN*fg-{q}CH6JB~BJc{=falCWp7mMEBpVt|`9sj`232Le% z0#EcOB5=!}@OYnX1aA9bIZk_%T0Pe<{X?yE(02 zcQws){krq#+OOQm<6Kyl4`iGnJb9i%xWxy=h7^zDD(B7cxt5W6vH4=I^Y&beu8i-@ z(RD?SrYoOk(-qPsd7L49u5`VV=W*WTS#ey3SrmPOAV}*M(JQ!>yy1$AB@XgvKScqL zmE!MZ1o1@%_mqEGQ{z63pUD2PN#2j&)jU%^r~aHY@k4|U&3I^CIiep)f3n{6?qYg| z^HcBnBY%YBXU@Y1^z`no(k|DF@KqJQ!8QwN$4}*HobKIdO6G;%8^Jjy{2arU^#FRG zzXMe8V|GQUQ^+47Kj!^qTIZF3hsq(pHUPZdTQl+;?@tvU!u<(-AIaNtxqDK&TqaND zz&Gd9z5t{Lxu27{G{65tW-f{!<#>MyB_24Bm`1+wR;rIF<9=k7kA9%sS-dX-e2Nh$ z?`@c08ZYIuaE}fz;r4)^_0m-Ed_6yRvb=BH?4zEf_uUC^u&%B1Te(GIzjm-6fl1^2 zLVpL?G@*jB79(v%jToxafSNQqFbQ<)95$N@2|?_m5jZ{{L;P#n&vs>eagfE$#Dnx zsgL*xbdc*JeQna$X1P+o*e_S!z040iW%&*LhaVlnlj#=1ZMspuvbXC=0e&0KbOK!Z zyV~j>vj!?SdaRi&-(9Mi*Dnqa=t?A z6y9GgFm~h~-fx&$Aa-=BQS_x!ALZw2!8@tETGOcQth%3{Z5exup9DCO_f&-20a zXL0$L;T#Lr@9KQMAH+I|+<%Dfvwr$Ey3eW%+l8)wNB79?`x$<&9%^X& zr**3CS?-hUUai~7S8hjz`#ozL$9Wq6KuV7z*q3@-=%nvg1@JLVrwtm;>(wL-e2@KH zdEy@jJ0)(zxMhE28c%YN6a1w;k<;i|iPx3;Ujrc?V4HFpIm)P|MZCAyw0X3Xj>?)LH^1j6Gox-oOFR{B+mTtTr6@e#@ zE39wPE!dZsLBGi_(EAeMAF!X$^<86UWnEZn0u-#1eYNwd`uoh?FGK^Rf62bgW!=WC z*SrJcDL~%ph1c%)Id#TQejw=s)Bm&Q7r)v6H*fSgYkvE0-lv|o{;#fc|84%cug>^u z@>`*=+5dxlTu*WbeG|HO8%qdMfknEl^2%PXJleJ zBJf0g7J=LPOvg7!b!$6s@)D^YvBwU-f1-Ne|G~a?e?#<}_t&t`1#GJt(E9}Gyovqj zHSDpm+XIlthDi<2<2l`*kbC!Zx`odLQR(&M*LD9;^f%6};5-7N0`VjKOkV7KmiHrd zBaSJsp3AAsGBoGr2H1bj%Kk8Tm{RV9_<0ReASvkub++t}RN0dFnYqo}F4hNP_;PW4 z-n)^{fS;1>#)3AGk>8y@J08dGQX04S-Gh!Ka-rVjlh%J3+vnbEcMIF3&qvFk@48| zO+4&i-ofuHF^5HCSMeJJ)%!Q=WY6h5r>Ncf0PYKDw-qg!cIRTx$uEiGzrW7-ZN04h zk$LO?`=$TM`2?PynE&IwPvi;^d9Re-4<$KS<;2gcKs-U;aawt{nQyJfva>s{VkO>J zEb#n+K+tb15i>I%ne!?jm;BwR`To9>-aj?7_lqGHeUy(``AmJi$g)$!{|Y;Z=}+g2 z;ODmSt$tt8JjXnB=yHf9S3XPiWO}F;=+FAtl*7F#hy2sZ%TR80^>R3XauL;Ac?HVT zd{)DI6F>JYcxhXyRJm{&X!;W;@?}8$ z81R8A@DqJk_xEkt)wlK9&4rzP*KX^>WXJ!cn`QrpQmXjpJ$&p$r2=~V{?PF5k>T;7 z@rly#M5Xe8#fX*q)m6u;l()ezi$2&^96C@M^e0CA@zR7pT)M%(t~jxK-$q{-f{~x~ zABgt5gvwQUBdGJFO_Razq#sY(H1H4qyv=#VFII2*i$8tI2fz05^whW2-|YOSiwPj_xVkyOn*ORrg08UBeAQxiw~C-9O-*}u}! 
z((c0(u_=$0-F(&5p=A^ijRSZTa8cBC}8 z_1JLn;LvW<%5}v-|GHxnrE$NcM`9py9lWt}`-3+oQ6+tUkKq-&h($`da_(85=oR zrYe^(y_g&h5?$g(A@OKx7{lnuWYUtQiN7I@ZsiF)4bNbFwKhfN>OS1i-?VZREzB3Lzj=+w^A z#005FvYgvD_}3j6*}Xq7o~WIB@Uy&zQyEbVdT;b@x_yqfB93CTM*KIXd#rzKCoY!vC`mWM@nP+O2t7N#|VVK2*YFG+eXHA zjTMK-VU31HhLw8TSwH%eU8ljJ?zd!3SfRWm-dW|mHNj@B%{)5 zSm3~Ve_wIDyoxD|j+Tbm0l>t=OElbPFyy|w1Mi2F{~Ig8CJ&S98%^|c+RImE2>O4; zILJF(rvIZOmF*wugXDeeMKQsx_I5+L6s{Yxo`$!m|6~fL%KZVpYeWlJQkjbC^RWk} zlIjG{nm6ag|3lBo{vJLsvC$ttynAhrQxSUHHGL?gGAmsSlef=T%QRaX64I|@2hx%k+LI|*+U(z z!XJl9M)?18YD|OCgF`nQswg1EalHOk;g9qS>xD99hJXD0jUWE?T^~Jsy5rC{e_YHg z?0x^D~kt)U}j9*5XDjM_xHc` z_Ae*NJf8i=)PW0J@o&6%q_pekB`~3-agqdM?GTe3p&s#n=vHC#rHOck#<}B(FnWmb zk^RH)lLt6XI+>Jn8Ktb-Cj+u)Xm|+SjH#E(@!4Y!JOp2FXwR{oF`Es4>QJebR*0%7~2#q2WczWwP3 z>}OT(WBeQ9Cr`uQc?>CngZ`G0;fc~w@+P~hAgcsUuW1>OUSB$VF|;pYOLEN*4-HQ| z?R;G^3ZJf>@%5)l4FI;Z3cRTbd{Gs6whH`;;r+uSHw;tQF>!2kx^!Sz!1lewqAPpu@*2r(aZYyrTM4#meEY8m(fWg8WgCD2NXtfFCR#MPN5F zHZ*Yz`EvPd z)wleF$HAjeekebUPWq!1z6S8SKK={uegAmP*5h}6;pV?>9NW3SW5+8ld+6kI7GL|x zv3K;pv^T-NtO+XP$I~wO$#fzID0R?^K(D3>qPH$ z|3?K)1c&w&JEMgYen;Bp#C){vgyOMB*S&goM3crqa&vssiG-)&qvM;V!kdI&$2Xly zc&gO#O&>GQ!}&8LV7GOg$ALmCOu;f&#G8AHLu2902a+ijfIm#GF>AyN?3^f$ae^i^ zgt3YktDx;L+$<>0I4($g}~p@(=%yKK)(V&{64+%2a|Q zzaNg=-#v10^gt=eXH8^_}t2eyDCk=Yn02bDY|Ns?nJL@z-R#}a&GjKR|q zT(owA6{-?>;MUR=ckSq*8!9@8N3NSjve=MHH&l*4k4#i{5X_hYJ4hkwbwfe^|43q1 zEbPLOiNoPd!mlF}mE*%B2P=;+!awVi_^*uF4thdvL(Cut5cvVC&FUBa8+?1ln7JHRJ1tD)ayD{gp)=4wKm5=t57}y^6oS-F9SUF_4tx zqE*(Xew$pY;l<{8v|Cncm5VGk1w inb&;#`B%UCk#o9QCNF>C?$5P9<#~HI?6~?fFZlm^R*jSZ literal 0 HcmV?d00001 diff --git a/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin new file mode 100644 index 0000000000000000000000000000000000000000..55d570779dc60bfb6b763419476749bf4287307f GIT binary patch literal 75360 zcmeHw3w&Hhb@$wT>8@-`YuS<|S&FU_g-=-jOwq^wL{ zgMS^zbM8`VVY5=lFTne;v`W!)I3>?~Ql4IuJb^s*0>Ec7{O&GshSfbv<-VoVVJdgj zG3hx@{~o6=2PC6dNita1`>4w@i zdMlMmwW@C9aFSc_Z?~h8+qm5h(`(qt2_03=c_-t|fu24c)e)v6^@IAO+zx6-+gk58 z7_V}dp&sc^8~USEC*UhW@JtKRU+a?RJAs=%>3}=!KTkcqr9_ z_N>6O&+(i$;h9c&iAg-Km+>Pq@aU#fgG6uDoz8=vlrfz;9}g#$cOId4wUzNX$w4(l zucC*oc^B?hLiurOt^Wpj$ z`LM6Ce7N|hnm)@xe;OaKGt-S&nfTv~+i#h1auN}^Nq3wy59+$fpUBZ!+U~7LC`!_i4GU`kO*@QOd(O-3j>TH9YT3VBCnROfNhbpSudP z1#)rW?`Ziw4C%)6FTwK$aF&}3zp3Ff3~!+Z@wh0RZ;8$)@*aAiMdL}fP`lk1BnG*j zpJ+VC7>^s^k$LK_PUI=yB9P-S&HEFJG@kS##NII@8dpN$A<BA+YFuhsog{u z8ZS?`UuC$z3w=7FouEU)V>-4YXDnYKnd!R-cExeVEBxf1hjAo6N7`XZPrJi3{us9z zwFc>z8a~P%)O>Wi&?lac7G0z9oAJwYyJ7j19Y3b`qP?bFO=h>swPe*0%lJSG#$$XxDyNqA2kC$#nd!dhkfBpn z=3QSi_shA(&}jkT!T3P{aDNtjCQ7G;ZxlMY4+)*Xcf?ql+E4^v`SPIOly80y(_s)Ao%l{Qzm41BEj%i8a_GA|utV_P3Qt1XE%r+5S%&Y$4t0hdPx>MH4K=X6*a{tV9PM>x z;^8f;H2i28_=`0BQ@}y}-j|g2en+?WR|$FlrG7st@234HPdcoR9l)vmxvMoED|!d@R0vYpc)+^=6oE(1|pz@ISPi3g}!w zddp?f4|g5#5&y0S9Q>Pud<(bV`GJK8_BinymG39~%TE!19YZ@w9)NE}?rHv&enSYW z7S#(~gQkZpKKLFl0(qEnOOC35KZRrt4ncd=VRRk0i0 zt0U>isnfSa(ii*bThR~3W6}K@ro+W${Sfi@19&{01Gw#nRKvd*^{;3P$~PK+Zl3YC z)`|aGqCZB-#3zjZltR3M>}RGAo&)-w`-AuO`n{Rz$NLWb&h5ebt@<78B=IEh1>^|D znP#ZIV~7h8e_iK<<4Nc0coLAp0f&u-FoDBy3b?lKP?nfZQh zh`+)AkQc%He;M)>+VWNSFo4JNNdUL`@cJtF$$?uDs=r43><^oMLU*f^pzfWD<|W4i zl9-7_lFPpmd6K?Y%WuT%$d7JC?B+PZJ-m}V)$$v^B<@D?kMWEDs(RAq{c5q(Bt6-e z6nX3+{~ai;l4m&M5Dq6(Y>zA))^<=wN|#TiL~f_H3LgF5m&%J=PYqCc;90HV>qw7L z_=w-Z7J0S@5f{4)#@8O*kM_#=-Sg7yH!r-z_!Fr-@#C!Pflgq0Ew$?L>C&M7+^XwY zV%4Ln;ZJCKB`*?sB{jX0nqG)&6TP-F{>93}f3VqF|6KUQeFgZS5R!|w{*(C4+~wvg 
ztKhFwE~~}DuaGw!*KpfFzXFYKA~n;6$FRft&FAyHiHkaI62~lwgG;$%e!hOn=Wf&T zD~I}q+RpdR1!t0$q3C%C1@qaI^X;&L*=ppk3XNSdU`N#q~28;kHp9O zH2syPyK-h%k3XNSdQ_q@_+>tm9M4f)r1h^f|20W{?hR;n0dy97&bQC-&hgRK_hOn{ zpukkh1>Y>hlj2!qKWFd+GXh5TVe>#veTtr|@(!*3Q2!y9TmMev)mb?wPyBIKE~SnR zsRhW8qH^vI@JXTVlCpYM?CJm>uLlFTt!GE7;9o#|8mhlm{5z}Q7r7O*uSWbz+b^)Y zJOy1wVx<7R?Czp^LB&p=A7yk%)(_+wmJ zx5zlTqu>+RWd?8SHqnRfZmJmR;3QsWl64yt6h4AI=&^mMk{i{flr6E{o?jl%_k{K{ zlImC&^8w2v$mh-KCMuU~lk$)Utk=;`mIIEG^5V~t7(;pDM=1}%S<$ZC6R|7vC3dA{ zM?yTayl%PHm)C@!<_G&j?ikutXj@m7Z)MyAc>K5raEou@Wr|){j(S$`eADX%s-Nk< zjMJ$(BA0!+bXH!ORU|kOOQ&O=#$nu)_Np61W z$MahNxA@H!`zwIk_?h44E+>_?V@|RCb#aOPb(ue($^N4CESb;ys?2BUe=uL;=^Map z`c5+bd84m8MflMhffr~L*-Uo#d=l29)Zp?jEw}-ofqdfX1B{y1TE6*4OZH75jktDf9i+TwnS5DE@2!kLRNR zZqN7ERKYKN8kDaQzv_Tf2|jgy(|MBYwbpNGJMFc2FVJ9$zl!d%9p3UhJ^n%mEG`Kh zl-L)69mn=$^S8Tys9G^U4ZRj3w@QMSWgh$UYav@;SQ9Q8FR|MLd1&{K;)Fv!9_8Bf zdtDX!)jK{jRq)s8cMy3Y*T`=IzoDPv&o4B7wR^Zq{SwEWGk>^8s^G7;f4<-0h~J^( zZ|T)q9xl}OUaRAYU7slNx1;`mL-suW23Bf0x?1F@@NaITwj%$T?~Ux+Uc$E|!Djh8!n=_D^QjpAIU3 z@ekr+w!W$sueaAbZbknK)D9hoC4Ds%!f(wZtlO}hSabq@9nk}Nob_6Vvzh!!)sa?5 zssHVG$NbL0cP~&seS6)x8g!w!xabG?XhosJ)$r3gRg0HX4+GZOpHolc*@Ak2xAmt? zFY3R<2b7amU7>c`bjVijhx-P=EBRY>>V5Y%!jIN!`)LMlCezEF7q(euXl z@8D;{E@-_)kGJOQ4Ca(W<4f|I_+k<2q49i^&c|{C>3SV9gRwtO`Hj8l`=dJkpq#5@ zep>#h2mE)SoJ#gfoD%(|`qFwGXgH?`8fYe~scjWKUB(lI#H$8;oO6GQc+?$%KhAm<@`==EJe~yMq4C%L4%=Hao?-jGyxz6-YCOGz^7eR&eILMW zdJDe<@EY-}9?QN5Ee?a;ZZ-Q}%i(i>hIw=+lo|9jkCUoqy-|KfTS;*Xb7zJx=3ka?ZfUuOT$4CXuA zA>!w{w}Gz;lpp5+&O!g~z&g*}cuVPc@~P6dW?z8W*RLeb>fVWd&*!@5ZxMd(znq7l z^EyOlV|N(6YUsdx1^D_|+98Pl^7$MW;~r;+n0U@C_fxcdQIDtQ58{{O#Fqo6A37eb z?I_OME)+truT9MC9 zu*Zo`O_@BNzg`dU6DawwK7yC3}h1t;kLSQ?Z@2 z08jGEz#K?pyQ!x2+ngy^b^W%^^-pa#1#u8t&ypOP&-L3n<1qgEXwKnyYmUSd*-d_& zndM~=kIPVh02ety>+E9B+(+_>(jiAle=bf+{KKs4ElA`g&f@6(J;+b<8-A=u`RNDr zF^LV>O*DQiKVWy!`847S;uHKiIu25?Jn<8kr{*Efh4?@0H^d=J_Vc~lT30w13`bn4 z<1%fVv_4VrYiU1>3wwb2pV4;adz9+GfY*slecZm>UQSo;9}-MDf1F?0`n5@QvR6a#{}ej zGo(j`U=K-r3i*<8=iQacOB@AiBgt|lpH?#@SJJ7?iAgAz|1k3yB8%FYZW`uxDtDjk z*Tp#&%gg@)0 z^18f^12tWz&-+~`=aF$v5%oGMy_wGesm!&Kp1xM{rT5GJK7D?ga?#LjuJK*D_rS~_R8l#E5-e# z-GM%h=d%EA^NERz`1|aj>Q^*>wQVcMLxTMwe^R9#^~TqPUju&lK(s!8-$SdKm-Tnp zlVT_M@$tkuy$^3T_8_%e>@%XnZmbu4-RJYwB-iQv@CA?;&tdzo!}q%nb3a#?l_TPJ z2k`ju4dB-J>UGcnZp#r_Hwxf2;#Z3_-Dtt^@(}-iS@+-UtE2TA#Nt3Ejz<%@SWjYq zjM(R9UnH-Cg72V_an@5UTQ$h*YI+?%hdp5|e;;7Fm6wmgj{!WsUjf{rn}5GW4(%p6 zxeW7jkBno1^fAl?=w0#ypf5u9w}r?3Q*LKqf7|p&@yNPhd^>~k7JjN8b`HgpMQ_P_ zFmJ2L)jH|qzNX4J)T!^yIUdMSy)IHljv6^e^rP|0X};w6F5Wqw-2C;&Xs>{LFT|3w?Xgj@>?=cd+Gy{U6!uL|*Ihc*f|j+PUA$ z(5`qnS?74GWuzaV*WRc1-2{4WHv4V@KCL)j&0oTD3+=!0>FDp%A(t7Me)Q|GxApHu#{hrv6oF9&iB?_{SmW1n1+^qWsN&}%pA7q&~AvCefb!G$h?y;e=n)Tsxm z>KR@)s)J5?-Knack$!7AhWf;w>#^*)z^}9UUhKI59zUJ|+~)fu%r61l#$T`BY4jzn zhk+kxoN1mkL+mqOn01>@#7)_rn^n9NcvfB{``XqO^!{Kz-(u`JmBZ0IlIu$m@euj0 z@2yE5k#<0@QC!8^m$vx(GLK+Yn{zX>S%0VfUQ&;VBl+w5df(0>#BsPCt*+{7{=#W3 z#%>UKpgLd&Q2&=A3d8MoW+PXKZ`euJ@J{iw|0nVn+b8Ha`wMs? 
zzDN9Zz2Pryzt!e108g2HliFYKUT`Ujt6BLa%aM`n{lHF$pML?| zp5Jn=J%GpWXAI!h{8xJaS+Ji`@6%uIdR$chYP~O2&dsR=ue0R7!1Isv0Mpa`3yfa@Z%WBIIkWH1 z>}N3hWd;Dh5V>ky$Cv$tvyL|z{~`CE)Q^y#W{+FF`!7ZA2IFRrdzJlBxRy}l0JV$d zfnM*R_|Q<>8b3Z@^8-kj!u)X`K|Z0o#P>u`m5+lSi>ynnDY08>+!wLvm%=A!k>53h zM$tVAD{%j!gS9r2bJTA1%l)`R@v$zIS02*a_y&rz)pFdPr4;OF^$v6c`RV>b=YqEL zdw-7pLH9HdMBd3SV0$FHB|e?vXW{bNUtm8E=_`G_AB^N9K4!eB?@&6+iKOa9yDfW@ z#1`={t@jDt#h=7EAhX^Oy(czmC$2A{H?3>L#GC1O?s3q=6+5x~J+L4&?CAg=&wl~D zgpagdGx6dtn||qd@u})>pYHEhsQyrY?D^X0{(d#oU*ccj#|~VQYPr<98pio+dR~c~ z+E{Y_ZoQGGkjq$0u;t~9IWK19CB2h8u;k^e`OfTrGVNK(^9h};5~pW*iCn~hLv8EJ z${&Bf7#bR~7s;I%9t!T0DRX}kB$wDj*8NG{etPu&B+|cZKe=0Be--d1ehkf9_WRsx zXdEl>jmYU>9@IEbs^aIkMyUQq<7c@#kND-D+qu){TB1LVicq6yx>s9!DR&o;hm*-E2uPpuG{^Ng>e!oHYd%(0u+qcG#lY3;8d!_}? zU;tUJ^F1@l{Z=FBR)JpMmipZjsJ8&>n)6B?($Nn0pR9DU>xbGdEGK8(#{{32cbG4b ze}sqjGXp=JKbCzCPGW|*9LxB!{{j0J?%{VyKbD|-aFoA~LOF78&v&%ml6|uE^XG?r zJtuxU8lEEij`S+_q4@W~pDpzQz;>(qJl#*&jVvEDy|7zP-euiIsWY`ac#-7Kodjmg z&xYf_puzabUL-y=?O)qq{5F05cAoKnw!!$>-wDxYqQUrW`uv}H#{aGc8CL?BJk$jri-9v(&%vI6OZ>r~2hM?L!UYU)f;%>=%XP)E66!pZ4{H z+y8|IR<{AHI z8jL?-(dW|*#!r50nEr(Z#Tn0?{@|8_f95(cdL|yMS)!{S%9y z0A=Fh6@T4ef5*0CE|YdNEM8*czr4ZtZ9R8I6u-Nd-rHVOmVY^(9D>L5Z&1D(|JLdc z+IBFv7s0N%#;f%C$kvCMe@M^JIaXRPA-zNMTlCI;rppV<(nb8g03J`50B+Mo$_I3_ z^_rYd3E*~lI}Uijk_#rDYs=+sk;^T=-ypegcT_Gc#=37rE_BbYT&DVBA9-@ncLeOX!lCb_YqEr7@KcL2Be)2)VI_&+FL zBmR={cvFr15Bb4-TCZOGS(N`?y_jFx}_r^=1d}7b3OX zdg|M4ymiV=J5Kn{dB(rJ3jT-cSV$eD7_i0pI4ch)_*f5hq6>HU(=xyperkH4kV9up@s@v*A$9a?W+G5h#V z%kUie)|PAbxPPSbxVs;V`gbnb3k4|Wb=Vg*NZqef&zkj!_f}~~HT`4vvjh0T^82F= z#&56Ff1<(odHnf}KskvgzJ$_4`yK6gpTtAvzHhi1exmn^Q2p`omY{w+o>L7! z(S1JglO0h5e$^#$PW!tMCF5_eXVJLOd4WFgHR}h+Y3PT=8;pLaZyjYZ)+`FL9>eQc zi?h>Q1X`uC)GeG=1C#^LoP<1jFvk}8ji&oIqw?Cn56|d&mRo#g z&Y4;ByUi=ZXWg~Q)i&Im&w8ypDpyPP3CuH}QGE{dYugrj!rpm47Ity|RlmP5`i}Ig zb^h}>|BFiCIB&@KHZ%Of68I$=4xZwCS8%@dCHXGn^o@2p_|Hk9jdYJK+Iv|Pj(t1P z_FrMWZ?@jAw%)gxcU=$NS3*0e>E5k`lJ8*1xxWXA{~5~XNM;vo(hw>~ND3&CUU*$~{aXF+X!f0e%9@ypTv<2!<42kLWo^WN{b zhR^MW+HKok+^!J3-hTfJ^?R8rM`xmw>I%VQ>kGkcI@7uq+0%JGZ-o~i>?J;v?-04) zqx$I`{?j>p{}AydCauKTYoFbHlR!1(j~^=GbJhsT>2s&Uk#qhT zb8iriF>}8<7Mbxv(Jv=>=|PG= zl<0r@4(J_ppE2ax1^iV-nMYt@;)iUW(=iW-zWg0g13!cL#pCke0zHnp9P%?>_g{%# z3Ys(ucTV6w;~@pAgJ3+b^?A~=-|b38zDtofiPMyKf*;{$+>3dVP^fQ+Z?(|vGv6~! zT+Q>?e1A;t;l{2+a3I&4sOdiL_6J1Xn(q(6`}R<8Hs~MM+lF`AA4bWM`rY%Ad{0F? z=J#Q=yVmbbw|~;AKhO2kb=A>+w}D6rMwR~5*{YzH;!(2Zuk+SM9 zS1-QrT~mE0BK0M4Z;hwkjIje-@%<*KmhY2XqWhCd@;%v0j>vq*`CQ^ZNHvH8(8Fy@ zZ5IDQ%WsuR=Sj}zb)0>N$S=1oZO-*3XIKyDxQfDeQ4lu=9&_J_z>k?YjMK*Ql<(j1 z(@^^38|_cnN+Ki)}St|PuPaN+y1_ufSALjfP23lt=U zH654XXOZ|r_=5S7+HdY>b5x|=WcT}Wn%1Y5{;prXqQ2C6MJe-LR$H%_`>&1OFm&_3 zAEfyE6Yh_6zgwY)Sgz^skn%c|?#B!8^yQvY5V^;6S4r?6=^2hg71U=uhI7-*Np*p893%Gov5q zK7nvQe7Yp!`%|wzXtL@nEB}*P{%4lx@i|xegYl*QVZKR!5QiXp^MsaTU_X)bx}H?h zBRRKpt?*&(ay~Om`~W#WEczg$<$PLyKO((B%lW7tIOgjCte-+|RKC~8$bFHcvyta- z(esMsS9!VAuzB_4U(c&w&nwJ3x@d>wg`QXK17_aAegcoTuffz5eQ|2!{nNkZ%l}z? znK1Ic}F{iuDv^cU6Sep=*t9^VPdfvL>bpDdt;@H-FE>PghPq(;fGPN;!D2=O9;qI?w*! 
z$8~%GaY-J(rTDJZ>i~DK?+o#tR6kr1JTXoqJg%S6sS%DF1$@Q*YJaJ&ALnkF|0XK)Sz0A2 z-h;oC=Cjm*>}yL6%yR!5`|&^^-0C-3KN|FM~W zT5mpWg zZ^ADfK0hH2`2y(0Zp^E@@vO36z5}*vk^5QS{OZ_d=%we^0mDD1c7B0Mjnzv(uOfH1 z+4OI4{r^#={_`w(;oG(G{z%8W>mG0RM>^iA$2+hapAmn=*gv@cHLur^%sw=>d&})V z>o_j+$=~;m?@rP=K{Jp1^q-Zai{5(5N`K19N6P<9-XXc=KG#N{bD(vIuzgc`|2mps z$%X%{tam%UUpJqy-tBnIk_)F;?;1HeyM2m??_&Kf)e8Gqo(Xw^d|(&s?`q;-2~xJa zUJd1p-@WAggTTGK?j-9n`N(<=mE&~xzi2*p{!!(9Iw5%M`dA2VuaD`tzxykP<0{Lr zkB#)0ds5^G+QIyY7PEh0<`44+R^iL8UzvSi_&lW=>b&1yzf#bTUBEg#w|zOwuns2r z2mUYNDf7K(Kw|ghEWTdozaXFe~%dBG)q5k*9+@~P`^!2pkM2c#Kz)rTrT2NEH^jF} zzxSE=9O54+5Am>C!`G2rO=5%Jv0W=q?ScNJb+yiy3H=jZW&G}WY1VU{JI#I-V_(g( z9_)oXPIT3DPaqyEbZKyZUaR{vY~#oGmzZ9%4h+90C-po1La)q94et0#|(og?w-q*;#?cw{}YWd8YjKn+Lszfu20exSWHKJs1l&Ucrahw}Zx z5IlY!2IcK}*aX6@2+d=A-yH2<84AIp_zLoZ;3=n;?>Yky zBs1+hX^QSU=`{O?A7Hu#a@6jZJ$~_Y3(8w`!~O(mcLMFE_BUZ3?m@p!*K$S16#&s+ zS<*+9>=yd2*NTu`65pP{?y=j$F53W`5t)^U$fto)TL=xJ0V>1o~%MEh^sP#zP>;qQ@leN5z9=U+wV z1C~))?kOE-?Z#T*ahi`-Ii=1>V8ss`u+em?R}~z7SI!c$Q#y}vInoc*KR7eoew9ri zKlh9MF4CLSZ^|{u>pL{>+3!L-un&~%{29ux?FiS{^H_#cL)~B3_B^4f;&)+zitHIL zE$1Q9Sa*l**ZH{CThv5?*XVC`eiPjx`t)J_lI*Y_X?)L+zt#Da9xqKtjIQNxVF1YP zZi2msaqGTV=l4KQvh%aPe&_G-cK^2UNtWcDj2qv_q1NJ^^6T;Lew)^lYn?&nKiG}J zhbiWVZp1&CPX2mm+V}w+7a;}^yEf_vbRUuSWiejF58bHoFwqLGy(M=OHe%*tb9?1;bwT^<_d%NV5{$_T` zI&S8GL+g=VQ(n?>j)mr-0vz?KEcgj+>HFc)#H%`8TkA}HUmFe^Cg$_=MVN15x!R%y5s#)l4s%{8Gj30 zk(#0Y=4IbzDBdLL!)E`F-nS1iD)KRD_6bS8W6^vwv_E;M`yVa&Nb*?Z?QpsHRJ@$* zy4}p9xyi|{Bbp9cPIkejD-s_)Q*sjX>Nx7px+K@SzgyNHgB(>|A$Y7^8-iPQt@y3o z-zkfq{gx0s9)D2Y!Y_7rsD9foV!t&6x6504q7!s+cwEqLk|W?Z^#8HGYanr3>7Dg6 zm;gU|yT7LW+a_~fP5j&LuaxaC`?n!@e1C)TR(r+24VAb1EA$M?*N9&weEmn~Cdh7L zzrO2-nqU2Ric^b!3q0Dtb))`m_osyqUG{IgzfqQM#HTAl@c4cOADXKgqV>y#V?6C@`}=sng#(&B9N7&`A6g zZw|HJCHtq5_-%jeMe~gRf++que|OQk2P{i`6QawU@zQzl5WTZRpAg=<`O%_xc0q&j zTk(M`eXnFb+fV0=<`X}yug)iay6-K7AMqZ3hs2Qe;%fJ<+2g6#hZ-hdZ2ZKZq4qYM z9~aIqe)n(Z8UJSme|)?Iqe^x}J^G|fd`89_b_vH}pkIDK`UQG3@@~@2v-?|7c=rkG z{jun~U+#&L^zU+g{}N5_{=W781M^OVz&bkFQ)Fs^Xl$Ru=)zks{ymn^@f?l=!AawE zILl;z;a~U-;-7H5N84qvYv~+*q37jg?Fp)?D+G_VCqi(`p2*Vusg>~adEZd^TJh7l zgG%^0K3)fY(p#1A*V+D(cs=98c-6Ij-SZ0V&!O>HfPqb3ZryD825nb#2QWT} z!_0c0#va)}HpBbzdtNzfKG*AmHXoY)(EJF=N0Of`H?wr_S)nHwAKQur zvQN(}-6vY9UM?4ER~5d&HVdi8OXR7aZWpSOap7|#xW|Ok(QR1|Fz2+rh+a|Z1mZ{F z?JT}Gr`P|XABb8>H4MDjo|L5H{8aG~y!Yz&P~4XDotDt~QhCY;b+wxI1t2`g?VQOZ z`TP&5xiEg@vIchbCX+|~Q2XXv~;;q}+Gp+c#?%p$ZG`sI9%dtlI5-un90lDouLqOTo? z_bn{PK7E!G`%$#e^Rg=PH-N{FcL2BLuPwiAJ|_53h@Y(TbWS@gSLhfW_?GsOc+OkE zPc}Z87lB<7-`)Uj)u*WZZhYKg-Dwyhi*qpR`mFO^g`fI3`f%SJcOL>Cpppkv$FG! z=S2t}%ReEw#Xs58tKet77%E>QewD&dK_6ou@C@migg{iC&)`_y;k{tzMenV<2A>1>7ECqe`X)B>3`2BG~Q=u-KFQV z734mzpJHPbWEPFjaT*`%{!Yxoxs8v^m#1bsUvMRp)X)A??@z1neE^T=%K&ciJ&6Ib zQ+nTtU)0~dJP(5Www&g;e5gHhsjr^6OFP;}_L4bA%I(Gb25s*TQv0Ee)ko#(K;!@g%N_c)${#1w<2kEaD|G!%IoAe6#k!hzEdk_kHAgBJ4@sZtv_f>g?Gn1fW8_QSdf#W1^*e(po z6T4p!#~8mr9>w=7fLnCKy&Ka04BAiqaNw`)LI1uGvJXS@2svWUhsimNP&@2)Qdhe| z@LJo6^5HlZ$$!W2T}=Cwv7`E&=sDz+oI~z?!0;_tmi);%exJPeext`>Pmo@OPhF&T znS0l;uYW$j4{y$kDJ>UfcWw;rBYKADR`dSdGIo70BnOWR@g?aYvmS5m=i_y9r1vFq zat{dP3-`-Tzk5Ib4+vGx9=KOTI(v-e4j&)hE~`z=wSj$B~lF;{rU>_63O_bvM1+o89<`K!uoKoIJ(67&f9MtO? 
zy+0+ofdBa&cCtL5($A+&KH%ef*|TQ=U*WU`W#uJWrn*A#*!dKK+w-2*--k|H=;OEH z)Q+@w4X@(aQ>?z ze)qg=p8UB!u-jpli?yBq>eF$kM80x8DmcehQ$PAZ%j0NQVmM-Vg1(6(LMMIxPVRlC z^hr&pRT|FwaFH&01omj2_($KT*>@)6C%Y)4dhxGHd{K{|zQ;`KAHg{{6whPlyyRh$a zUk=%J1IN8W@K`w)f?IM-?7|S-#?N+V2wo%pATEsoyqx1sFaj_>AJckA;M&Y z6Osh<*O<5=*o@_x*^hP>-V2x)m!JZg7ht)%>=%Rg%m(AN+U>rp!T2eTNAkt^_xH{- z{)qvGObgx8<2$x4{-3ZLgU1 z)hcpmBWepKF@ zPA0|AQAxgcukUMGucwpjC&2L$JHkuld0wkDpR?$D9M!U%%cxX4s%y#&v;CZwb1$$+ zB_C|+d#RnrKvI$m>REE`3->aLote3a?8&~v7QRdrU-r51`^ip&zl46%xQM;7L(U7q zPGCBi^AX^N1ltp!1Nr5!LyR3E>Bf#27Jmr&+1|+hhTca&<1yqJy8?sD_JcgP5OFc! z;0r7M1abjH`iSlio#1v7A5m$~`CaA5o3gw=G2jQiZs*^pU9xV<>mxyaw`TrA)4>;g zpV9i6*Ok3zmE~uWBP&Ai`1*qKHa|=G^7Dn)l|%ip%d_1>bi=q4zhj?yk@=17H3H~! zX${inR$Mx(H&ao=>UPMO4na5cx^&Rl^IOhepxyCp`5!54B)!e<$%*rX;#9e~F)8cgXyn z<@45XzKzAvJtI53>2YsrWZD}Wx!Jp+IKBPGHC{MVncvAz9?h2laWde;Rp3Vzb+W=A zeDFE{0WD-}$)8Uh`qrVnnOrXX{rw^_#P8Y32ABvA7gnq~UbU%LRXk5BU+j`L3feO5FuGSl!@G8=!>qjQ0!f2p`D*o*mugs!8{)ytDy~U~Nk;zNPCiVxk_2H8vQzMfHMs}<} zG*;X@y4_TILve?9!=dSsDQ`r#)Ij99<5iXG-|?zAssuPHh^CXjeBri7s=&WBHa>Rt z$li(RLtcCbc217(1xtCmM<&Nc_IOhh#qE(1sDz)iLlwB=leJ7Grd5L;TI#)OWO6*{ zJN?>l>E@N^oHOK26eo*&r@Wgd$H#Vsb6i}U@+L+my@~xd@I1i;iR20DkH>H7Us@a&9>eS@NHZczkSShr8#!)Dej6ee7N)YJu-dcy5gQaR_ZKYTOQ0;%-Qmw zR|Q@XtXlr;#FmliX%auA**C57ZrC%veRo*7XFm4#ye3i^Q3QHl_$|;}GZd~)k#acm0Gesp{+uEtPW;?AG!yE*h8^RuFS`md=4J8z096iRy6ng0&I zzNWwR;oRU#%{-Tm?Hu=Zvbh_|0RFmYWas$g$mYorl5uvgn<{qT3yj0|AT+E8NT;lPmMjSH0T&p5=2OoNsajI(>{p^F*v2))~$m^BeTJ zW(w>xR)#(k!;%KZVpbzBROxI7hy@5zUc z#g*~XS})3reTtTo{@%Z5dW|==fBW{4DWelg;ZZu@P@Edk4Ctc`|Iqer*(`ha?^)yR z8kycOxoc_*SY;a2ESouu7{B3v&FmdRXsp8D_M=GR9Lmf4sG#2tPDcS@-Rp zsLUVXrp1jFHXBv^+k)BS?ZtQ$cR}`J{$TYs{C92B+dmn8w||HA7cgyF`C{N28QTSu zX49%oDCw% zGCeUG?ruq2rvg``1V#z_Zr)dM0u-kX#+EaQBtLlGn*{4Q_M?}tdGHtad}RO0u6^J6 zQ8Be}=z(3|UUv4EJ2&6<%WJm0`@`T*n?+M$6o#Kj@5i+SaM6#~7Wa%ojYOQ2Frt{> zKY08Pz8W{Dk@icI-7!DPJg15WMz$Wj1S)i7ie`e*W^iW?P>agom*giOeuOij*@cTP zjjdsHiaQnyqeS$d^dHC*PkSN%!cu=yy$@@*Q%@s*RKyGBEH-A%hIPvD(9cKW#Deq8r^SNmfb2M_8ODQ4%9 z)Jw-MhIE7PNT&P#(Xr`sSL=*nc&YTmN3U#z)_aTfZoe+F6Rh{aCqDbIl~nCDGQWcU zlcnQrIfM|$UT@v_*!0LjvetX6VXFX74&;WHR)7;IT?Ky4*zU3Mo5#p)nm#m9I=eR& zwrt(D@#+m5wr$yT<<=_5EHLa3uT1>Tm*G>D!!xQlRZ)DZVrBDNO;k}(etgLf1VlUG z!}k^s!Z9769GyP2abyHODL58c!|-sr!Q#UqNCHRpB1Ei2jfde8#jX+f@M$YBW5X$s zWGW3kAL)DC3%!>74EU4$cQn$QAottPch93gb3gEjm##nZjxXKzmkTDh?C!ei4Oc#L z{ED`1pPKx`&957Z(JvAIlF6`nUm?#AEf~Fy{nIzRm}Ea`VshkcU3I{;KD6==@427-XWHOV>5uYM zf@6Ik=()FjeDB1bk+6}pr&w9PR>J+RJG=j_5^DrL%+kZmwux9=M@|WRWh7++5jzJuqF_z%YFZ z>Dh7bhEacw<3McGENH@k>HWb=%&!B}mHo>Ddn@-Z#6K(J__vJO_gg|{M?|6f5cvVE z&*B^QA8eb5Yrq){ywoy~@_~OcIz2VAr(`V4m@R%QAUyZiEVy;)SDbg2k02+^mrwxQZKuF%S*3Vw{2Zv z>$*#}U9q8XHGXc|6qG;w>SYz>2ckFAS56>2G(G9{Uw|1nHUdX~y}xFD(`e=Svwe4E z5dniu^nZAvEoffFUr%%#IIZZZ4XrD^-~&7K&(VTU)hKQ-tw{10=c87>|5|JB^VoCl zxpQ;xK!5%me&pP9*4}IHz4m(VefCiqMSmI9qp!GR9o2=)P_{oau9SOEw%>WXa`0q* z_A>nIIKFecQVTz()X{TrKjWy}Ib7dSy|_P;qkGin=Smgxa_<-9ipFv;WPBMeKaG2o zckj*m>Mo_)?^Eh}s;}G}^#jj~{QNYh&t}xiRR@)1JZqII({m@|t6Nd;JsBVU zR-Yf->^t4`Jln4ZsBrEq)We^fJwo{3lO3mWN^RJnz7Bkj^3^#iOHUYY4}Pwrn+(@$ z|DufB{U}IRqUXxbouO9YTHx|IHIzG7QHy!quR+`^wMDh_by)S&)xF2j^)dcsbT8va zJI*xGf5!1M?RfrYPP_B4Qqv33-ayA0s}ErWZ9?ztc9iRJRQ3v{io^3W;K$H}WFc|6K}U%WlV^&{~2Tkt&U4@BS}h{N3n zYv2dn)VfIhI`I?T)gFwdTlixEc&<$R*F|`7#XN$~6)2?dos)_07QIf(jp}nFbO9GR z?dmqbKdj+JXB_-m00xmO2p-{c9|BiEP8NMZ%gGRCoB2AE(QuZdMX%EEX@)PR4lzF} zUG84U3E>&m<&JQ2fn!YHtC`Wd&a@%kBPUmgRI2bjI~Whhr_eU-=dd zALRBvC2(*p!9g5@Co6a!D)e)?qAqu=K;@RBoXD3d6bPT{DGU*vGt;har*M$zdpFZB zj}k4?zkV1SJs^-d36-}a|^*ejn_To{59^Cd$U7&!1sovJ`?h9uOoeG*x=zok( 
zg{y>4g_|^;#^ZDXoapo|i%xw3oo+UC>Z5)WU8oZ?`%B(k>Mjx`k@L}rO{_G)W%Wj_bT15BKNB^ zrC-b!i@p-{OZaI`3)36g#LS=ObBHYS`GWUZ^LMMvpZ7lLALz;R=jr+LrsI6>-A?0l zls7H&*P`)yx5)f??~wU}{EJ+%{3``G=Fb78iLU}a?}G~9a+;69=X(CGiqD@X^BeFv z`bqOQgnkUDrJ$3idhxH)a@B+9faCrUjcYM9Ak$ZQS|7CVxE$C#>W?~i0pY>?Ei`m` zAWo-+*9x874+@>oZ>CdKeW2+C{<{Tu9YZJgu+|4}*L3=5oKAofoi^EYGWA>DLG6=1 z5IVKoEOc6UJNI9?*D+k^gi2~5(Z&6W`%&^&+`mPCz_*Lh_q%D_Yg|2UJiiUk(eCy1 zyvNurtvAL!hC4@0y9Jlp$-wNEsk{R{jrz_9^nE7B_w9EI-9LW`>Ve^TSL9rI`?DAe^V^H*s&3|fIxM%i zbMzLabVh;+h)OzrhnbQfaTatD#4`|0x`9bC< z|q=^42G>!AC2yx1KVPlOEsFEW=Lcmz zTc6f?&5gr>Px$ydy8bV#JiM|7AHV5;7azl#h}}6(y_|vlE0NyNdO362bIZq>jdSGV z8>{f~B_SVQPvfeSkBwdmLcfyMLeQXTRvH;3(Ld_C0a`BxCI08dwO%qy3n(q+CEZ*nt?`hLY*l zzFGQR%^tSDK*Ntzseg@z&s2fGRp9Qwfi7wJ73fRV)jX|Xab6|@PtNy*_3eC8E&T0hf89LdZ<}ZQtqtP;F3}&e1QBBY)8xU7-_G;_bglN| zc|5o`dC`T`KJJG#ocn|O8|Ucve=n9NeV)#f0;%?YRn8GV+Tpx1^3$n#(#1QpJy@dq z1;);ui5S!Hb=;2#`SAaue5vE48hjYSlld@&+kE(g8u&Y?zmfLq#Ls!nh`+BSE+e_* zyl(bdO_yJYJ}oQ|eWSY3E_7$xdVTNXp7f=*BlyMnWwqSs4aq~Qp1jPv%3R9&L+2;6 zYqDZ@e3XYxLO64RGX^E_jKph;FcYkAm*sT*oQ|JzI|PruUz00xKU8il)d!x98opWd z7V7it<#fIySEM*?5q!y`WM5XK<98p;bKY~|)7owX@!Z_nS@?6^PN&sQK$j+s$JgyF zwc1g&@WV49`l#%ALa(f*S60&t>qA7ZQy71{^6}qGU|)YBeB%BW_#jO$OCCPdyUF|- z!fko@Q4RbJ>Sfg~{E9pS`v>klpkImTk;zRHU4|UCZvmedGjgw-4vEKSt?x*^nV?)B zm2>}})?d_KG;e|RETV5mzm_}FL&yWD#Z(SglB-_TM&?(Y1G(R?^BuF~%*=b{Jeg0C zzw8?LGla*79I~8cE@e5<>l#Rl^urRnD3N75`{OM8OUR2#zWyg!&V#x?DX1eQ)#~1Z zb*DQ~ni86+9^@^Zh<{5|cLZLjHdO;qcB<1zJ@#j?b5|L-;1U;Vh z!k?fE%?sR;B8^MaFO+A%xnElUs5m{`N3%T7p*#ok)S=tyOKT^TXRDoX9dU!k!|OH5 z&>#9wk7rKpEb@&!TkWV!bMT8igD;@xk*Tn{ z{9|5P-!JpzA@4%*lfm1H{PXpw=TRP(+5;!)JT9+)t5)5=wRnCmfwSkU3+bBKY0H;S za!i?Kjx(}qri~p`CRs zZ(1%@AJcz1KWD;lzBkZI&U(>HqGz*4&*qHXa7v_~vI^D!^G^D@M0~MK&!hI+Fi+L= z&v|v}x4{2O$M1eS8|dpk>QDYz)7SNl$FEyo*N6WU#=k=O;5GPPi=Pty)BabeN9=tT z;(vwkWPS_b7QeaTe}!-xKj-C-Ur(wPGXKxA|FteyH!}Vg=*xOg{4ZKB7I~(8aGE^J z_(OS3rf&$h=<87)FU`(1NMFw*{1}ai+ZiA3a&wHkkJZqR3JQ<(u!+BioWaaE>^J#| z@1koqwm)g{cQu{izp)-;{^WfO&|@7Z^}@zr{}cY$ukPU=z4({x9OBQ`lSD6hWS=AR zqD0SiLOS{teB@dCjW~}O(aUwtlUjiG$lhX{tXBj7H*fskeAF)G{>$)F-wyQws<+`1 zwyO&MDC}43{VdPR0m!Rf1@8+pLgQQ|cH1c;Z#lqrMeBFt&%%FVdS_Do)Yc86kDMXK zL+j_W@lZdYOG>0yJGCBOm?0*`5;nPsxCi7_sxAckXgz$h*y(P}m-;E!~{^Ww2yT7d3%>G31>mpQBS;(d2l>+%% z#2(nG334cYd#DfX@sXSj1bCEdjn7*OcpCj0ou6ej@HZHDXgWi~#|`J-rl0Ja4Bq>~4N@O^j_b$sVr?JS?UOtK2jpv(UQQSf?fatn5A$BOUsWq#Z>@KDD>1$j zwXgHAWcLRm_$@v#>xYYB-`Nhsj`t^FUkWRVt)rn^*TnCcyrMvO<&S) zn2(m%>wAtTse5`qa+l)^-_GX4;sCWG`Nq~6&9_Un+ywC*;%xE@9@F!!`MTw|Q1u+t z@6zQv&<@RK3zZAs9aw5TmK|8NOsiy3FLvEQ(l(!LP7F-D)UOw4=(ta>bZp?uD^<-K}X^@ zm8mbz^{eq^lkyIyX#76(Q8e$V?NXTc%$oV-_d;~Olk5ZKr6}J+`A#qu=Q|hTO8N}U zq3uU~c~6RX*zVNKdp4Libbror8Ttpd-H~5Knw^rV>LH{N2N822ZQ%v47 zjL$f~75cH{53#)RbM1G*A08k&fa;{ziBH?J;y0Ll*uqRv{1Zp7i=jNp_pE$lhWrzn zKh_^-{y+E)w=!Qy{!%&Ge@LxQ{KWOqKzhFNY%3mz9pHG`Q+Qgm8;L^!P}QB;MM@?S=7x&5yz_ zAw0Rg5N@|8c~Zse9MJ#XIS|njTE}pYD_@%VRlZd0oX$st>s$Xj_551#Z{2&4*Csn2 ztnWIxX_9C1w@CWTK8B*?m%W!5KfvMjTM|!MM}VFH9^6a)nIIp{<)+Dw48R`}zYgWH zN$*=KRg`@W5KP&ZUX1Tccfq4&^0z3(PyMX2FTEvr|0Q{Ux;p@NCp>x|q&ukb2KyKa zvj1G~S8wriMX}qi-f!XR{pV&sI^NHMUJ3W97xX^%yx!NIKajppUHs3svhGI_dg(U! 
zkvEB5ZtMzEn<08fTl5l6}cD!+sya#L3u`2!Ae%Hte&r?8e@43$7HRz|AeJRM}vA$_N66EoG zp?j4)UJmmDf9Fi>H-TKyItJ%&UBu^iLl1Z*nzu-P&E^B?cW6(O`7DInd}8cvus%)s z$OYGToXL5^a9!oR8vST+U4_IBavSo)=6HL-dO@q2m%KInN%1>_eP5Z)di{DX{vh>R z{4=7%9>n7>3;28^$LF}e{v7BF$scwGaku0T*HqOb5_gC2UhnpPjor;+_&%0TOQwHV-@>ou zM&~EdPm!ZgZfoh)2I*9DeZ;n7$>R&>(H@_+?^q8p`_|^6GvP^-4rXwUpzHPKV-5TK5ybfZk^a`Mr}h7S>Jh zwl3@I=bHX%Igfh<`jxCF8=P;og6N3xo-gZFWas+GPR(t-D&*7D`C9OX&Zpw`Cs^-X zj`<*Yl=&lked-dl|5BE-(9YR%CgTp_$$S{X<9xWB#FW>~;&_%bXC$qgalSKRx9#>C zT{mlR9=%%YX43yqF75u;TDQ7@=Offx_Ixy`_q+=UKk-2<&rj>-BB#c$HFl^cBl)5p zhx6c6jN`QA{1A3KX?u=xb;J|v=DI%6f(-UilAD(?okBWWbecy0Vt#&|^Pu$+kwxAs zj=zT%>NNy7qz7BRuKkvPZlKp5MRLY|Y0Kj=Js0|Gwd_oTcA%!6A%0#LiHGW?6W3=w zmTqT+AG97rd*aXaSpHlX*IDx^=g~ys#^ip6_2YU-X@4$++xUgfA-qof+MmN@i+r^F zruOH|zQiu%ORpwom{Xn=zlKdcmHTz80N_puwv9muwaN%nTbY}>US%)(DqZMo~(FRh@)P4u@ zH{6dlPjxMLUC(94Ul2Wj_Z!=J{L7Gq;(j}G(I><&93PPUIMi>F7sLr||EKCzYr?&bh*RsJ9yV zoVVjHr(~b+v$YOt-rF*MwUT_U=VRPy`D@|l(2gYYa|pNPrH}CQ_~kt~E$2brgY78b zh7Q`_4BmsQq5r+->3ovMdBoZ4!`OdT&wFmI7wP%b{ITFSgg@L*nQ>_TFnJu)e}9ha z*(d7$&hfn#ch5WfdA~7$ zSJ~GR!0r8EiuW&JnI3sr;Um3YbCpx%eJ!i-o(bv2VE?A!f9w~6ztZ+?C=$@wy%**0 z!u|Ip@1_=JjXu@;njk+kFYC2@slJVRf0DdEp)$N~*uIA6jrr}qlO@6eafpUUjN zF#6N%uUV`0>6SUlq0t+%uVl{oQ}aIP62u(kb5hp*8r_E@dN-Umd)~$W58;;HRYu?I z{o)BY{+rMKOoeD6wCRCG?|5`H83RGdHAf-;yR;?WYy(|Lr>IUOhW zlrTPo&@Gd(`!IiYPx5oh2S@9ZJ3;iIyhY?W@vTz5Sbo|@&si^I>E4c)z}<@8^P%&a zM4t=3s^?xpJ|pzqIM+C5!3RVS>NuxAn0H&xfUwAeh45s43*oc)M)(7xmi3OS&A2rG zJX_;^SdVuPwI3PJe2;f;WW2<$Brol_P#nB@?(^*J)AKBP>5N(LU2HV^6M7iyine~7 zHtz`-{Ydwuzb*YZSH27UE7PAj?!OcLh<53{gMp5X0pHntW9&0+G=;1Z-6Y;{=-iBz z`~dP(<=hNtD)H;Bb2GYw=kaqh$lh`s;Kt9*Ai3qdh5MZvd?R`}l!H3uq$YmO%SHIM z+4xzH&Le(VFFAhtd^aLTWbaKpKypOyMVh>&i5J8!gy&|Mb9RSyd9$v1qukGx@7?(M z8Rnc};x~uS83x{Z&KX_;SuMj3d86_1&9y(4eKq}IzCE6EkC)u8`!iO9-{)i(_`GNG zW5ap3KDm_kLT!zchGuHe*8ekhH84&yGHPVeP!SJ7V-D7&xi5TduOsw$H`0+ zm#eJy_ptBgF20xNGZ{MnK=v2VRXMzm$2+9$mPd4I{9NuO-wW)V*aI|_BYzJ3Nc-;H z528KfQQQwny8sAZeOQkZej@7&O)u;=m3vt?RO&>zcmGDpFL@C#Lq401|F4^jpW;2@ zL(~5+HyOW0AFn*m_^)g-evWS<^m(eu_-*=}G0*tVYBGL{KHi!0jDKU3@!Rx&VLE>A za?uZM$CBmS!+ZS{S5<@8tzRtp^meBAuWtNnfG;Bd-mWI&XMa7EAMdIr|Blgmc55*4;?P;Ohu$30pDzr+z?{W|eCY^P{^5jwe_ zi_r;WYS7M7zA%da;U?p!{CpJu=bMbbH;&&MZZdw0KHf-^@zc6Qls=C&89%M#MDhPs zlkr=6-u>eI;>Xu8*bi=+ADD^L$Gf7*_$khf(*H|w{BAA%M)~P=5&c)E{_%c;+Kbe; z^r!b)3tlJwhV_s3kGaM`HLXf0aYLPj-y+%I^0G-f>zG z_gV9x_dA5}1u&8PIo4kUvt3-(lihtaEe`JSXwB%AYUm*7-cYm*(IcPUJ4|C*Ly&zu(3CITjK-hxAb@K zCpu$3^gf}s>v=wuNAX^%JA%rK(R{(FjQ}Eh!uNlYa{RnMX2Lhjv{-XDPDNj%O zjQB>@$;d91I{K>iSN!G>p4{IMZs{|9?n*G8S?A-(dp#lCt`B~V=AXO;7QdN1;uV% zZ~I}VHW|MiZ=RBl-+N^&-!Pm1PoVl8r&Q&C-ft9vC-Z+;-{N1d7JiYhuzsERZN1>( zeNxJ=;7|5{)sl~En z^Vt^vx&IpHzo5O-;`rM^XOiClezx4>-O7X3YJ-)#o|c4)d7zhPf}hW9Ya>#`_X<9Od4R_bl$Ber4tR(?br&!4|@HLIk={@&oe=ke#Z{7T8&*zQ? z@!R}juUnqnWc)V2pVVah)_R{uab&3hdA0eG^S}{)+}33Lc3%7&O~!Apm;Gat@!S3X zdXw>6`o?{{$@uO5|8p|__8Xy@BY7eF{hISy8NaPRUT)#1I28NXk9&S?{r9>i1fEQ%s{4^^;-~p+0RJri^uD-%6`TuftrILkoWHMjM^B^@9i z>EWInuQO5{L-Ey;Uo-KPIfpO1mgBQ|S{Dq~rSv)HW?gC)edd^F4DuhOFQW2x5%CMg zyH)S&{ZqNu{wuxTC4BXLdLH2$o3BJZLcXb&uWI3Mly7U{Z;)^Geu8lPHhsv?s6#GP zm#lludhgVZo9%UT^WLH@A8)84A9b#mFFB~?!^p*N&ytIio)x*!=O1Xf2-iVn-Msod zfPM|XuNT=zKWp8*$~w)Gk5r+5z540BRrJ$mXU)UB>La;Nj@U$mop3GOqhH#{fKe&2eU{MO*SK$O<+6VCNLG&lDCb?3L|`u=sttE2B< zcm7MYb$#hy1fJ-3MBtX+4|4GPTDT^^e@*!-dJp*78aLh(J_@~I#b@8=vd_U=Wqjrz4%wj_e8umQ2TVx`yG}NfZ;jC z>xXso8^3oL@Eg#3#Sa~Sr1h98@b}v~?$h}6dDz&$Sd?|2f?kg) z=yR^kx!10&-ze9OpNnnI%g*cbv-1_bE+hLL$$wu%lc&G?0!HTV@91;Fn^~1SR~W$ z5f5QMCeI_Eb6qg!m?6&Q^RAcUJe4x!2*u-kYw@LbJ>Y3_CiO%8lg#%ayk7iFzq!k? 
zJ||P*JKO!lU$f!^`ko)o?9k_%!wT#901FadwiKc7h%Q4!U)36cpTK-xt(!t>2eb&Lu1_m>A-_;Crx;v*>qnGS7hfZv+3` zB-euwB=^SvoV_deQfvD!se01>q4 zt62NyJKxJ8-`5je%|1dp_m}j9%FH^?s7U(7$lsq>?H9Ryk1Fc+WS_A)4_0Mj-(3&B z`@IZ#50XQCH@BYe<1YV%)&3B-@6Kw!TD$eO_qkYmS?C8}-GR9STk-ic7&e^`qn7IN zm%|rL|-j2^5f9CTQKtYgS;=jN6&}xU#5*cv?_=5!f!=f0N53mhcBBNpQgr?f_cO2`$@;TiJ!b4iOFDk@U2(Dxebk@hwGW=v zPC%D=vInl!URC{{)%K>aRL{?u#81Rtd=GgOdxyNi?O?~xX*~wG=y}~vF6)z?Tee#G zuzo#X7=qn~JmrVP9u%~m&ku>7&u=9?4m}^Y12cggz(WjXJ(>_^5ggG<*1c)sra!Dq04hrAB^L)iCAbR$QEo_8V#R%X8hc`=EH z*30^xrX${$m3_zgER6&8j(49hf0u;XbI@LO`)<$kJlS_1l($#({P#p7CzF_Z)k z^V#xC)A=l~vYgMtNHm}2*6R1F*UoW2x_Pf!zJHS*Z!hJQk5e&k&rN)WY0~;npOfGD zYC|u5PJY3HR}5Uv)n9(2(9=H0)O^nk>O|hVVf}`AHu+ek@o&-lJ?0vJ#?E~X@*vgx zWRefg!*-f=Z}xwQKe7Ghso`hTo>o8S_k^jZMaH_y2`){O$)cMOwe{-kAP=&GN^MKFc4k{p9y+mVedA zpSGL-Wb~O?x0&DfYdX;$@wNB6nhqaK=5y~{%fFy?Fv@?dapLdQ=VgBpyfMVXg?x9zwgykdr5L%H$6^h4m~=Y@t|jmcMM$|E3!4KWgcVz^_e?M>=nAc)mFv>4fiEB0K#Y$0H{Hj`MtrdL7BUx5$2PwfDF> z?-6-ITqkn!miRmd&u^RcT)=X%|4`c{+_QgW z=4oYz^tZ~sJ>oJx|D^7D;_stlp3>Jts5{c{i>2T4y_A)2uEw9!90x|=$^04CxA~Lk z5Ty&YQ?Y;RJA)jr6*HUH9S% zkAnOM>O(zj)bP#ZSJQ;@J+^BVsXz1{Klo-Pa!^MJzn_lZeKgN@Zsi4Lzl!m%=Ge{- z&7Yd?8RTPyE=?ZKkRH#FjUV67VtUCsFyfkaY2WD+dKK1dc&E?{dW*_~?}T2^LqxB= zj6V-{()&&{Uzv4Jy&k#ZVXY5LK4uH@RG43^BQn1r|J*NrLpC=}?G$w$b+|A@{fewd z3VbN9^;7-~$xmm^dZfT-qU({f#?b(Nm4#m6{Rh0SgyxUOz2XOkub?joo^$H?&K~gSeJ5^w z-^ohoYo7ltOtj+^7{7VZesok#(lJ z;{iM0rt_&Q-_#`hB&X1ac0P5!@SA*U)9~B$(fQP-;kW6d^Qle4Z}(s4Q=5k0eovgv zg(zXZ6W=R^p6B=o_Cc<<=Z~2H8h%aG4d3`g__LuhGcA!2Csl(q%?m8lRZRNdj zdB8F%>pi9OtlikZbd==Ls;ATm8LaqW12($O_MqhOy>gb4pR)4ZT#xJnjSs;L_g{H` zfbu*pj=RWiQoKl|`g7;vs`UW!7wkunpFd6IwIAUg({Wc0^cm>>wD#vw01QF?%22hq z3ky^P_w({TL>}wz6X@q(YP&^UBzT?gL9Bcb!y)>t!TKfnVSlgjJxB4@%5UiT(sTqp z<$LpV&+FY89dE&~Vcxp&KhLiZdXk^t64-Y>Pr7?t_{4jAFmHT5mfD1SjtgYIyRX%D za+A}~{0F~L_%O%((2e{j(4v5VeD}(&*NEB z;CP{Hp{Dz}Cf;z@I_Nu&+coRImot9lPKh54)};A%T33lV_fOaF#y(4?+cf$yK=obTkJ!D<&{NjOyS}IU-2#z9yK2a+XBRj>*?m(`Ug)N> zX9zv=+l3ye$Mnc+dI&#vL&k|7-XBU_K=kk9aUi~eoCodVg&&!ZEXzR`%9N?xxuzeP z6Rr8s`&RQg=|@I?Hy(V*{mAHkK-0zMMZP;0oZ|*JROp{MLFNVT3-G*TIX;G7(ChDj zV_v$pNWX&l=+k^p`w1`}nim;gW(W7f&-+664D+d;j~0xK-=FaQ5OK8TORg8l5B3ug zzM`Q!?k}YHN#Y|DZ-Fav(=^_q#7U8SlRO_X`+xMleW+1H*wMU{l$(i{n{LIp2D(3E z=||GXqHpWelU;j^9D%Q?zjf-#F353-_(AK*uEQGNbEPLCS4Ytw?@rQd-H%j_$DlV= zR|KBu*GAx$Un_BI_ZO<-=eQ*TPsShCxA2SK9ce!)-W!O(?fRCTSP8l~G*9GTksblR zVf+vEg2#D(zLWaRx&yroEed%Qx= zuzsERRVJ{1^xg#dO&s5LeOdFX3wjV;a!x(*E%500){Dot-5=6?xP`~{*{XE&m~J6F zd0ZjfqMPyOD6hi(_awgUI%e#U&KDVfA@J)Y{-o(9{ciUC)g6yn=aPAM(|DmDKcM-o zQT(fcZvgXR_P4yV$@uMawZ7bB{3LIY{=0uO&-nj3j{mslmEb)n9uj%}9?>yuG z<9Wt^IX{lAd#V}7*$Qj8CG4Xt1IoIBU`>s2Rfzw%`~&U32rfs8YRC(ly|xA=hA zkm6BXWnF~Nos7(j%@>W%+tn6b%{s}mMb}F;UHLqlo{%od;|$@A()EKpkMkzah-Wyi zaYUaW2-5mR^a^e>oHtyB^|&&{&2&CliRNAL_c4O_B8Pj*zpSlqpN5D0HSHIhruUl% zdWv)AbG<%j^Pw3Jtt&_LBk51pn;!cY^EiLxpK<)mdH8^y9)HItrCqKUf!E+0Y_pJd z{7jL?>3#rB$-MA;BRI!|pJUjv9$?P#@MCsGsbk0=AwTAQB&**)1D-CTR#puGulJ`p zd5-s|%E)ScmYwP@0VKMp(J$3`ijSpg6kJ zv#Eyu4dKc29l~w>YwK@Y4-tGI!cSIx?>_EdpjYTMI`A#+Bk`Tz!}x4`4eD=2^^r?l z^?jia$QrXB`X0Xztr>Ms`P}K!n}p>WO{~hn;x8BiqyBq&w4TfuM}-cI*< zm1Dcm+lB6duifvi(6{9^7*)K`nnrgfU0 z>oh)<{e3ByeG@(JJyGPxIhGYV1?x$GqYq>_$X;Q;WWV$`J4tjk`y_k&gME^^ez33b zMyWq*pX8M6`Qm@Na?q91z2d zm+dUWRd4U_YrC3{+tuDX0zH&yA0qHXJr{vndQ8@BB5)f&+l2_cPW++&h6#K*bRqp= zo_pb$l}S&CUju45_1IP5N#2*(d!z8H*wx;*RizuRM@8Vt;|l9rbPM(+=Fo5Q3-rE3 z_y_DK^sY2^R@Q~V*Q5#PuQ~SB&NKfx+AV>uZ$$${SF$gStP7d-nol$tuibCG|F3EN zCO?q$f$9I5P42&i-|YWuTK{eO=>309!*BK9)BFFLhTrbL-v2jm_&?d?`K`-ug}!G0 z4{~=W$sP1b{Qe!*{X4DuJIy`oh10cu%)p%@gDLN8NnYerG|ttdRrMK}n63ysQJ+QN 
zwm#GG4N~3O&YQeMrcdm#Bj_6vae;5o4m+SqC_<1ceASvkub&~9l#Q7!SXBN(5e-i5hF?@wMKJU}WXJD_z z>a%#< zNN=o*@K4?Rh5F!&)!27;6!RwgZ&vTC%Fm=n)VqNfiHIG#S6G_q9JVZ~dQ< z{wL=XFe@}q5&y?~P~-{#d9Re-4<$KSDAG|w?l zUAo-q82@#uFV{!4K!4W9rX21~IpoGxPovxkwaekl#*3-m>YGrW=Cc;wH?u`it8YQO zr=ec(onY=`%0JKGuL>ALO#h?0^mk=>vT}Z;w9$PKkxI0kE9jQ>e#o9hk646~K3ucTSB>9NtnHpNyjM zqBjY@_l~8rP=J5De0X0O;yZES==gz<{sDZVGFh28SQ*}Oc(lB4WT$EB%JQ&(<>9Hy zq+ih^F%Y>9Uz^_k@U=-)32{_aTmbz1tScU@0srUGvC&g2`^KjZ`^f`Ht54RU2HXkA z2EOz6R3=6%d;Q7r@=i020i4O|qdWe!m5H%nfV(Ef_Eo9MWlXR8XIG8E*Vlk=r~!Xr z75Gcblm2*R!XH0yCCeEE?3#(O(cNYM!T74wf5j6|US-P8@wzrz?&8tChyAS=4xW0} z`VIch@~D4cvO-Ory7l5xv|QqENTZuRfzJT-`ZLZL@W;y&<$c!J2&L>Z;A$)wHR8W9-4lZoV+TiuE5n1rgM$+zJ1b}FV!O+egD4>o z^Du$}b*IL5j_vgijPIT(4_ExDLzt#1f7i&wBx$(|O4~2E;^Onq-@bK6q*nNIOKC^x zifvmjdg+b}!iSMx@Ntj-ujMPp(kka3x<3sJxA*(UM#e&2om6=0(2lXG@?MDXwvl}! zv&&9Y_8%CTs0?3xurhH~r95oo7=!SaVR#Ju{IQ806XnrKSfi1#QM(N{ zS+}X;`9G5jcU^5&ux{Ud;`hI3J*suPC0YQ!hGw1%M|X|+yV#wK6aasnSJ^c-Q5l@5 zkdClNy?4ETRe7?yiYtweS4PqD1N+ z`hVFZrgyYT|HsGD?Jw$M%D1u?mo(u!q1;MWj#TXf>OY@?xN3iZ?-V54Kc4=~ zOj4cTS=)I<@juaXvi}G6PMz&f9@x3FGHINs+3+}>DZbGR5}=L#(Ee-HBKr>PJ=@=1 znc6zBdvZHiVG7i&S~!XrzY+gU?;AymE#+^bvg^R;@bf{&WGWYrwxz1r!;|T@Y0r2% ze?+^MG*>I41rmO5hjjS+FdyaJ(9MvK_3=Wlbl>?%{7KEbm=25DAGEY>{VUK`Wpp=O znr$1ljqUEKfi6M67o$}UUR|B{soZ6u*_owV2)=Fo+2Bl?!Fl8n z)!>i)m4j7Umd6)s`*QN6Kc-C(_u{IN-K0W)WYOd>FI^3 z@sa3oXZ3Y540UF~=)wML_NNL+dGb(VJ@b(C3+n~*z`Bn9;5W{`|G)2g;J`Co`@j8z za&FPUM|Xc~#i?IiIe7EWFWdf}Pk=vd7R^Ob7*dhlPU;EZV&7g_-a7&_W8#)5j%vUE z^T~VuF-hj}?B}KqT=3Mt$@0Ol5`;&-X zz5kMrBu!g@BC6k};M`t3F|vCk;+I{$CtZMd4I_P>@^|?kUK-40Qn9do(PDO){-+-M zjQy;}eT;uY{N!o)+Yci}vCrQ;Hab-~MBXedgdOi+c5VBR^!mclmqPm@wx!VEz{u#- znHx3ID15ed##i2IG`_3`ytM{=aSeFB2K=(oJ)>jSj8fP$b$EQXbZ;wd-*LtHmu}s9 z#rAC%@2G*y0>eM?tK{zj9X^?!{!!)0RQ0LK>EX8;uVJ8q{8f;shz}xw?<*fdU^zB1 zGIbdFe8iOC2o#ONqt6YN01ibH2BPO7BPM1%3Xd6fjUa$eg*hREo2~(z5=Bvf{{7&Z zC;f!S!J|-qC_j!)`r{Nn2k^Td{;~I`fBEVyH{AL6H-Bp3#P&U1m)vymgGVpwxZb&D^Pfk@k6EAli0nMF6jS%g%@O96TDIsGO>s4w*K9rvLC?@Yui6Mv}BYDw77s`a(Ez zf9Ke~@x7I(leD*-ZeP=Izx%fC@1?=Bx32wkOIiWIs7zXs&NLukZ8hMjlUt+i6F*$} z?fo_UU)erd9TC>JQ~z{k0_p{vK9%Y!&BF7oyO!S)s}J|nF0^guvZw85(R=Nt!gJN~ z83oq({iA<<=Sx$I+;DcfalGN|q0Qh(WHu(rK_w4KlJr=L(Mw>&u>>ERVDPL27p>i3 zg{(pzxV3b~T|2b@np6kz;FYsT78_FKn)LYd;8ePUV8#^KK?+f?90}GR4kl*B!Y&+~ zIuPC@{5m+59v>dump;A-|Ey2qzba-s=n1(EF@qdHb_BhE@;gws;?=cZ`=XNrJV`$`edC3fBt19JzVp-9+-iO&UGI9u z_SYmm$1~qNkIheleE<5(x4-(L%~xzL?bv+56&GzSU5cOEwwc=48F<}_RQX5tWa{E^ zq-Ulk{JwJ_L8BD}+FOD(>#Il7`G4o0bP~H;#)1H2H?xfFsB2!-Q{{gh7qw@d& literal 0 HcmV?d00001 diff --git a/etc/upgrades/1728066632-protocol-defense/common.json b/etc/upgrades/1728066632-protocol-defense/common.json new file mode 100644 index 00000000000..4011159e2dc --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/common.json @@ -0,0 +1,5 @@ +{ + "name": "protocol-defense", + "creationTimestamp": 1728066632, + "protocolVersion": "0.25.0" +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/crypto.json b/etc/upgrades/1728066632-protocol-defense/stage/crypto.json new file mode 100644 index 00000000000..65f8a3cc066 --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/crypto.json @@ -0,0 +1,6 @@ +{ + "verifier": { + "address": "0x06aa7a7B07108F7C5539645e32DD5c21cBF9EB66", + "txHash": "0x1e14eaa49a225d6707016cb7525ba3839e9589c0a85307105d1036133ce6c319" + } +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json b/etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json new file mode 100644 index 00000000000..7389360d64e 
--- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json @@ -0,0 +1,198 @@ +[ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x90C0A0a63d7ff47BfAA1e9F8fa554dabc986504a", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x81754d2E48e3e553ba6Dfd193FC72B3A0c6076d9", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x922805Cf0C00C9A19C14603529Fb1a6f63861d80", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xBB13642F795014E0EAC2b0d52ECD5162ECb66712", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 0, + "isFreezable": true + } +] \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/facets.json b/etc/upgrades/1728066632-protocol-defense/stage/facets.json new file mode 100644 index 
00000000000..acc6456181e --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/facets.json @@ -0,0 +1,18 @@ +{ + "ExecutorFacet": { + "address": "0xBB13642F795014E0EAC2b0d52ECD5162ECb66712", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "AdminFacet": { + "address": "0x90C0A0a63d7ff47BfAA1e9F8fa554dabc986504a", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "GettersFacet": { + "address": "0x81754d2E48e3e553ba6Dfd193FC72B3A0c6076d9", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "MailboxFacet": { + "address": "0x922805Cf0C00C9A19C14603529Fb1a6f63861d80", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json b/etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json new file mode 100644 index 00000000000..4ebb6009f3f --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json @@ -0,0 +1,394 @@ +{ + "systemContracts": [ + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd" + ], + "address": "0x0000000000000000000000000000000000000000" + }, + { + "name": "Ecrecover", + "bytecodeHashes": [ + "0x010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b" + ], + "address": "0x0000000000000000000000000000000000000001" + }, + { + "name": "SHA256", + "bytecodeHashes": [ + "0x010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a" + ], + "address": "0x0000000000000000000000000000000000000002" + }, + { + "name": "EcAdd", + "bytecodeHashes": [ + "0x01000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b85" + ], + "address": "0x0000000000000000000000000000000000000006" + }, + { + "name": "EcMul", + "bytecodeHashes": [ + "0x010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66" + ], + "address": "0x0000000000000000000000000000000000000007" + }, + { + "name": "EcPairing", + "bytecodeHashes": [ + "0x01000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b299" + ], + "address": "0x0000000000000000000000000000000000000008" + }, + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd" + ], + "address": "0x0000000000000000000000000000000000008001" + }, + { + "name": "AccountCodeStorage", + "bytecodeHashes": [ + "0x0100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e1" + ], + "address": "0x0000000000000000000000000000000000008002" + }, + { + "name": "NonceHolder", + "bytecodeHashes": [ + "0x010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c73" + ], + "address": "0x0000000000000000000000000000000000008003" + }, + { + "name": "KnownCodesStorage", + "bytecodeHashes": [ + "0x0100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac9" + ], + "address": "0x0000000000000000000000000000000000008004" + }, + { + "name": "ImmutableSimulator", + "bytecodeHashes": [ + "0x01000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a" + ], + "address": "0x0000000000000000000000000000000000008005" + }, + { + "name": "ContractDeployer", + "bytecodeHashes": [ + "0x010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee" + ], + "address": "0x0000000000000000000000000000000000008006" + }, + { + "name": "L1Messenger", + "bytecodeHashes": [ + "0x010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e" + ], + 
"address": "0x0000000000000000000000000000000000008008" + }, + { + "name": "MsgValueSimulator", + "bytecodeHashes": [ + "0x0100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f" + ], + "address": "0x0000000000000000000000000000000000008009" + }, + { + "name": "L2BaseToken", + "bytecodeHashes": [ + "0x01000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4" + ], + "address": "0x000000000000000000000000000000000000800a" + }, + { + "name": "SystemContext", + "bytecodeHashes": [ + "0x010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5" + ], + "address": "0x000000000000000000000000000000000000800b" + }, + { + "name": "BootloaderUtilities", + "bytecodeHashes": [ + "0x010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce" + ], + "address": "0x000000000000000000000000000000000000800c" + }, + { + "name": "EventWriter", + "bytecodeHashes": [ + "0x010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98" + ], + "address": "0x000000000000000000000000000000000000800d" + }, + { + "name": "Compressor", + "bytecodeHashes": [ + "0x0100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576" + ], + "address": "0x000000000000000000000000000000000000800e" + }, + { + "name": "ComplexUpgrader", + "bytecodeHashes": [ + "0x0100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d" + ], + "address": "0x000000000000000000000000000000000000800f" + }, + { + "name": "Keccak256", + "bytecodeHashes": [ + "0x0100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b" + ], + "address": "0x0000000000000000000000000000000000008010" + }, + { + "name": "CodeOracle", + "bytecodeHashes": [ + "0x01000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e3" + ], + "address": "0x0000000000000000000000000000000000008012" + }, + { + "name": "P256Verify", + "bytecodeHashes": [ + "0x010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a" + ], + "address": "0x0000000000000000000000000000000000000100" + }, + { + "name": "PubdataChunkPublisher", + "bytecodeHashes": [ + "0x010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7" + ], + "address": "0x0000000000000000000000000000000000008011" + }, + { + "name": "Create2Factory", + "bytecodeHashes": [ + "0x010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf546" + ], + "address": "0x0000000000000000000000000000000000010000" + } + ], + "defaultAA": { + "name": "DefaultAccount", + "bytecodeHashes": [ + "0x0100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe30" + ] + }, + "bootloader": { + "name": "Bootloader", + "bytecodeHashes": [ + "0x010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b678" + ] + }, + "forcedDeployments": [ + { + "bytecodeHash": "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd", + "newAddress": "0x0000000000000000000000000000000000000000", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b", + "newAddress": "0x0000000000000000000000000000000000000001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a", + "newAddress": "0x0000000000000000000000000000000000000002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b85", + "newAddress": "0x0000000000000000000000000000000000000006", + "value": 0, + "input": "0x", + "callConstructor": false + 
}, + { + "bytecodeHash": "0x010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66", + "newAddress": "0x0000000000000000000000000000000000000007", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b299", + "newAddress": "0x0000000000000000000000000000000000000008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd", + "newAddress": "0x0000000000000000000000000000000000008001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e1", + "newAddress": "0x0000000000000000000000000000000000008002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c73", + "newAddress": "0x0000000000000000000000000000000000008003", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac9", + "newAddress": "0x0000000000000000000000000000000000008004", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a", + "newAddress": "0x0000000000000000000000000000000000008005", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee", + "newAddress": "0x0000000000000000000000000000000000008006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e", + "newAddress": "0x0000000000000000000000000000000000008008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f", + "newAddress": "0x0000000000000000000000000000000000008009", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4", + "newAddress": "0x000000000000000000000000000000000000800a", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5", + "newAddress": "0x000000000000000000000000000000000000800b", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce", + "newAddress": "0x000000000000000000000000000000000000800c", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98", + "newAddress": "0x000000000000000000000000000000000000800d", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576", + "newAddress": "0x000000000000000000000000000000000000800e", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d", + "newAddress": "0x000000000000000000000000000000000000800f", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": 
"0x0100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b", + "newAddress": "0x0000000000000000000000000000000000008010", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e3", + "newAddress": "0x0000000000000000000000000000000000008012", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a", + "newAddress": "0x0000000000000000000000000000000000000100", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7", + "newAddress": "0x0000000000000000000000000000000000008011", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf546", + "newAddress": "0x0000000000000000000000000000000000010000", + "value": 0, + "input": "0x", + "callConstructor": false + } + ], + "forcedDeploymentCalldata": "0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000
0000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f283
0ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b0000000000000000000000000000000000000000000000000000000000008010000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "calldata": 
"0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000
0000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7000000000000000000000000000000
00000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "tx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 25, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555b
ffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "delegatedCalldata": 
"0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000
0000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7000000000000000000000000000000
00000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/transactions.json b/etc/upgrades/1728066632-protocol-defense/stage/transactions.json new file mode 100644 index 00000000000..fceeb9bd407 --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/transactions.json @@ -0,0 +1,253 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 25, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb
55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a000000000000000000000000000000000000000000000000000000000000800500000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b678", + "defaultAccountHash": "0x0100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe30", + "verifier": "0x06aa7a7B07108F7C5539645e32DD5c21cBF9EB66", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x6704ae40" + }, + "factoryDeps": [], + "newProtocolVersion": 107374182400, + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x08284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a000000000000000000000000000000000000000000000000000000000000011600000000000000000
00000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c730000000000000000000000000000000000000000000000000000000000008003000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0xc11816734C1961ed67a9e2A34d9956eF8d03AD72", + "protocolVersionSemVer": "0.25.0", + "packedProtocolVersion": 107374182400, + "upgradeTimestamp": "1728360000", + "stmUpgradeCalldata": 
"0x2e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000c11816734c1961ed67a9e2a34d9956ef8d03ad72000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000e80000000000000000000000000000000000000000000000000000000000000144000000000000000000000000000000000000000000000000000000000000015e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d04681560000000000000000
0000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000090c0a0a63d7ff47bfaa1e9f8fa554dabc986504a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000
0000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f0000000000000000000000000000000000000000000000000000000000000000000000000000000081754d2e48e3e553ba6dfd193fc72b3a0c6076d900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000
000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000922805cf0c00c9a19c14603529fb1a6f63861d80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000bb13642f795014e0eac2b0d52ecd5162ecb667120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da43000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b4408284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b85000000000000000000000000000000000000000000000000000000000000000600000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e000000000000000000000000000000000000000000000000000000000000800800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000
00000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "chainAdminUpgradeCalldata": 
"0x69340beb00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000033c4fc57565f000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000c11816734c1961ed67a9e2a34d9956ef8d03ad72000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000e80000000000000000000000000000000000000000000000000000000000000144000000000000000000000000000000000000000000000000000000000000015e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000
000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d340000000000000000000000000000000000000000000
00000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000090c0a0a63d7ff47bfaa1e9f8fa554dabc986504a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f0000000000000000000000000000000000000000000000000000000000000000000000000000000081754d2e48e3e553ba6dfd193fc72b3a0c6076d900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a94100000000000000
00000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000922805cf0c00c9a19c14603529fb1a6f63861d80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000bb13642f795014e0eac2b0d52ecd5162ecb667120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da43000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b4408284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa20000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a0000000000000000000000000000000000000000000000000000000000000002000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee0000000000000000000000000000000000000000000000000000000000008006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000
000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" 
+ ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x90C0A0a63d7ff47BfAA1e9F8fa554dabc986504a", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x81754d2E48e3e553ba6Dfd193FC72B3A0c6076d9", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x922805Cf0C00C9A19C14603529Fb1a6f63861d80", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xBB13642F795014E0EAC2b0d52ECD5162ECb66712", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 0, + "isFreezable": true + } + ], + "initAddress": "0xc11816734C1961ed67a9e2A34d9956eF8d03AD72", + "initCalldata": 
"0x08284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a000000000000000000000000000000000000000000000000000000000000011600000000000000000
00000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c730000000000000000000000000000000000000000000000000000000000008003000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/utils/src/index.ts b/etc/utils/src/index.ts index e64439c53fc..320f9a3a8ad 100644 --- a/etc/utils/src/index.ts +++ b/etc/utils/src/index.ts @@ -26,6 +26,7 @@ const IGNORED_DIRS = [ 'cache-zk', // Ignore 
directories with OZ and forge submodules. 'contracts/l1-contracts/lib', + 'contracts/lib', 'era-observability' ]; const IGNORED_FILES = ['KeysWithPlonkVerifier.sol', 'TokenInit.sol', '.tslintrc.js', '.prettierrc.js']; diff --git a/yarn.lock b/yarn.lock index 531f49abc00..255bd901e03 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1689,12 +1689,22 @@ resolved "https://registry.yarnpkg.com/@matterlabs/eslint-config-typescript/-/eslint-config-typescript-1.1.2.tgz#a9be4e56aedf298800f247c5049fc412f8b301a7" integrity sha512-AhiWJQr+MSE3RVfgp5XwGoMK7kNSKh6a18+T7hkNJtyycP0306I6IGmuFA5ZVbcakGb+K32fQWzepSkrNCTAGg== -"@matterlabs/hardhat-zksync-chai-matchers@^0.1.4": - version "0.1.4" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-chai-matchers/-/hardhat-zksync-chai-matchers-0.1.4.tgz#105cb0ec1367c8fcd3ce7e3773f747c71fff675b" - integrity sha512-eGQWiImg51fmayoQ7smIK/T6QZkSu38PK7xjp1RIrewGzw2ZgqFWGp40jb5oomkf8yOQPk52Hu4TwE3Ntp8CtA== +"@matterlabs/hardhat-zksync-chai-matchers@^0.2.0": + version "0.2.1" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-chai-matchers/-/hardhat-zksync-chai-matchers-0.2.1.tgz#d05136d6cf9a53c30f5e7ee9bae95abb72c1000d" + integrity sha512-LXm5r53DLTQC/KXRXzSRmVp5mEJ4tsoKAKyGck2YLHQ9CBdPoC0paVjbyB2MaEuK/k8o4lZu4uaYKgWQNUXeyQ== + dependencies: + "@ethersproject/abi" "^5.1.2" + "@matterlabs/hardhat-zksync-deploy" "^0.7.0" + "@matterlabs/hardhat-zksync-solc" "1.0.6" + chai "^4.3.7" + chai-as-promised "^7.1.1" + ethers "~5.7.2" + hardhat "^2.14.0" + ordinal "1.0.3" + zksync-ethers "^5.0.0" -"@matterlabs/hardhat-zksync-deploy@^0.6.1", "@matterlabs/hardhat-zksync-deploy@^0.6.5": +"@matterlabs/hardhat-zksync-deploy@^0.6.1": version "0.6.6" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-0.6.6.tgz#5c86cf7da859844167d62300528c3e6013ee0286" integrity sha512-QpkxK2wnyQCgaTVLdFATpfiySfr7073yPre9eq5LfKA8VxXWD4WZAuBMq700GL5UyfW9yyHkCdkSzaGigmZ4/Q== @@ -1703,20 +1713,38 @@ chalk "4.1.2" ts-morph "^19.0.0" +"@matterlabs/hardhat-zksync-deploy@^0.7.0": + version "0.7.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-0.7.0.tgz#e56b73d8f8fbd0f809a779d0028418ea7d914017" + integrity sha512-PGZcuhKsVzZ2IWPt931pK2gA+HDxnCtye+7CwvoOnM6diHeO9tB1QHFX/ywR9ErOW9wpezhPYkVDx9myFrdoqQ== + dependencies: + "@matterlabs/hardhat-zksync-solc" "^1.0.5" + chalk "4.1.2" + ts-morph "^19.0.0" + "@matterlabs/hardhat-zksync-deploy@^1.5.0": version "1.5.0" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.5.0.tgz#40cb454fb187da4bb354f3acb48762a6657fcb36" integrity sha512-7LAgYYwoKWHeR+3CyWEvA3NKBKtt7ktcr7SX6ZPgbEYqHAdXH02vxJZGwNADtMWpyYm8h+fEQkpPIgErD4NhmA== dependencies: - "@matterlabs/hardhat-zksync-solc" "^1.2.0" - chai "^4.3.4" - chalk "^4.1.2" + "@matterlabs/hardhat-zksync-solc" "^1.0.5" + chalk "4.1.2" + ts-morph "^19.0.0" + +"@matterlabs/hardhat-zksync-deploy@^1.3.0": + version "1.3.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.3.0.tgz#5c2b723318ddf6c4d3929ec225401864ff54557a" + integrity sha512-4UHOgOwIBC4JA3W8DE9GHqbAuBhCPAjtM+Oew1aiYYGkIsPUAMYsH35+4I2FzJsYyE6mD6ATmoS/HfZweQHTlQ== + dependencies: + "@matterlabs/hardhat-zksync-solc" "^1.0.4" + chai "^4.3.6" + chalk "4.1.2" fs-extra "^11.2.0" - glob "^10.4.1" + glob "^10.3.10" lodash "^4.17.21" - sinon "^18.0.0" + sinon "^17.0.1" sinon-chai "^3.7.0" - ts-morph "^22.0.0" + ts-morph "^21.0.1" 
"@matterlabs/hardhat-zksync-node@^0.0.1-beta.7": version "0.0.1" @@ -1728,25 +1756,26 @@ chalk "4.1.2" fs-extra "^11.1.1" -"@matterlabs/hardhat-zksync-solc@0.4.1": - version "0.4.1" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-0.4.1.tgz#e8e67d947098d7bb8925f968544d34e522af5a9c" - integrity sha512-fdlGf/2yZR5ihVNc2ubea1R/nNFXRONL29Fgz5FwB3azB13rPb76fkQgcFIg9zSufHsEy6zUUT029NkxLNA9Sw== +"@matterlabs/hardhat-zksync-solc@0.4.2": + version "0.4.2" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-0.4.2.tgz#64121082e88c5ab22eb4e9594d120e504f6af499" + integrity sha512-6NFWPSZiOAoo7wNuhMg4ztj7mMEH+tLrx09WuCbcURrHPijj/KxYNsJD6Uw5lapKr7G8H7SQISGid1/MTXVmXQ== dependencies: "@nomiclabs/hardhat-docker" "^2.0.0" chalk "4.1.2" dockerode "^3.3.4" fs-extra "^11.1.1" + proper-lockfile "^4.1.2" semver "^7.5.1" -"@matterlabs/hardhat-zksync-solc@0.4.2": - version "0.4.2" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-0.4.2.tgz#64121082e88c5ab22eb4e9594d120e504f6af499" - integrity sha512-6NFWPSZiOAoo7wNuhMg4ztj7mMEH+tLrx09WuCbcURrHPijj/KxYNsJD6Uw5lapKr7G8H7SQISGid1/MTXVmXQ== +"@matterlabs/hardhat-zksync-solc@1.0.6": + version "1.0.6" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.0.6.tgz#7ef8438e6bb15244691600e2afa77aaff7dff9f0" + integrity sha512-0icYSufXba/Bbb7v2iXuZJ+IbYsiNpR4Wy6UizHnGuFw3OMHgh+saebQphuaN9yyRL2UPGZbPkQFHWBLZj5/xQ== dependencies: "@nomiclabs/hardhat-docker" "^2.0.0" chalk "4.1.2" - dockerode "^3.3.4" + dockerode "^4.0.0" fs-extra "^11.1.1" proper-lockfile "^4.1.2" semver "^7.5.1" @@ -1760,7 +1789,7 @@ chalk "4.1.2" dockerode "^3.3.4" -"@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4": +"@matterlabs/hardhat-zksync-solc@^1.0.4", "@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4": version "1.1.4" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.1.4.tgz#04a2fad6fb6b6944c64ad969080ee65b9af3f617" integrity sha512-4/usbogh9neewR2/v8Dn2OzqVblZMUuT/iH2MyPZgPRZYQlL4SlZtMvokU9UQjZT6iSoaKCbbdWESHDHSzfUjA== @@ -1794,10 +1823,10 @@ sinon-chai "^3.7.0" undici "^6.18.2" -"@matterlabs/hardhat-zksync-solc@^1.2.4": - version "1.2.4" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.2.4.tgz#b14a1dbfe751058bf2d79eab747b87c7ca7d2361" - integrity sha512-9Nk95kxOZ9rl26trP/pXDLw5MqFAd0CD8FMTGDvA5HBGk6CL2wg4tS0gmucYz5R4qj09KUYOO4FW4rgd/atcGg== +"@matterlabs/hardhat-zksync-verify@^0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.4.0.tgz#f812c19950022fc36728f3796f6bdae5633e2fcd" + integrity sha512-GPZmAumFl3ZMPKbECX7Qw8CriwZKWd1DlCRhoG/6YYc6mFy4+MXkF1XsHLMs5r34N+GDOfbVZVMeftIlJC96Kg== dependencies: "@nomiclabs/hardhat-docker" "^2.0.2" chai "^4.3.4" @@ -1811,16 +1840,17 @@ sinon-chai "^3.7.0" undici "^6.18.2" -"@matterlabs/hardhat-zksync-verify@^0.2.0": - version "0.2.2" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.2.2.tgz#daa34bc4404096ed0f44461ee366c1cb0e5a4f2f" - integrity sha512-WgcItoZGY702oJ708uCP5uLvmwzDLBfhMqq2D0Kh1U/3fCTlPza9zMGUFHxKMQYsITKTeQ5zKOjKoi8MXOeUdQ== +"@matterlabs/hardhat-zksync-verify@^0.4.0": + version "0.4.0" + resolved 
"https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.4.0.tgz#f812c19950022fc36728f3796f6bdae5633e2fcd" + integrity sha512-GPZmAumFl3ZMPKbECX7Qw8CriwZKWd1DlCRhoG/6YYc6mFy4+MXkF1XsHLMs5r34N+GDOfbVZVMeftIlJC96Kg== dependencies: - "@matterlabs/hardhat-zksync-solc" "0.4.1" + "@matterlabs/hardhat-zksync-solc" "^1.0.5" "@nomicfoundation/hardhat-verify" "^1.0.2" axios "^1.4.0" chalk "4.1.2" dockerode "^3.3.4" + zksync-ethers "^5.0.0" "@matterlabs/hardhat-zksync-verify@^1.4.3": version "1.5.0" @@ -1841,20 +1871,20 @@ sinon "^18.0.0" sinon-chai "^3.7.0" -"@matterlabs/hardhat-zksync-vyper@^1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.1.0.tgz#b3fb304429e88a84b4abc3fe4e5a83b2f5e907bd" - integrity sha512-zDjHPeIuHRpumXiWZUbhoji4UJe09jTDRn4xnxsuVkLH7qLAm0VDFzCXYNMvEuySZSdhbSbekxJsH9Kunc5ycA== +"@matterlabs/hardhat-zksync-vyper@^1.0.8": + version "1.0.8" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.0.8.tgz#d5bd496715a1e322b0bf3926b4146b4e18ab64ff" + integrity sha512-XR7rbfDuBG5/LZWYfhQTP9gD+U24hSJHDuZ9U55wgIfiQTOxPoztFwEbQNiC39vjT5MjP/Nv8/IDrlEBkaVCgw== dependencies: - "@nomiclabs/hardhat-docker" "^2.0.2" - chai "^4.3.4" - chalk "^4.1.2" + "@nomiclabs/hardhat-docker" "^2.0.0" + chai "^4.3.6" + chalk "4.1.2" dockerode "^4.0.2" - fs-extra "^11.2.0" - semver "^7.6.2" - sinon "^18.0.0" + fs-extra "^11.1.1" + semver "^7.5.4" + sinon "^17.0.1" sinon-chai "^3.7.0" - undici "^6.18.2" + undici "^5.14.0" "@matterlabs/prettier-config@^1.0.3": version "1.0.3" @@ -1942,6 +1972,11 @@ resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-darwin-arm64/-/edr-darwin-arm64-0.4.0.tgz#bbb43f0e01f40839b0bd38c2c443cb6910ae955f" integrity sha512-7+rraFk9tCqvfemv9Ita5vTlSBAeO/S5aDKOgGRgYt0JEKZlrX161nDW6UfzMPxWl9GOLEDUzCEaYuNmXseUlg== +"@nomicfoundation/edr-darwin-arm64@0.6.2": + version "0.6.2" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-darwin-arm64/-/edr-darwin-arm64-0.6.2.tgz#52c3da9dcdab72c0447b41faa63264de84c9b6c3" + integrity sha512-o4A9SaPlxJ1MS6u8Ozqq7Y0ri2XO0jASw+qkytQyBYowNFNReoGqVSs7SCwenYCDiN+1il8+M0VAUq7wOovnCQ== + "@nomicfoundation/edr-darwin-x64@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-darwin-x64/-/edr-darwin-x64-0.3.4.tgz#cbcc0a2dcda0a7c0a900a74efc6918cff134dc23" @@ -1952,6 +1987,11 @@ resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-darwin-x64/-/edr-darwin-x64-0.4.0.tgz#b1ffcd9142418fd8498de34a7336b3f977907c86" integrity sha512-+Hrc0mP9L6vhICJSfyGo/2taOToy1AIzVZawO3lU8Lf7oDQXfhQ4UkZnkWAs9SVu1eUwHUGGGE0qB8644piYgg== +"@nomicfoundation/edr-darwin-x64@0.6.2": + version "0.6.2" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-darwin-x64/-/edr-darwin-x64-0.6.2.tgz#327deb548f2ae62eb456ba183970b022eb98c509" + integrity sha512-WG8BeG2eR3rFC+2/9V1hoPGW7tmNRUcuztdHUijO1h2flRsf2YWv+kEHO+EEnhGkEbgBUiwOrwlwlSMxhe2cGA== + "@nomicfoundation/edr-linux-arm64-gnu@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-arm64-gnu/-/edr-linux-arm64-gnu-0.3.4.tgz#12073f97d310176bb24ad7d48c25128ea8eff093" @@ -1962,6 +2002,11 @@ resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-arm64-gnu/-/edr-linux-arm64-gnu-0.4.0.tgz#8173d16d4f6f2b3e82ba7096d2a1ea3619d8bfa7" integrity sha512-4HUDMchNClQrVRfVTqBeSX92hM/3khCgpZkXP52qrnJPqgbdCxosOehlQYZ65wu0b/kaaZSyvACgvCLSQ5oSzQ== +"@nomicfoundation/edr-linux-arm64-gnu@0.6.2": + version "0.6.2" + 
resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-arm64-gnu/-/edr-linux-arm64-gnu-0.6.2.tgz#83daecf1ced46bb4c70326e9358d0c2ae69b472a" + integrity sha512-wvHaTmOwuPjRIOqBB+paI3RBdNlG8f3e1F2zWj75EdeWwefimPzzFUs05JxOYuPO0JhDQIn2tbYUgdZbBQ+mqg== + "@nomicfoundation/edr-linux-arm64-musl@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-arm64-musl/-/edr-linux-arm64-musl-0.3.4.tgz#c9bc685d4d14bf21d9c3e326edd44e009e24492d" @@ -1972,6 +2017,11 @@ resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-arm64-musl/-/edr-linux-arm64-musl-0.4.0.tgz#b1ce293a7c3e0d9f70391e1aef1a82b83b997567" integrity sha512-D4J935ZRL8xfnP3zIFlCI9jXInJ0loDUkCTLeCEbOf2uuDumWDghKNQlF1itUS+EHaR1pFVBbuwqq8hVK0dASg== +"@nomicfoundation/edr-linux-arm64-musl@0.6.2": + version "0.6.2" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-arm64-musl/-/edr-linux-arm64-musl-0.6.2.tgz#b0666da450d68364975562ec5f7c2b6ee718e36b" + integrity sha512-UrOAxnsywUcEngQM2ZxIuucci0VX29hYxX7jcpwZU50HICCjxNsxnuXYPxv+IM+6gbhBY1FYvYJGW4PJcP1Nyw== + "@nomicfoundation/edr-linux-x64-gnu@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-x64-gnu/-/edr-linux-x64-gnu-0.3.4.tgz#37486cbe317b8caf7961e500fc0150c45c895a56" @@ -1982,6 +2032,11 @@ resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-x64-gnu/-/edr-linux-x64-gnu-0.4.0.tgz#4c12c4e4bfd3d837f5663ad7cbf7cb6d5634ef83" integrity sha512-6x7HPy+uN5Cb9N77e2XMmT6+QSJ+7mRbHnhkGJ8jm4cZvWuj2Io7npOaeHQ3YHK+TiQpTnlbkjoOIpEwpY3XZA== +"@nomicfoundation/edr-linux-x64-gnu@0.6.2": + version "0.6.2" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-x64-gnu/-/edr-linux-x64-gnu-0.6.2.tgz#c61ae692ddf906e65078962e6d86daaa04f95d0d" + integrity sha512-gYxlPLi7fkNcmDmCwZWQa5eOfNcTDundE+TWjpyafxLAjodQuKBD4I0p4XbnuocHjoBEeNzLWdE5RShbZEXEJA== + "@nomicfoundation/edr-linux-x64-musl@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-x64-musl/-/edr-linux-x64-musl-0.3.4.tgz#399278807100a1833f6c8a39c17d5beaaf7a9223" @@ -1992,6 +2047,11 @@ resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-x64-musl/-/edr-linux-x64-musl-0.4.0.tgz#8842004aa1a47c504f10863687da28b65dca7baa" integrity sha512-3HFIJSXgyubOiaN4MWGXx2xhTnhwlJk0PiSYNf9+L/fjBtcRkb2nM910ZJHTvqCb6OT98cUnaKuAYdXIW2amgw== +"@nomicfoundation/edr-linux-x64-musl@0.6.2": + version "0.6.2" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-linux-x64-musl/-/edr-linux-x64-musl-0.6.2.tgz#a2714ee7a62faf55c7994c7eaddeb32d0622801d" + integrity sha512-ev5hy9wmiHZi1GKQ1l6PJ2+UpsUh+DvK9AwiCZVEdaicuhmTfO6fdL4szgE4An8RU+Ou9DeiI1tZcq6iw++Wuw== + "@nomicfoundation/edr-win32-arm64-msvc@0.3.4": version "0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-win32-arm64-msvc/-/edr-win32-arm64-msvc-0.3.4.tgz#879028e2708538fd54efc349c1a4de107a15abb4" @@ -2012,6 +2072,11 @@ resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-win32-x64-msvc/-/edr-win32-x64-msvc-0.4.0.tgz#29d8bbb2edf9912a95f5453855cf17cdcb269957" integrity sha512-CP4GsllEfXEz+lidcGYxKe5rDJ60TM5/blB5z/04ELVvw6/CK9eLcYeku7HV0jvV7VE6dADYKSdQyUkvd0El+A== +"@nomicfoundation/edr-win32-x64-msvc@0.6.2": + version "0.6.2" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr-win32-x64-msvc/-/edr-win32-x64-msvc-0.6.2.tgz#5507884a81d57f337363b7fbf9bf4ae93ff69c0c" + integrity sha512-2ZXVVcmdmEeX0Hb3IAurHUjgU3H1GIk9h7Okosdjgl3tl+BaNHxi84Us+DblynO1LRj8nL/ATeVtSfBuW3Z1vw== + "@nomicfoundation/edr@^0.3.1": version 
"0.3.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/edr/-/edr-0.3.4.tgz#e8eaf41963460139c47b0785f1a6a2a1c1b24ae0" @@ -2040,6 +2105,19 @@ "@nomicfoundation/edr-linux-x64-musl" "0.4.0" "@nomicfoundation/edr-win32-x64-msvc" "0.4.0" +"@nomicfoundation/edr@^0.6.1": + version "0.6.2" + resolved "https://registry.yarnpkg.com/@nomicfoundation/edr/-/edr-0.6.2.tgz#6911d9a0b36bc054747dcd1ae894ce447400be31" + integrity sha512-yPUegN3sTWiAkRatCmGRkuvMgD9HSSpivl2ebAqq0aU2xgC7qmIO+YQPxQ3Z46MUoi7MrTf4e6GpbT4S/8x0ew== + dependencies: + "@nomicfoundation/edr-darwin-arm64" "0.6.2" + "@nomicfoundation/edr-darwin-x64" "0.6.2" + "@nomicfoundation/edr-linux-arm64-gnu" "0.6.2" + "@nomicfoundation/edr-linux-arm64-musl" "0.6.2" + "@nomicfoundation/edr-linux-x64-gnu" "0.6.2" + "@nomicfoundation/edr-linux-x64-musl" "0.6.2" + "@nomicfoundation/edr-win32-x64-msvc" "0.6.2" + "@nomicfoundation/ethereumjs-common@4.0.4": version "4.0.4" resolved "https://registry.yarnpkg.com/@nomicfoundation/ethereumjs-common/-/ethereumjs-common-4.0.4.tgz#9901f513af2d4802da87c66d6f255b510bef5acb" @@ -2231,12 +2309,12 @@ resolved "https://registry.yarnpkg.com/@nomiclabs/hardhat-waffle/-/hardhat-waffle-2.0.6.tgz#d11cb063a5f61a77806053e54009c40ddee49a54" integrity sha512-+Wz0hwmJGSI17B+BhU/qFRZ1l6/xMW82QGXE/Gi+WTmwgJrQefuBs1lIf7hzQ1hLk6hpkvb/zwcNkpVKRYTQYg== -"@openzeppelin/contracts-upgradeable@4.9.5": +"@openzeppelin/contracts-upgradeable-v4@npm:@openzeppelin/contracts-upgradeable@4.9.5": version "4.9.5" resolved "https://registry.yarnpkg.com/@openzeppelin/contracts-upgradeable/-/contracts-upgradeable-4.9.5.tgz#572b5da102fc9be1d73f34968e0ca56765969812" integrity sha512-f7L1//4sLlflAN7fVzJLoRedrf5Na3Oal5PZfIq55NFcVZ90EpV1q5xOvL4lFvg3MNICSDr2hH0JUBxwlxcoPg== -"@openzeppelin/contracts@4.9.5": +"@openzeppelin/contracts-v4@npm:@openzeppelin/contracts@4.9.5": version "4.9.5" resolved "https://registry.yarnpkg.com/@openzeppelin/contracts/-/contracts-4.9.5.tgz#1eed23d4844c861a1835b5d33507c1017fa98de8" integrity sha512-ZK+W5mVhRppff9BE6YdR8CC52C8zAvsVAiWhEtQ5+oNxFE6h1WdeWo+FJSF8KKvtxxVYZ7MTP/5KoVpAU3aSWg== @@ -2256,6 +2334,27 @@ resolved "https://registry.yarnpkg.com/@pkgr/core/-/core-0.1.1.tgz#1ec17e2edbec25c8306d424ecfbf13c7de1aaa31" integrity sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA== +"@pnpm/config.env-replace@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz#ab29da53df41e8948a00f2433f085f54de8b3a4c" + integrity sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w== + +"@pnpm/network.ca-file@^1.0.1": + version "1.0.2" + resolved "https://registry.yarnpkg.com/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz#2ab05e09c1af0cdf2fcf5035bea1484e222f7983" + integrity sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA== + dependencies: + graceful-fs "4.2.10" + +"@pnpm/npm-conf@^2.1.0": + version "2.3.1" + resolved "https://registry.yarnpkg.com/@pnpm/npm-conf/-/npm-conf-2.3.1.tgz#bb375a571a0bd63ab0a23bece33033c683e9b6b0" + integrity sha512-c83qWb22rNRuB0UaVCI0uRPNRr8Z0FWnEIvT47jiHAmOIUHbBOg5XvV7pM5x+rKn9HRpjxquDbXYSXr3fAKFcw== + dependencies: + "@pnpm/config.env-replace" "^1.1.0" + "@pnpm/network.ca-file" "^1.0.1" + config-chain "^1.1.11" + "@protobufjs/aspromise@^1.1.1", "@protobufjs/aspromise@^1.1.2": version "1.1.2" resolved 
"https://registry.yarnpkg.com/@protobufjs/aspromise/-/aspromise-1.1.2.tgz#9b8b0cc663d669a7d8f6f5d0893a14d348f30fbf" @@ -2458,6 +2557,11 @@ resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e" integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA== +"@sindresorhus/is@^5.2.0": + version "5.6.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-5.6.0.tgz#41dd6093d34652cddb5d5bdeee04eafc33826668" + integrity sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g== + "@sinonjs/commons@^2.0.0": version "2.0.0" resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-2.0.0.tgz#fd4ca5b063554307e8327b4564bd56d3b73924a3" @@ -2524,6 +2628,13 @@ resolved "https://registry.yarnpkg.com/@solidity-parser/parser/-/parser-0.18.0.tgz#8e77a02a09ecce957255a2f48c9a7178ec191908" integrity sha512-yfORGUIPgLck41qyN7nbwJRAx17/jAIXCTanHOJZhB6PJ1iAk/84b/xlsVKFSyNyLXIj0dhppoE0+CRws7wlzA== +"@szmarczak/http-timer@^5.0.1": + version "5.0.1" + resolved "https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-5.0.1.tgz#c7c1bf1141cdd4751b0399c8fc7b8b664cd5be3a" + integrity sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw== + dependencies: + defer-to-connect "^2.0.1" + "@trufflesuite/bigint-buffer@1.1.10": version "1.1.10" resolved "https://registry.yarnpkg.com/@trufflesuite/bigint-buffer/-/bigint-buffer-1.1.10.tgz#a1d9ca22d3cad1a138b78baaf15543637a3e1692" @@ -2548,10 +2659,10 @@ mkdirp "^2.1.6" path-browserify "^1.0.1" -"@ts-morph/common@~0.23.0": - version "0.23.0" - resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.23.0.tgz#bd4ddbd3f484f29476c8bd985491592ae5fc147e" - integrity sha512-m7Lllj9n/S6sOkCkRftpM7L24uvmfXQFedlW/4hENcuJH1HHm9u5EgxZb9uVjQSCGrbBWBkOGgcTxNg36r6ywA== +"@ts-morph/common@~0.22.0": + version "0.22.0" + resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.22.0.tgz#8951d451622a26472fbc3a227d6c3a90e687a683" + integrity sha512-HqNBuV/oIlMKdkLshXd1zKBqNQCsuPEsgQOkfFQ/eUKjRlwndXW1AjN9LVkBEIukm00gGXSRmfkl0Wv5VXLnlw== dependencies: fast-glob "^3.3.2" minimatch "^9.0.3" @@ -2703,6 +2814,11 @@ dependencies: "@types/node" "*" +"@types/http-cache-semantics@^4.0.2": + version "4.0.4" + resolved "https://registry.yarnpkg.com/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz#b979ebad3919799c979b17c72621c0bc0a31c6c4" + integrity sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA== + "@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0", "@types/istanbul-lib-coverage@^2.0.1": version "2.0.6" resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz#7739c232a1fee9b4d3ce8985f314c0c6d33549d7" @@ -3259,6 +3375,11 @@ antlr4@^4.11.0: resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.1.tgz#1e0a1830a08faeb86217cb2e6c34716004e4253d" integrity sha512-kiXTspaRYvnIArgE97z5YVVf/cDVQABr3abFRR6mE7yesLMkgu4ujuyV/sgxafQ8wgve0DJQUJ38Z8tkgA2izA== +antlr4@^4.13.1-patch-1: + version "4.13.2" + resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.2.tgz#0d084ad0e32620482a9c3a0e2470c02e72e4006d" + integrity sha512-QiVbZhyy4xAZ17UPEuG3YTOt8ZaoeOR1CvEAqrEsDBsOqINslaB147i9xqljZqoyf5S+EUlGStaj+t22LT9MOg== + antlr4ts@^0.5.0-alpha.4: version "0.5.0-alpha.4" resolved 
"https://registry.yarnpkg.com/antlr4ts/-/antlr4ts-0.5.0-alpha.4.tgz#71702865a87478ed0b40c0709f422cf14d51652a" @@ -3829,6 +3950,24 @@ bytes@3.1.2: resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== +cacheable-lookup@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz#3476a8215d046e5a3202a9209dd13fec1f933a27" + integrity sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w== + +cacheable-request@^10.2.8: + version "10.2.14" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-10.2.14.tgz#eb915b665fda41b79652782df3f553449c406b9d" + integrity sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ== + dependencies: + "@types/http-cache-semantics" "^4.0.2" + get-stream "^6.0.1" + http-cache-semantics "^4.1.1" + keyv "^4.5.3" + mimic-response "^4.0.0" + normalize-url "^8.0.0" + responselike "^3.0.0" + call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6, call-bind@^1.0.7: version "1.0.7" resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" @@ -3904,6 +4043,19 @@ chai@^4.3.10, chai@^4.3.4, chai@^4.3.6: pathval "^1.1.1" type-detect "^4.0.8" +chai@^4.3.7: + version "4.5.0" + resolved "https://registry.yarnpkg.com/chai/-/chai-4.5.0.tgz#707e49923afdd9b13a8b0b47d33d732d13812fd8" + integrity sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw== + dependencies: + assertion-error "^1.1.0" + check-error "^1.0.3" + deep-eql "^4.1.3" + get-func-name "^2.0.2" + loupe "^2.3.6" + pathval "^1.1.1" + type-detect "^4.1.0" + chalk@4.1.2, chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" @@ -3973,6 +4125,13 @@ chokidar@^3.4.0: optionalDependencies: fsevents "~2.3.2" +chokidar@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-4.0.1.tgz#4a6dff66798fb0f72a94f616abbd7e1a19f31d41" + integrity sha512-n8enUVCED/KVRQlab1hr3MVpcVMvxtZjmEa956u+4YijlmQED223XMSYj2tLuKvr4jcCTzNNMpQDUer72MMmzA== + dependencies: + readdirp "^4.0.1" + chownr@^1.0.1, chownr@^1.1.1: version "1.1.4" resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b" @@ -4070,11 +4229,6 @@ code-block-writer@^12.0.0: resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-12.0.0.tgz#4dd58946eb4234105aff7f0035977b2afdc2a770" integrity sha512-q4dMFMlXtKR3XNBHyMHt/3pwYNA69EDk00lloMOaaUMKPUXBw6lpXtbu3MMVG6/uOihGnRDOlkyqsONEUj60+w== -code-block-writer@^13.0.1: - version "13.0.2" - resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-13.0.2.tgz#e1c6c3dbe5d38b4ac76fb62c4d4b2fc4bf04c9c1" - integrity sha512-XfXzAGiStXSmCIwrkdfvc7FS5Dtj8yelCtyOf2p2skCAfvLd6zu0rGzuS9NSCO3bq1JKpFZ7tbKdKlcd5occQA== - collect-v8-coverage@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz#c0b29bcd33bcd0779a1344c2136051e6afd3d9e9" @@ -4214,6 +4368,14 @@ concat-stream@^1.6.0, concat-stream@^1.6.2, concat-stream@~1.6.2: readable-stream "^2.2.2" typedarray "^0.0.6" +config-chain@^1.1.11: + version "1.1.13" + resolved 
"https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.13.tgz#fad0795aa6a6cdaff9ed1b68e9dff94372c232f4" + integrity sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ== + dependencies: + ini "^1.3.4" + proto-list "~1.2.1" + convert-source-map@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" @@ -4417,6 +4579,13 @@ decamelize@^4.0.0: resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-4.0.0.tgz#aa472d7bf660eb15f3494efd531cab7f2a709837" integrity sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ== +decompress-response@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-6.0.0.tgz#ca387612ddb7e104bd16d85aab00d5ecf09c66fc" + integrity sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ== + dependencies: + mimic-response "^3.1.0" + dedent@^1.0.0: version "1.5.1" resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.1.tgz#4f3fc94c8b711e9bb2800d185cd6ad20f2a90aff" @@ -4449,6 +4618,11 @@ deepmerge@^4.2.2: resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.3.1.tgz#44b5f2147cd3b00d4b56137685966f26fd25dd4a" integrity sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A== +defer-to-connect@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-2.0.1.tgz#8016bdb4143e4632b77a3449c6236277de520587" + integrity sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg== + deferred-leveldown@~5.3.0: version "5.3.0" resolved "https://registry.yarnpkg.com/deferred-leveldown/-/deferred-leveldown-5.3.0.tgz#27a997ad95408b61161aa69bd489b86c71b78058" @@ -4577,7 +4751,7 @@ dockerode@^3.3.4: docker-modem "^3.0.0" tar-fs "~2.0.1" -dockerode@^4.0.2: +dockerode@^4.0.0, dockerode@^4.0.2: version "4.0.2" resolved "https://registry.yarnpkg.com/dockerode/-/dockerode-4.0.2.tgz#dedc8529a1db3ac46d186f5912389899bc309f7d" integrity sha512-9wM1BVpVMFr2Pw3eJNXrYYt6DT9k0xMcsSCjtPvyQ+xa1iPg/Mo3T/gUcwI0B2cczqCeCYRPF8yFYDwtFXT0+w== @@ -5258,7 +5432,7 @@ ethereumjs-util@^7.1.1, ethereumjs-util@^7.1.3, ethereumjs-util@^7.1.4, ethereum ethereum-cryptography "^0.1.3" rlp "^2.2.4" -ethers@^5.0.2, ethers@^5.7.0, ethers@^5.7.2, ethers@~5.7.0: +ethers@^5.0.2, ethers@^5.7.0, ethers@^5.7.2, ethers@~5.7.0, ethers@~5.7.2: version "5.7.2" resolved "https://registry.yarnpkg.com/ethers/-/ethers-5.7.2.tgz#3a7deeabbb8c030d4126b24f84e525466145872e" integrity sha512-wswUsmWo1aOK8rR7DIKiWSw9DbLWe6x98Jrn8wcTflTVvaXhAMaB5zGAXy0GYQEQp9iO1iSHWVyARQm11zUtyg== @@ -5669,6 +5843,11 @@ forever-agent@~0.6.1: resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" integrity sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw== +form-data-encoder@^2.1.2: + version "2.1.4" + resolved "https://registry.yarnpkg.com/form-data-encoder/-/form-data-encoder-2.1.4.tgz#261ea35d2a70d48d30ec7a9603130fa5515e9cd5" + integrity sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw== + form-data@^2.2.0: version "2.5.1" resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.5.1.tgz#f2cbec57b5e59e23716e128fe44d4e5dd23895f4" @@ -5862,7 +6041,7 @@ get-stdin@~9.0.0: resolved 
"https://registry.yarnpkg.com/get-stdin/-/get-stdin-9.0.0.tgz#3983ff82e03d56f1b2ea0d3e60325f39d703a575" integrity sha512-dVKBjfWisLAicarI2Sf+JuBE/DghV4UzNAVe9yhEJuzeREd3JhOTE9cUaJTeSa77fsbQUK3pcOpJfM59+VKZaA== -get-stream@^6.0.0: +get-stream@^6.0.0, get-stream@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== @@ -5947,17 +6126,16 @@ glob@8.1.0, glob@^8.0.3: minimatch "^5.0.1" once "^1.3.0" -glob@^10.4.1: - version "10.4.5" - resolved "https://registry.yarnpkg.com/glob/-/glob-10.4.5.tgz#f4d9f0b90ffdbab09c9d77f5f29b4262517b0956" - integrity sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg== +glob@^10.3.10: + version "10.3.16" + resolved "https://registry.yarnpkg.com/glob/-/glob-10.3.16.tgz#bf6679d5d51279c8cfae4febe0d051d2a4bf4c6f" + integrity sha512-JDKXl1DiuuHJ6fVS2FXjownaavciiHNUU4mOvV/B793RLh05vZL1rcPnCSaOgv1hDT6RDlY7AB7ZUvFYAtPgAw== dependencies: foreground-child "^3.1.0" jackspeak "^3.1.2" - minimatch "^9.0.4" - minipass "^7.1.2" - package-json-from-dist "^1.0.0" - path-scurry "^1.11.1" + minimatch "^9.0.1" + minipass "^7.0.4" + path-scurry "^1.11.0" glob@^5.0.15: version "5.0.15" @@ -6061,6 +6239,28 @@ gopd@^1.0.1: dependencies: get-intrinsic "^1.1.3" +got@^12.1.0: + version "12.6.1" + resolved "https://registry.yarnpkg.com/got/-/got-12.6.1.tgz#8869560d1383353204b5a9435f782df9c091f549" + integrity sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ== + dependencies: + "@sindresorhus/is" "^5.2.0" + "@szmarczak/http-timer" "^5.0.1" + cacheable-lookup "^7.0.0" + cacheable-request "^10.2.8" + decompress-response "^6.0.0" + form-data-encoder "^2.1.2" + get-stream "^6.0.1" + http2-wrapper "^2.1.10" + lowercase-keys "^3.0.0" + p-cancelable "^3.0.0" + responselike "^3.0.0" + +graceful-fs@4.2.10: + version "4.2.10" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.10.tgz#147d3a006da4ca3ce14728c7aefc287c367d7a6c" + integrity sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA== + graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.9: version "4.2.11" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" @@ -6178,6 +6378,56 @@ hardhat@=2.22.2: uuid "^8.3.2" ws "^7.4.6" +hardhat@^2.14.0: + version "2.22.12" + resolved "https://registry.yarnpkg.com/hardhat/-/hardhat-2.22.12.tgz#a6d0be011fc009c50c454da367ad28c29f58d446" + integrity sha512-yok65M+LsOeTBHQsjg//QreGCyrsaNmeLVzhTFqlOvZ4ZE5y69N0wRxH1b2BC9dGK8S8OPUJMNiL9X0RAvbm8w== + dependencies: + "@ethersproject/abi" "^5.1.2" + "@metamask/eth-sig-util" "^4.0.0" + "@nomicfoundation/edr" "^0.6.1" + "@nomicfoundation/ethereumjs-common" "4.0.4" + "@nomicfoundation/ethereumjs-tx" "5.0.4" + "@nomicfoundation/ethereumjs-util" "9.0.4" + "@nomicfoundation/solidity-analyzer" "^0.1.0" + "@sentry/node" "^5.18.1" + "@types/bn.js" "^5.1.0" + "@types/lru-cache" "^5.1.0" + adm-zip "^0.4.16" + aggregate-error "^3.0.0" + ansi-escapes "^4.3.0" + boxen "^5.1.2" + chalk "^2.4.2" + chokidar "^4.0.0" + ci-info "^2.0.0" + debug "^4.1.1" + enquirer "^2.3.0" + env-paths "^2.2.0" + ethereum-cryptography "^1.0.3" + ethereumjs-abi "^0.6.8" + find-up "^2.1.0" + fp-ts "1.19.3" + fs-extra "^7.0.1" + glob "7.2.0" + 
immutable "^4.0.0-rc.12" + io-ts "1.10.4" + json-stream-stringify "^3.1.4" + keccak "^3.0.2" + lodash "^4.17.11" + mnemonist "^0.38.0" + mocha "^10.0.0" + p-map "^4.0.0" + raw-body "^2.4.1" + resolve "1.17.0" + semver "^6.3.0" + solc "0.8.26" + source-map-support "^0.5.13" + stacktrace-parser "^0.1.10" + tsort "0.0.1" + undici "^5.14.0" + uuid "^8.3.2" + ws "^7.4.6" + hardhat@^2.22.5: version "2.22.5" resolved "https://registry.yarnpkg.com/hardhat/-/hardhat-2.22.5.tgz#7e1a4311fa9e34a1cfe337784eae06706f6469a5" @@ -6334,6 +6584,11 @@ http-basic@^8.1.1: http-response-object "^3.0.1" parse-cache-control "^1.0.1" +http-cache-semantics@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz#abe02fcb2985460bf0323be664436ec3476a6d5a" + integrity sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ== + http-errors@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-2.0.0.tgz#b7774a1486ef73cf7667ac9ae0858c012c57b9d3" @@ -6361,6 +6616,14 @@ http-signature@~1.2.0: jsprim "^1.2.2" sshpk "^1.7.0" +http2-wrapper@^2.1.10: + version "2.2.1" + resolved "https://registry.yarnpkg.com/http2-wrapper/-/http2-wrapper-2.2.1.tgz#310968153dcdedb160d8b72114363ef5fce1f64a" + integrity sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ== + dependencies: + quick-lru "^5.1.1" + resolve-alpn "^1.2.0" + https-proxy-agent@^5.0.0: version "5.0.1" resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz#c59ef224a04fe8b754f3db0063a25ea30d0005d6" @@ -6465,7 +6728,7 @@ inherits@2.0.3: resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw== -ini@^1.3.5, ini@~1.3.0: +ini@^1.3.4, ini@^1.3.5, ini@~1.3.0: version "1.3.8" resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== @@ -7243,6 +7506,11 @@ json-stable-stringify-without-jsonify@^1.0.1: resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== +json-stream-stringify@^3.1.4: + version "3.1.5" + resolved "https://registry.yarnpkg.com/json-stream-stringify/-/json-stream-stringify-3.1.5.tgz#7184383b397a83ac5da33b62371217522e6ac2f6" + integrity sha512-wurRuTiw27mck9MWaUIGAunfwqhPDxnXQVN/+Rzi+IEQUUALU10AZs1nWkSdtjH7PAVuAUcqQjH11S/JHOWeaA== + json-stringify-safe@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" @@ -7412,6 +7680,13 @@ kleur@^3.0.3: dependencies: dotenv "^16.0.3" +latest-version@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-7.0.0.tgz#843201591ea81a4d404932eeb61240fe04e9e5da" + integrity sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg== + dependencies: + package-json "^8.1.0" + level-codec@^9.0.0: version "9.0.2" resolved "https://registry.yarnpkg.com/level-codec/-/level-codec-9.0.2.tgz#fd60df8c64786a80d44e63423096ffead63d8cbc" @@ -7681,6 
+7956,11 @@ loupe@^2.3.6: dependencies: get-func-name "^2.0.1" +lowercase-keys@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-3.0.0.tgz#c5e7d442e37ead247ae9db117a9d0a467c89d4f2" + integrity sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ== + lru-cache@^10.2.0: version "10.2.2" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.2.2.tgz#48206bc114c1252940c41b25b41af5b545aca878" @@ -7922,6 +8202,16 @@ mimic-fn@^2.1.0: resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== +mimic-response@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-3.1.0.tgz#2d1d59af9c1b129815accc2c46a022a5ce1fa3c9" + integrity sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ== + +mimic-response@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-4.0.0.tgz#35468b19e7c75d10f5165ea25e75a5ceea7cf70f" + integrity sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg== + minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" @@ -7974,20 +8264,13 @@ minimatch@^7.4.3: dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.3: +minimatch@^9.0.1, minimatch@^9.0.3: version "9.0.4" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51" integrity sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw== dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.4: - version "9.0.5" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5" - integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== - dependencies: - brace-expansion "^2.0.1" - minimatch@~3.0.4: version "3.0.8" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.8.tgz#5e6a59bd11e2ab0de1cfb843eb2d82e546c321c1" @@ -8000,16 +8283,11 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8, minimist@~1. 
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== -"minipass@^5.0.0 || ^6.0.2 || ^7.0.0": +"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.0.4: version "7.1.1" resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.1.tgz#f7f85aff59aa22f110b20e27692465cf3bf89481" integrity sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA== -minipass@^7.1.2: - version "7.1.2" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" - integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== - mkdirp-classic@^0.5.2: version "0.5.3" resolved "https://registry.yarnpkg.com/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz#fa10c9115cc6d8865be221ba47ee9bed78601113" @@ -8263,6 +8541,11 @@ normalize-path@^3.0.0, normalize-path@~3.0.0: resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== +normalize-url@^8.0.0: + version "8.0.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-8.0.1.tgz#9b7d96af9836577c58f5883e939365fa15623a4a" + integrity sha512-IO9QvjUMWxPQQhs60oOu10CRkWCiZzSUkzbXGGV9pviYl1fXYcvkzQ5jV9z8Y6un8ARoVRl4EtC6v6jNqbaJ/w== + npm-run-all@^4.1.5: version "4.1.5" resolved "https://registry.yarnpkg.com/npm-run-all/-/npm-run-all-4.1.5.tgz#04476202a15ee0e2e214080861bff12a51d98fba" @@ -8406,7 +8689,7 @@ optionator@^0.9.1, optionator@^0.9.3: prelude-ls "^1.2.1" type-check "^0.4.0" -ordinal@^1.0.3: +ordinal@1.0.3, ordinal@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/ordinal/-/ordinal-1.0.3.tgz#1a3c7726a61728112f50944ad7c35c06ae3a0d4d" integrity sha512-cMddMgb2QElm8G7vdaa02jhUNbTSrhsgAGUz1OokD83uJTwSUn+nKoNoKVVaRa08yF6sgfO7Maou1+bgLd9rdQ== @@ -8416,6 +8699,11 @@ os-tmpdir@~1.0.2: resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" integrity sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g== +p-cancelable@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-3.0.0.tgz#63826694b54d61ca1c20ebcb6d3ecf5e14cd8050" + integrity sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw== + p-limit@^1.1.0: version "1.3.0" resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8" @@ -8482,10 +8770,25 @@ p-try@^2.0.0: resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== -package-json-from-dist@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.0.tgz#e501cd3094b278495eb4258d4c9f6d5ac3019f00" - integrity sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw== +package-json@^8.1.0: + version "8.1.1" + resolved "https://registry.yarnpkg.com/package-json/-/package-json-8.1.1.tgz#3e9948e43df40d1e8e78a85485f1070bf8f03dc8" + integrity 
sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA== + dependencies: + got "^12.1.0" + registry-auth-token "^5.0.1" + registry-url "^6.0.0" + semver "^7.3.7" + +package-json@^8.1.0: + version "8.1.1" + resolved "https://registry.yarnpkg.com/package-json/-/package-json-8.1.1.tgz#3e9948e43df40d1e8e78a85485f1070bf8f03dc8" + integrity sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA== + dependencies: + got "^12.1.0" + registry-auth-token "^5.0.1" + registry-url "^6.0.0" + semver "^7.3.7" parent-module@^1.0.0: version "1.0.1" @@ -8552,7 +8855,7 @@ path-parse@^1.0.6, path-parse@^1.0.7: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -path-scurry@^1.11.1: +path-scurry@^1.11.0: version "1.11.1" resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2" integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA== @@ -8892,6 +9195,11 @@ proper-lockfile@^4.1.2: retry "^0.12.0" signal-exit "^3.0.2" +proto-list@~1.2.1: + version "1.2.4" + resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849" + integrity sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA== + protobufjs@^7.2.5: version "7.2.6" resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-7.2.6.tgz#4a0ccd79eb292717aacf07530a07e0ed20278215" @@ -8978,6 +9286,11 @@ queue-microtask@^1.2.2, queue-microtask@^1.2.3: resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== +quick-lru@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/quick-lru/-/quick-lru-5.1.1.tgz#366493e6b3e42a3a6885e2e99d18f80fb7a8c932" + integrity sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA== + railroad-diagrams@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/railroad-diagrams/-/railroad-diagrams-1.0.0.tgz#eb7e6267548ddedfb899c1b90e57374559cddb7e" @@ -9008,7 +9321,7 @@ raw-body@^2.4.1: iconv-lite "0.4.24" unpipe "1.0.0" -rc@~1.2.7: +rc@1.2.8, rc@~1.2.7: version "1.2.8" resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== @@ -9064,6 +9377,11 @@ readable-stream@~1.0.26-4: isarray "0.0.1" string_decoder "~0.10.x" +readdirp@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-4.0.1.tgz#b2fe35f8dca63183cd3b86883ecc8f720ea96ae6" + integrity sha512-GkMg9uOTpIWWKbSsgwb5fA4EavTR+SG/PMPoAY8hkhHfEEY0/vqljY+XHqtDf2cr2IJtoNRDbrrEpZUiZCkYRw== + readdirp@~3.6.0: version "3.6.0" resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" @@ -9110,6 +9428,20 @@ regexpp@^3.1.0: resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-3.2.0.tgz#0425a2768d8f23bad70ca4b90461fa2f1213e1b2" integrity sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg== +registry-auth-token@^5.0.1: + version "5.0.2" + resolved 
"https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-5.0.2.tgz#8b026cc507c8552ebbe06724136267e63302f756" + integrity sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ== + dependencies: + "@pnpm/npm-conf" "^2.1.0" + +registry-url@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-6.0.1.tgz#056d9343680f2f64400032b1e199faa692286c58" + integrity sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q== + dependencies: + rc "1.2.8" + req-cwd@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/req-cwd/-/req-cwd-2.0.0.tgz#d4082b4d44598036640fb73ddea01ed53db49ebc" @@ -9160,6 +9492,11 @@ require-from-string@^2.0.0, require-from-string@^2.0.2: resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw== +resolve-alpn@^1.2.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/resolve-alpn/-/resolve-alpn-1.2.1.tgz#b7adbdac3546aaaec20b45e7d8265927072726f9" + integrity sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g== + resolve-cwd@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-3.0.0.tgz#0f0075f1bb2544766cf73ba6a6e2adfebcb13f2d" @@ -9213,6 +9550,13 @@ resolve@^1.1.6, resolve@^1.10.0, resolve@^1.12.0, resolve@^1.20.0, resolve@^1.22 path-parse "^1.0.7" supports-preserve-symlinks-flag "^1.0.0" +responselike@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-3.0.0.tgz#20decb6c298aff0dbee1c355ca95461d42823626" + integrity sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg== + dependencies: + lowercase-keys "^3.0.0" + restore-cursor@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" @@ -9613,6 +9957,19 @@ solc@0.8.17: semver "^5.5.0" tmp "0.0.33" +solc@0.8.26: + version "0.8.26" + resolved "https://registry.yarnpkg.com/solc/-/solc-0.8.26.tgz#afc78078953f6ab3e727c338a2fefcd80dd5b01a" + integrity sha512-yiPQNVf5rBFHwN6SIf3TUUvVAFKcQqmSUFeq+fb6pNRCo0ZCgpYOZDi3BVoezCPIAcKrVYd/qXlBLUP9wVrZ9g== + dependencies: + command-exists "^1.2.8" + commander "^8.1.0" + follow-redirects "^1.12.1" + js-sha3 "0.8.0" + memorystream "^0.3.1" + semver "^5.5.0" + tmp "0.0.33" + solhint-plugin-prettier@^0.0.5: version "0.0.5" resolved "https://registry.yarnpkg.com/solhint-plugin-prettier/-/solhint-plugin-prettier-0.0.5.tgz#e3b22800ba435cd640a9eca805a7f8bc3e3e6a6b" @@ -9620,6 +9977,32 @@ solhint-plugin-prettier@^0.0.5: dependencies: prettier-linter-helpers "^1.0.0" +solhint@4.5.4: + version "4.5.4" + resolved "https://registry.yarnpkg.com/solhint/-/solhint-4.5.4.tgz#171cf33f46c36b8499efe60c0e425f6883a54e50" + integrity sha512-Cu1XiJXub2q1eCr9kkJ9VPv1sGcmj3V7Zb76B0CoezDOB9bu3DxKIFFH7ggCl9fWpEPD6xBmRLfZrYijkVmujQ== + dependencies: + "@solidity-parser/parser" "^0.18.0" + ajv "^6.12.6" + antlr4 "^4.13.1-patch-1" + ast-parents "^0.0.1" + chalk "^4.1.2" + commander "^10.0.0" + cosmiconfig "^8.0.0" + fast-diff "^1.2.0" + glob "^8.0.3" + ignore "^5.2.4" + js-yaml "^4.1.0" + latest-version "^7.0.0" + lodash "^4.17.21" + pluralize "^8.0.0" + semver "^7.5.2" + strip-ansi "^6.0.1" + table "^6.8.1" + text-table "^0.2.0" + optionalDependencies: + 
prettier "^2.8.3" + solhint@^3.3.2, solhint@^3.6.2: version "3.6.2" resolved "https://registry.yarnpkg.com/solhint/-/solhint-3.6.2.tgz#2b2acbec8fdc37b2c68206a71ba89c7f519943fe" @@ -10019,7 +10402,7 @@ synckit@^0.8.6: "system-contracts@link:contracts/system-contracts": version "0.1.0" dependencies: - "@matterlabs/hardhat-zksync-deploy" "^0.6.5" + "@matterlabs/hardhat-zksync-deploy" "^0.7.0" "@matterlabs/hardhat-zksync-solc" "^1.1.4" "@matterlabs/hardhat-zksync-verify" "^1.4.3" commander "^9.4.1" @@ -10030,7 +10413,6 @@ synckit@^0.8.6: fast-glob "^3.3.2" hardhat "=2.22.2" preprocess "^3.2.0" - zksync-ethers "^5.9.0" table-layout@^1.0.2: version "1.0.2" @@ -10282,13 +10664,13 @@ ts-morph@^19.0.0: "@ts-morph/common" "~0.20.0" code-block-writer "^12.0.0" -ts-morph@^22.0.0: - version "22.0.0" - resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-22.0.0.tgz#5532c592fb6dddae08846f12c9ab0fc590b1d42e" - integrity sha512-M9MqFGZREyeb5fTl6gNHKZLqBQA0TjA1lea+CR48R8EBTDuWrNqW6ccC5QvjNR4s6wDumD3LTCjOFSp9iwlzaw== +ts-morph@^21.0.1: + version "21.0.1" + resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-21.0.1.tgz#712302a0f6e9dbf1aa8d9cf33a4386c4b18c2006" + integrity sha512-dbDtVdEAncKctzrVZ+Nr7kHpHkv+0JDJb2MjjpBaj8bFeCkePU9rHfMklmhuLFnpeq/EJZk2IhStY6NzqgjOkg== dependencies: - "@ts-morph/common" "~0.23.0" - code-block-writer "^13.0.1" + "@ts-morph/common" "~0.22.0" + code-block-writer "^12.0.0" ts-node@^10.1.0, ts-node@^10.7.0: version "10.9.2" @@ -10387,6 +10769,11 @@ type-detect@4.0.8, type-detect@^4.0.0, type-detect@^4.0.8: resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== +type-detect@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.1.0.tgz#deb2453e8f08dcae7ae98c626b13dddb0155906c" + integrity sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw== + type-fest@^0.20.2: version "0.20.2" resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.20.2.tgz#1bf207f4b28f91583666cb5fbd327887301cd5f4" @@ -10919,7 +11306,7 @@ zksync-ethers@5.8.0-beta.5: dependencies: ethers "~5.7.0" -zksync-ethers@^5.9.0: +zksync-ethers@^5.0.0, zksync-ethers@^5.9.0: version "5.9.2" resolved "https://registry.yarnpkg.com/zksync-ethers/-/zksync-ethers-5.9.2.tgz#1c5f34cb25ac0b040fd1a6118f2ba1c2c3bda090" integrity sha512-Y2Mx6ovvxO6UdC2dePLguVzvNToOY8iLWeq5ne+jgGSJxAi/f4He/NF6FNsf6x1aWX0o8dy4Df8RcOQXAkj5qw== @@ -10930,10 +11317,3 @@ zksync-ethers@^6.9.0: version "6.9.0" resolved "https://registry.yarnpkg.com/zksync-ethers/-/zksync-ethers-6.9.0.tgz#efaff1d59e2cff837eeda84c4ba59fdca4972a91" integrity sha512-2CppwvLHtz689L7E9EhevbFtsqVukKC/lVicwdeUS2yqV46ET4iBR11rYdEfGW2oEo1h6yJuuwIBDFm2SybkIA== - -zksync-web3@^0.15.4: - version "0.15.5" - resolved "https://registry.yarnpkg.com/zksync-web3/-/zksync-web3-0.15.5.tgz#aabe379464963ab573e15948660a709f409b5316" - integrity sha512-97gB7OKJL4spegl8fGO54g6cvTd/75G6yFWZWEa2J09zhjTrfqabbwE/GwiUJkFQ5BbzoH4JaTlVz1hoYZI+DQ== - dependencies: - ethers "~5.7.0" diff --git a/zkstack_cli/crates/common/src/contracts.rs b/zkstack_cli/crates/common/src/contracts.rs new file mode 100644 index 00000000000..c95849131c1 --- /dev/null +++ b/zkstack_cli/crates/common/src/contracts.rs @@ -0,0 +1,42 @@ +use std::path::PathBuf; + +use xshell::{cmd, Shell}; + +use crate::cmd::Cmd; + +pub fn build_test_contracts(shell: Shell, link_to_code: 
PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("etc/contracts-test-data")); + Cmd::new(cmd!(shell, "yarn install")).run()?; + Ok(Cmd::new(cmd!(shell, "yarn build")).run()?) +} + +pub fn build_l1_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/l1-contracts")); + Ok(Cmd::new(cmd!(shell, "forge build")).run()?) +} + +pub fn build_l2_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/l2-contracts")); + Ok(Cmd::new(cmd!( + shell, + "forge build --zksync --zk-enable-eravm-extensions" + )) + .run()?) +} + +pub fn build_system_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/system-contracts")); + Cmd::new(cmd!(shell, "yarn install")).run()?; + Cmd::new(cmd!(shell, "yarn preprocess:system-contracts")).run()?; + Cmd::new(cmd!( + shell, + "forge build --zksync --zk-enable-eravm-extensions" + )) + .run()?; + Cmd::new(cmd!(shell, "yarn preprocess:bootloader")).run()?; + Ok(Cmd::new(cmd!( + shell, + "forge build --zksync --zk-enable-eravm-extensions" + )) + .run()?) +} diff --git a/zkstack_cli/crates/common/src/lib.rs b/zkstack_cli/crates/common/src/lib.rs index b0fbdab0d1b..9680bdd8df3 100644 --- a/zkstack_cli/crates/common/src/lib.rs +++ b/zkstack_cli/crates/common/src/lib.rs @@ -4,6 +4,7 @@ mod term; pub mod cmd; pub mod config; +pub mod contracts; pub mod db; pub mod docker; pub mod ethereum; diff --git a/zkstack_cli/crates/common/src/prerequisites.rs b/zkstack_cli/crates/common/src/prerequisites.rs index 7845249a1ed..72d3c7d8041 100644 --- a/zkstack_cli/crates/common/src/prerequisites.rs +++ b/zkstack_cli/crates/common/src/prerequisites.rs @@ -2,74 +2,99 @@ use xshell::{cmd, Shell}; use crate::{cmd::Cmd, logger}; -const PREREQUISITES: [Prerequisite; 5] = [ - Prerequisite { - name: "git", - download_link: "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git", - }, - Prerequisite { - name: "docker", - download_link: "https://docs.docker.com/get-docker/", - }, - Prerequisite { - name: "forge", - download_link: "https://book.getfoundry.sh/getting-started/installation", - }, - Prerequisite { - name: "cargo", - download_link: "https://doc.rust-lang.org/cargo/getting-started/installation.html", - }, - Prerequisite { - name: "yarn", - download_link: "https://yarnpkg.com/getting-started/install", - }, -]; +fn prerequisites() -> [Prerequisite; 5] { + [ + Prerequisite { + name: "git", + download_link: "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git", + custom_validator: None, + }, + Prerequisite { + name: "docker", + download_link: "https://docs.docker.com/get-docker/", + custom_validator: None, + }, + Prerequisite { + name: "forge", + download_link: + "https://github.com/matter-labs/foundry-zksync?tab=readme-ov-file#quick-install", + custom_validator: Some(Box::new(|| { + let shell = Shell::new().unwrap(); + let Ok(result) = Cmd::new(cmd!(shell, "forge build --help")).run_with_output() + else { + return false; + }; + let Ok(stdout) = String::from_utf8(result.stdout) else { + return false; + }; + stdout.contains("ZKSync configuration") + })), + }, + Prerequisite { + name: "cargo", + download_link: "https://doc.rust-lang.org/cargo/getting-started/installation.html", + custom_validator: None, + }, + Prerequisite { + name: "yarn", + download_link: "https://yarnpkg.com/getting-started/install", + 
custom_validator: None,
+        },
+    ]
+}
 
 const DOCKER_COMPOSE_PREREQUISITE: Prerequisite = Prerequisite {
     name: "docker compose",
     download_link: "https://docs.docker.com/compose/install/",
+    custom_validator: None,
 };
 
 pub const GPU_PREREQUISITES: [Prerequisite; 3] = [
     Prerequisite {
         name: "cmake",
         download_link: "https://cmake.org/download/",
+        custom_validator: None,
     },
     Prerequisite {
         name: "nvcc",
         download_link: "https://developer.nvidia.com/cuda-downloads",
+        custom_validator: None,
     }, // CUDA toolkit
     Prerequisite {
         name: "nvidia-smi",
         download_link: "https://developer.nvidia.com/cuda-downloads",
+        custom_validator: None,
     }, // CUDA GPU driver
 ];
 
 pub const GCLOUD_PREREQUISITE: [Prerequisite; 1] = [Prerequisite {
     name: "gcloud",
     download_link: "https://cloud.google.com/sdk/docs/install",
+    custom_validator: None,
 }];
 
 pub const PROVER_CLI_PREREQUISITE: [Prerequisite; 1] = [Prerequisite {
     name: "prover_cli",
     download_link: "https://github.com/matter-labs/zksync-era/tree/main/prover/crates/bin/prover_cli",
+    custom_validator: None,
 }];
 
 pub struct Prerequisite {
     name: &'static str,
     download_link: &'static str,
+    custom_validator: Option<Box<dyn Fn() -> bool>>,
 }
 
 pub fn check_general_prerequisites(shell: &Shell) {
-    check_prerequisites(shell, &PREREQUISITES, true);
+    check_prerequisites(shell, &prerequisites(), true);
 }
 
 pub fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_compose: bool) {
     let mut missing_prerequisites = vec![];
 
     for prerequisite in prerequisites {
-        if !check_prerequisite(shell, prerequisite.name) {
+        if !check_prerequisite(shell, prerequisite) {
             missing_prerequisites.push(prerequisite);
         }
     }
@@ -95,8 +120,15 @@ pub fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_
     }
 }
 
-fn check_prerequisite(shell: &Shell, name: &str) -> bool {
-    Cmd::new(cmd!(shell, "which {name}")).run().is_ok()
+fn check_prerequisite(shell: &Shell, prerequisite: &Prerequisite) -> bool {
+    let name = prerequisite.name;
+    if Cmd::new(cmd!(shell, "which {name}")).run().is_err() {
+        return false;
+    }
+    let Some(custom) = &prerequisite.custom_validator else {
+        return true;
+    };
+    custom()
 }
 
 fn check_docker_compose_prerequisite(shell: &Shell) -> bool {
diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs
index 5a4f1f86f35..8dbd5c371c8 100644
--- a/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs
@@ -2,7 +2,7 @@ use std::path::Path;
 
 use anyhow::Context;
 use common::{
-    cmd::Cmd,
+    contracts::build_l2_contracts,
     forge::{Forge, ForgeScriptArgs},
     spinner::Spinner,
 };
@@ -20,7 +20,7 @@ use config::{
     traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath},
     ChainConfig, ContractsConfig, EcosystemConfig,
 };
-use xshell::{cmd, Shell};
+use xshell::Shell;
 
 use crate::{
     messages::{
@@ -121,7 +121,7 @@ async fn build_and_deploy(
     signature: Option<&str>,
     mut update_config: impl FnMut(&Shell, &Path) -> anyhow::Result<()>,
 ) -> anyhow::Result<()> {
-    build_l2_contracts(shell, &ecosystem_config.link_to_code)?;
+    build_l2_contracts(shell.clone(), ecosystem_config.link_to_code.clone())?;
     call_forge(shell, chain_config, ecosystem_config, forge_args, signature).await?;
     update_config(
         shell,
@@ -283,8 +283,3 @@ async fn call_forge(
     forge.run(shell)?;
     Ok(())
 }
-
-fn build_l2_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> {
-    let _dir_guard = shell.push_dir(link_to_code.join("contracts"));
-    Ok(Cmd::new(cmd!(shell, "yarn l2 build")).run()?)
-}
diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs
index 6f420e66ba0..fbafaec09e6 100644
--- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs
@@ -1,16 +1,21 @@
 use std::path::PathBuf;
 
 use clap::Parser;
-use common::{cmd::Cmd, logger, spinner::Spinner};
+use common::{
+    contracts::{
+        build_l1_contracts, build_l2_contracts, build_system_contracts, build_test_contracts,
+    },
+    logger,
+    spinner::Spinner,
+};
 use config::EcosystemConfig;
-use xshell::{cmd, Shell};
+use xshell::Shell;
 
 use crate::commands::dev::messages::{
     MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER,
     MSG_BUILDING_L2_CONTRACTS_SPINNER, MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER,
     MSG_BUILDING_TEST_CONTRACTS_SPINNER, MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP,
-    MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_CONTRACTS_DEPS_SPINNER,
-    MSG_NOTHING_TO_BUILD_MSG,
+    MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_NOTHING_TO_BUILD_MSG,
 };
 
 #[derive(Debug, Parser)]
@@ -67,53 +72,41 @@ pub enum ContractType {
     TestContracts,
 }
 
-#[derive(Debug)]
 struct ContractBuilder {
-    dir: PathBuf,
-    cmd: String,
+    cmd: Box<dyn Fn(Shell, PathBuf) -> anyhow::Result<()>>,
     msg: String,
+    link_to_code: PathBuf,
 }
 
 impl ContractBuilder {
     fn new(ecosystem: &EcosystemConfig, contract_type: ContractType) -> Self {
         match contract_type {
             ContractType::L1 => Self {
-                dir: ecosystem.path_to_foundry(),
-                cmd: "forge build".to_string(),
+                cmd: Box::new(build_l1_contracts),
                 msg: MSG_BUILDING_L1_CONTRACTS_SPINNER.to_string(),
+                link_to_code: ecosystem.link_to_code.clone(),
             },
             ContractType::L2 => Self {
-                dir: ecosystem.link_to_code.clone(),
-                cmd: "yarn l2-contracts build".to_string(),
+                cmd: Box::new(build_l2_contracts),
                 msg: MSG_BUILDING_L2_CONTRACTS_SPINNER.to_string(),
+                link_to_code: ecosystem.link_to_code.clone(),
             },
             ContractType::SystemContracts => Self {
-                dir: ecosystem.link_to_code.join("contracts"),
-                cmd: "yarn sc build".to_string(),
+                cmd: Box::new(build_system_contracts),
                 msg: MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER.to_string(),
+                link_to_code: ecosystem.link_to_code.clone(),
             },
             ContractType::TestContracts => Self {
-                dir: ecosystem.link_to_code.join("etc/contracts-test-data"),
-                cmd: "yarn build".to_string(),
+                cmd: Box::new(build_test_contracts),
                 msg: MSG_BUILDING_TEST_CONTRACTS_SPINNER.to_string(),
+                link_to_code: ecosystem.link_to_code.clone(),
             },
         }
     }
 
-    fn build(&self, shell: &Shell) -> anyhow::Result<()> {
+    fn build(self, shell: Shell) -> anyhow::Result<()> {
         let spinner = Spinner::new(&self.msg);
-        let _dir_guard = shell.push_dir(&self.dir);
-
-        let mut args = self.cmd.split_whitespace().collect::<Vec<_>>();
-        let command = args.remove(0); // It's safe to unwrap here because we know that the vec is not empty
-        let mut cmd = cmd!(shell, "{command}");
-
-        for arg in args {
-            cmd = cmd.arg(arg);
-        }
-
-        Cmd::new(cmd).run()?;
-
+        (self.cmd)(shell, self.link_to_code.clone())?;
         spinner.finish();
         Ok(())
     }
@@ -129,17 +122,11 @@ pub fn run(shell: &Shell, args: ContractsArgs) -> anyhow::Result<()> {
     logger::info(MSG_BUILDING_CONTRACTS);
 
     let ecosystem = EcosystemConfig::from_file(shell)?;
-    let link_to_code = ecosystem.link_to_code.clone();
-
-    let spinner = Spinner::new(MSG_CONTRACTS_DEPS_SPINNER);
-    let _dir_guard = shell.push_dir(&link_to_code);
-    
Cmd::new(cmd!(shell, "yarn install")).run()?; - spinner.finish(); contracts .iter() .map(|contract| ContractBuilder::new(&ecosystem, *contract)) - .try_for_each(|builder| builder.build(shell))?; + .try_for_each(|builder| builder.build(shell.clone()))?; logger::outro(MSG_BUILDING_CONTRACTS_SUCCESS); diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs new file mode 100644 index 00000000000..683ffe19916 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs @@ -0,0 +1,26 @@ +use anyhow::Context; +use common::{cmd::Cmd, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use crate::{ + commands::dev::{ + commands::database::reset::reset_database, dals::get_core_dal, + messages::MSG_GENESIS_FILE_GENERATION_STARTED, + }, + messages::MSG_CHAIN_NOT_FOUND_ERR, +}; + +pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_chain(Some(ecosystem.current_chain().to_string())) + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let spinner = Spinner::new(MSG_GENESIS_FILE_GENERATION_STARTED); + let secrets_path = chain.path_to_secrets_config(); + let dal = get_core_dal(shell, None)?; + reset_database(shell, ecosystem.link_to_code, dal).await?; + Cmd::new(cmd!(shell,"cargo run --package genesis_generator --bin genesis_generator -- --config-path={secrets_path}")).run()?; + spinner.finish(); + Ok(()) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs index 38ec586e745..ab98e44533f 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs @@ -3,6 +3,7 @@ pub mod config_writer; pub mod contracts; pub mod database; pub mod fmt; +pub mod genesis; pub mod lint; pub(crate) mod lint_utils; pub mod prover; diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs index f48967f5973..dea6a46bbef 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs @@ -6,8 +6,8 @@ use super::utils::{build_contracts, install_and_build_dependencies}; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; - build_contracts(shell, &ecosystem_config)?; install_and_build_dependencies(shell, &ecosystem_config)?; + build_contracts(shell, &ecosystem_config)?; Ok(()) } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs index 3bc3093bf93..8e9e421c2f4 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs @@ -28,8 +28,8 @@ pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> { logger::info(msg_integration_tests_run(args.external_node)); if !args.no_deps { - build_contracts(shell, &ecosystem_config)?; install_and_build_dependencies(shell, &ecosystem_config)?; + build_contracts(shell, &ecosystem_config)?; } let wallets_path: PathBuf = ecosystem_config.link_to_code.join(TEST_WALLETS_PATH); diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs 
b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs index 00617e26064..c7e639f8e87 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs @@ -4,6 +4,8 @@ use super::commands::lint_utils::Target; pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found"; // Subcommands help +pub(super) const MSG_GENERATE_GENESIS_ABOUT: &str = + "Generate new genesis file based on current contracts"; pub(super) const MSG_PROVER_VERSION_ABOUT: &str = "Protocol version used by provers"; pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related commands"; pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests"; @@ -106,7 +108,6 @@ pub(super) const MSG_RESETTING_TEST_DATABASES: &str = "Resetting test databases" // Contract building related messages pub(super) const MSG_NOTHING_TO_BUILD_MSG: &str = "Nothing to build!"; pub(super) const MSG_BUILDING_CONTRACTS: &str = "Building contracts"; -pub(super) const MSG_CONTRACTS_DEPS_SPINNER: &str = "Installing dependencies.."; pub(super) const MSG_BUILDING_L2_CONTRACTS_SPINNER: &str = "Building L2 contracts.."; pub(super) const MSG_BUILDING_L1_CONTRACTS_SPINNER: &str = "Building L1 contracts.."; pub(super) const MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER: &str = "Building system contracts.."; @@ -230,3 +231,6 @@ pub(super) const MSG_UNABLE_TO_WRITE_FILE_ERR: &str = "Unable to write data to f pub(super) const MSG_UNABLE_TO_READ_PARSE_JSON_ERR: &str = "Unable to parse JSON"; pub(super) const MSG_FAILED_TO_SEND_TXN_ERR: &str = "Failed to send transaction"; pub(super) const MSG_INVALID_L1_RPC_URL_ERR: &str = "Invalid L1 RPC URL"; + +// Genesis +pub(super) const MSG_GENESIS_FILE_GENERATION_STARTED: &str = "Regenerate genesis file"; diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs index e8d23f15b69..9272436a9b9 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs @@ -7,9 +7,10 @@ use self::commands::{ send_transactions::args::SendTransactionsArgs, snapshot::SnapshotCommands, test::TestCommands, }; use crate::commands::dev::messages::{ - MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, - MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, - MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, + MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, MSG_GENERATE_GENESIS_ABOUT, + MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, + MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, + MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, }; mod commands; @@ -40,6 +41,8 @@ pub enum DevCommands { ConfigWriter(ConfigWriterArgs), #[command(about = MSG_SEND_TXNS_ABOUT)] SendTransactions(SendTransactionsArgs), + #[command(about = MSG_GENERATE_GENESIS_ABOUT, alias = "genesis")] + GenerateGenesis, } pub async fn run(shell: &Shell, args: DevCommands) -> anyhow::Result<()> { @@ -56,6 +59,7 @@ pub async fn run(shell: &Shell, args: DevCommands) -> anyhow::Result<()> { DevCommands::SendTransactions(args) => { commands::send_transactions::run(shell, args).await? 
} + DevCommands::GenerateGenesis => commands::genesis::run(shell).await?, } Ok(()) } diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs index bf5a4605c09..6e006f8d65d 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs @@ -3,6 +3,7 @@ use std::{path::PathBuf, str::FromStr}; use anyhow::Context; use common::{ config::global_config, + contracts::build_system_contracts, forge::{Forge, ForgeScriptArgs}, git, logger, spinner::Spinner, @@ -26,7 +27,6 @@ use super::{ args::init::{EcosystemArgsFinal, EcosystemInitArgs, EcosystemInitArgsFinal}, common::deploy_l1, setup_observability, - utils::{build_system_contracts, install_yarn_dependencies}, }; use crate::{ accept_ownership::{accept_admin, accept_owner}, @@ -108,8 +108,7 @@ async fn init_ecosystem( initial_deployment_config: &InitialDeploymentConfig, ) -> anyhow::Result { let spinner = Spinner::new(MSG_INTALLING_DEPS_SPINNER); - install_yarn_dependencies(shell, &ecosystem_config.link_to_code)?; - build_system_contracts(shell, &ecosystem_config.link_to_code)?; + build_system_contracts(shell.clone(), ecosystem_config.link_to_code.clone())?; spinner.finish(); let contracts = deploy_ecosystem( From 6159f7531a0340a69c4926c4e0325811ed7cabb8 Mon Sep 17 00:00:00 2001 From: Joonatan Saarhelo Date: Tue, 15 Oct 2024 13:23:25 +0100 Subject: [PATCH 064/140] fix: restore instruction count functionality (#3081) Brings back instruction counts that show that bytecode has changed rather than VM performance. They were previously disabled because vm2 didn't support tracers. --- Cargo.lock | 1 + core/tests/vm-benchmark/Cargo.toml | 1 + core/tests/vm-benchmark/benches/iai.rs | 1 + .../src/bin/compare_iai_results.rs | 31 +++++----- .../src/bin/instruction_counts.rs | 9 ++- .../vm-benchmark/src/instruction_counter.rs | 1 - core/tests/vm-benchmark/src/vm.rs | 58 ++++++++++++++----- 7 files changed, 71 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 11c37bda57f..774471d3d6c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8743,6 +8743,7 @@ dependencies = [ "zksync_types", "zksync_utils", "zksync_vlog", + "zksync_vm2", ] [[package]] diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 4586c637e12..59c1e21493b 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -11,6 +11,7 @@ zksync_multivm.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_vlog.workspace = true +zksync_vm2.workspace = true criterion.workspace = true once_cell.workspace = true diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs index 6b8965afa4f..8cbb9f10dd8 100644 --- a/core/tests/vm-benchmark/benches/iai.rs +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -31,4 +31,5 @@ make_functions_and_main!( write_and_decode => write_and_decode_legacy, event_spam => event_spam_legacy, slot_hash_collision => slot_hash_collision_legacy, + heap_read_write => heap_read_write_legacy, ); diff --git a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs index faf72a18f45..c274b039c9b 100644 --- a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs +++ b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs @@ -25,14 +25,7 @@ fn main() { .keys() .collect::>() .intersection(&iai_after.keys().collect()) - .filter_map(|&name| { - let 
diff = percent_difference(iai_before[name], iai_after[name]);
-            if diff.abs() > 2. {
-                Some((name, format!("{:+.1}%", diff)))
-            } else {
-                None
-            }
-        })
+        .map(|&name| (name, percent_difference(iai_before[name], iai_after[name])))
         .collect::<HashMap<_, _>>();
 
     let duration_changes = opcodes_before
@@ -47,12 +40,17 @@ fn main() {
 
     let mut nonzero_diff = false;
 
-    for name in perf_changes.keys().collect::<HashSet<_>>().union(
-        &duration_changes
-            .iter()
-            .filter_map(|(key, value)| (*value != 0).then_some(key))
-            .collect(),
-    ) {
+    for name in perf_changes
+        .iter()
+        .filter_map(|(key, value)| (value.abs() > 2.).then_some(key))
+        .collect::<HashSet<_>>()
+        .union(
+            &duration_changes
+                .iter()
+                .filter_map(|(key, value)| (*value != 0).then_some(key))
+                .collect(),
+        )
+    {
         // write the header before writing the first line of diff
         if !nonzero_diff {
             println!("Benchmark name | change in estimated runtime | change in number of opcodes executed \n--- | --- | ---");
@@ -63,7 +61,10 @@ fn main() {
         println!(
             "{} | {} | {}",
             name,
-            perf_changes.get(**name).unwrap_or(&n_a.clone()),
+            perf_changes
+                .get(**name)
+                .map(|percent| format!("{:+.1}%", percent))
+                .unwrap_or(n_a.clone()),
             duration_changes
                 .get(**name)
                 .map(|abs_diff| format!(
diff --git a/core/tests/vm-benchmark/src/bin/instruction_counts.rs b/core/tests/vm-benchmark/src/bin/instruction_counts.rs
index f9bb04c01bf..96208007fd9 100644
--- a/core/tests/vm-benchmark/src/bin/instruction_counts.rs
+++ b/core/tests/vm-benchmark/src/bin/instruction_counts.rs
@@ -1,11 +1,16 @@
 //! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed.
 
-use vm_benchmark::{BenchmarkingVm, BYTECODES};
+use vm_benchmark::{BenchmarkingVmFactory, Fast, Legacy, BYTECODES};
 
 fn main() {
     for bytecode in BYTECODES {
         let tx = bytecode.deploy_tx();
         let name = bytecode.name;
-        println!("{name} {}", BenchmarkingVm::new().instruction_count(&tx));
+        println!("{name} {}", Fast::<()>::count_instructions(&tx));
+        println!(
+            "{} {}",
+            name.to_string() + "_legacy",
+            Legacy::count_instructions(&tx)
+        );
     }
 }
diff --git a/core/tests/vm-benchmark/src/instruction_counter.rs b/core/tests/vm-benchmark/src/instruction_counter.rs
index 48b1e3527ad..0899c4c9171 100644
--- a/core/tests/vm-benchmark/src/instruction_counter.rs
+++ b/core/tests/vm-benchmark/src/instruction_counter.rs
@@ -13,7 +13,6 @@ pub struct InstructionCounter {
 
 /// A tracer that counts the number of instructions executed by the VM.
 impl InstructionCounter {
-    #[allow(dead_code)] // FIXME: re-enable instruction counting once new tracers are merged
     pub fn new(output: Rc<RefCell<usize>>) -> Self {
         Self { count: 0, output }
     }
diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs
index f4a0010f29e..30e2321298f 100644
--- a/core/tests/vm-benchmark/src/vm.rs
+++ b/core/tests/vm-benchmark/src/vm.rs
@@ -9,8 +9,8 @@ use zksync_multivm::{
         VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled,
     },
-    vm_fast, vm_latest,
-    vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled},
+    vm_fast,
+    vm_latest::{self, constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled, ToTracerPointer},
     zk_evm_latest::ethereum_types::{Address, U256},
 };
 use zksync_types::{
@@ -20,7 +20,7 @@ use zksync_types::{
 };
 use zksync_utils::bytecode::hash_bytecode;
 
-use crate::transaction::PRIVATE_KEY;
+use crate::{instruction_counter::InstructionCounter, transaction::PRIVATE_KEY};
 
 static SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = Lazy::new(BaseSystemContracts::load_from_disk);
 
@@ -72,16 +72,19 @@ pub trait BenchmarkingVmFactory {
         system_env: SystemEnv,
         storage: &'static InMemoryStorage,
     ) -> Self::Instance;
+
+    /// Counts instructions executed by the VM while processing the transaction.
+    fn count_instructions(tx: &Transaction) -> usize;
 }
 
 /// Factory for the new / fast VM.
 #[derive(Debug)]
-pub struct Fast(());
+pub struct Fast<Tr = ()>(Tr);
 
-impl BenchmarkingVmFactory for Fast {
+impl<Tr: vm_fast::Tracer + Default> BenchmarkingVmFactory for Fast<Tr> {
     const LABEL: VmLabel = VmLabel::Fast;
 
-    type Instance = vm_fast::Vm<&'static InMemoryStorage>;
+    type Instance = vm_fast::Vm<&'static InMemoryStorage, Tr>;
 
     fn create(
         batch_env: L1BatchEnv,
@@ -90,6 +93,29 @@ impl BenchmarkingVmFactory for Fast {
     ) -> Self::Instance {
         vm_fast::Vm::custom(batch_env, system_env, storage)
     }
+
+    fn count_instructions(tx: &Transaction) -> usize {
+        let mut vm = BenchmarkingVm::<Fast<InstructionCount>>::default();
+        vm.0.push_transaction(tx.clone());
+
+        #[derive(Default)]
+        struct InstructionCount(usize);
+        impl vm_fast::Tracer for InstructionCount {
+            fn before_instruction<
+                OP: zksync_vm2::interface::OpcodeType,
+                S: zksync_vm2::interface::StateInterface,
+            >(
+                &mut self,
+                _: &mut S,
+            ) {
+                self.0 += 1;
+            }
+        }
+        let mut tracer = InstructionCount(0);
+
+        vm.0.inspect(&mut tracer, VmExecutionMode::OneTx);
+        tracer.0
+    }
 }
 
 /// Factory for the legacy VM (latest version).
@@ -109,6 +135,19 @@ impl BenchmarkingVmFactory for Legacy { let storage = StorageView::new(storage).to_rc_ptr(); vm_latest::Vm::new(batch_env, system_env, storage) } + + fn count_instructions(tx: &Transaction) -> usize { + let mut vm = BenchmarkingVm::::default(); + vm.0.push_transaction(tx.clone()); + let count = Rc::new(RefCell::new(0)); + vm.0.inspect( + &mut InstructionCounter::new(count.clone()) + .into_tracer_pointer() + .into(), + VmExecutionMode::OneTx, + ); + count.take() + } } #[derive(Debug)] @@ -169,13 +208,6 @@ impl BenchmarkingVm { } tx_result } - - pub fn instruction_count(&mut self, tx: &Transaction) -> usize { - self.0.push_transaction(tx.clone()); - let count = Rc::new(RefCell::new(0)); - self.0.execute(VmExecutionMode::OneTx); // FIXME: re-enable instruction counting once new tracers are merged - count.take() - } } impl BenchmarkingVm { From 2152ec9206844ff604274b96b389cb59a1ac0385 Mon Sep 17 00:00:00 2001 From: Danil Date: Tue, 15 Oct 2024 15:10:11 +0200 Subject: [PATCH 065/140] fix(ci): install zkstack at the correct time (#3092) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. Signed-off-by: Danil --- .github/workflows/vm-perf-to-prometheus.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index 3a2008e1f8e..d336a1472e4 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -28,17 +28,17 @@ jobs: echo "RUSTC_WRAPPER=sccache" >> .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH - - - name: Install zkstack - run: | - ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true - ci_run zkstackup -g --local - name: init run: | run_retried docker compose pull zk docker compose up -d zk + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: build contracts run: | ci_run zkstack dev contracts From 2baf189c2594efbd0f3a983d2a2d5f245eeb47ff Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 15 Oct 2024 16:39:10 +0300 Subject: [PATCH 066/140] feat(contract-verifier): add compilers 1.5.5 (#3096) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ add zk-compilers 1.5.5 and era-solc 0.8.28-1.0.1 ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- docker/contract-verifier/Dockerfile | 4 ++-- docker/contract-verifier/install-all-solc.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index d87a0dea1e0..7ed18626a1b 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -47,7 +47,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 5); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ @@ -68,7 +68,7 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ done # install zkvyper 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 5); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ diff --git a/docker/contract-verifier/install-all-solc.sh b/docker/contract-verifier/install-all-solc.sh index 4fe992f8357..0c24b074130 100755 --- a/docker/contract-verifier/install-all-solc.sh +++ b/docker/contract-verifier/install-all-solc.sh @@ -26,7 +26,7 @@ done # Download zkVM solc list=( "0.8.25-1.0.0" "0.8.24-1.0.0" "0.8.23-1.0.0" "0.8.22-1.0.0" "0.8.21-1.0.0" "0.8.20-1.0.0" "0.8.19-1.0.0" "0.8.18-1.0.0" "0.8.17-1.0.0" "0.8.16-1.0.0" "0.8.15-1.0.0" "0.8.14-1.0.0" "0.8.13-1.0.0" "0.8.12-1.0.0" "0.8.11-1.0.0" "0.8.10-1.0.0" "0.8.9-1.0.0" "0.8.8-1.0.0" "0.8.7-1.0.0" "0.8.6-1.0.0" "0.8.5-1.0.0" "0.8.4-1.0.0" "0.8.3-1.0.0" "0.8.2-1.0.0" "0.8.1-1.0.0" "0.8.0-1.0.0" "0.7.6-1.0.0" "0.7.5-1.0.0" "0.7.4-1.0.0" "0.7.3-1.0.0" "0.7.2-1.0.0" "0.7.1-1.0.0" "0.7.0-1.0.0" "0.6.12-1.0.0" "0.6.11-1.0.0" "0.6.10-1.0.0" "0.6.9-1.0.0" "0.6.8-1.0.0" "0.6.7-1.0.0" "0.6.6-1.0.0" "0.6.5-1.0.0" "0.6.4-1.0.0" "0.6.3-1.0.0" "0.6.2-1.0.0" "0.6.1-1.0.0" "0.6.0-1.0.0" "0.5.17-1.0.0" "0.5.16-1.0.0" "0.5.15-1.0.0" "0.5.14-1.0.0" "0.5.13-1.0.0" "0.5.12-1.0.0" "0.5.11-1.0.0" "0.5.10-1.0.0" "0.5.9-1.0.0" "0.5.8-1.0.0" "0.5.7-1.0.0" "0.5.6-1.0.0" "0.5.5-1.0.0" "0.5.4-1.0.0" "0.5.3-1.0.0" "0.5.2-1.0.0" "0.5.1-1.0.0" "0.5.0-1.0.0" "0.4.26-1.0.0" "0.4.25-1.0.0" "0.4.24-1.0.0" "0.4.23-1.0.0" "0.4.22-1.0.0" "0.4.21-1.0.0" "0.4.20-1.0.0" "0.4.19-1.0.0" "0.4.18-1.0.0" "0.4.17-1.0.0" "0.4.16-1.0.0" "0.4.15-1.0.0" "0.4.14-1.0.0" "0.4.13-1.0.0" "0.4.12-1.0.0" - "0.8.27-1.0.1" "0.8.26-1.0.1" "0.8.25-1.0.1" "0.8.24-1.0.1" "0.8.23-1.0.1" "0.8.22-1.0.1" "0.8.21-1.0.1" "0.8.20-1.0.1" "0.8.19-1.0.1" "0.8.18-1.0.1" "0.8.17-1.0.1" "0.8.16-1.0.1" "0.8.15-1.0.1" "0.8.14-1.0.1" "0.8.13-1.0.1" "0.8.12-1.0.1" "0.8.11-1.0.1" "0.8.10-1.0.1" "0.8.9-1.0.1" "0.8.8-1.0.1" "0.8.7-1.0.1" "0.8.6-1.0.1" "0.8.5-1.0.1" "0.8.4-1.0.1" "0.8.3-1.0.1" "0.8.2-1.0.1" "0.8.1-1.0.1" "0.8.0-1.0.1" "0.7.6-1.0.1" "0.7.5-1.0.1" "0.7.4-1.0.1" "0.7.3-1.0.1" "0.7.2-1.0.1" "0.7.1-1.0.1" "0.7.0-1.0.1" "0.6.12-1.0.1" "0.6.11-1.0.1" "0.6.10-1.0.1" "0.6.9-1.0.1" "0.6.8-1.0.1" "0.6.7-1.0.1" "0.6.6-1.0.1" "0.6.5-1.0.1" "0.6.4-1.0.1" "0.6.3-1.0.1" "0.6.2-1.0.1" "0.6.1-1.0.1" "0.6.0-1.0.1" "0.5.17-1.0.1" "0.5.16-1.0.1" "0.5.15-1.0.1" "0.5.14-1.0.1" "0.5.13-1.0.1" "0.5.12-1.0.1" "0.5.11-1.0.1" "0.5.10-1.0.1" "0.5.9-1.0.1" "0.5.8-1.0.1" "0.5.7-1.0.1" 
"0.5.6-1.0.1" "0.5.5-1.0.1" "0.5.4-1.0.1" "0.5.3-1.0.1" "0.5.2-1.0.1" "0.5.1-1.0.1" "0.5.0-1.0.1" "0.4.26-1.0.1" "0.4.25-1.0.1" "0.4.24-1.0.1" "0.4.23-1.0.1" "0.4.22-1.0.1" "0.4.21-1.0.1" "0.4.20-1.0.1" "0.4.19-1.0.1" "0.4.18-1.0.1" "0.4.17-1.0.1" "0.4.16-1.0.1" "0.4.15-1.0.1" "0.4.14-1.0.1" "0.4.13-1.0.1" "0.4.12-1.0.1" + "0.8.28-1.0.1" "0.8.27-1.0.1" "0.8.26-1.0.1" "0.8.25-1.0.1" "0.8.24-1.0.1" "0.8.23-1.0.1" "0.8.22-1.0.1" "0.8.21-1.0.1" "0.8.20-1.0.1" "0.8.19-1.0.1" "0.8.18-1.0.1" "0.8.17-1.0.1" "0.8.16-1.0.1" "0.8.15-1.0.1" "0.8.14-1.0.1" "0.8.13-1.0.1" "0.8.12-1.0.1" "0.8.11-1.0.1" "0.8.10-1.0.1" "0.8.9-1.0.1" "0.8.8-1.0.1" "0.8.7-1.0.1" "0.8.6-1.0.1" "0.8.5-1.0.1" "0.8.4-1.0.1" "0.8.3-1.0.1" "0.8.2-1.0.1" "0.8.1-1.0.1" "0.8.0-1.0.1" "0.7.6-1.0.1" "0.7.5-1.0.1" "0.7.4-1.0.1" "0.7.3-1.0.1" "0.7.2-1.0.1" "0.7.1-1.0.1" "0.7.0-1.0.1" "0.6.12-1.0.1" "0.6.11-1.0.1" "0.6.10-1.0.1" "0.6.9-1.0.1" "0.6.8-1.0.1" "0.6.7-1.0.1" "0.6.6-1.0.1" "0.6.5-1.0.1" "0.6.4-1.0.1" "0.6.3-1.0.1" "0.6.2-1.0.1" "0.6.1-1.0.1" "0.6.0-1.0.1" "0.5.17-1.0.1" "0.5.16-1.0.1" "0.5.15-1.0.1" "0.5.14-1.0.1" "0.5.13-1.0.1" "0.5.12-1.0.1" "0.5.11-1.0.1" "0.5.10-1.0.1" "0.5.9-1.0.1" "0.5.8-1.0.1" "0.5.7-1.0.1" "0.5.6-1.0.1" "0.5.5-1.0.1" "0.5.4-1.0.1" "0.5.3-1.0.1" "0.5.2-1.0.1" "0.5.1-1.0.1" "0.5.0-1.0.1" "0.4.26-1.0.1" "0.4.25-1.0.1" "0.4.24-1.0.1" "0.4.23-1.0.1" "0.4.22-1.0.1" "0.4.21-1.0.1" "0.4.20-1.0.1" "0.4.19-1.0.1" "0.4.18-1.0.1" "0.4.17-1.0.1" "0.4.16-1.0.1" "0.4.15-1.0.1" "0.4.14-1.0.1" "0.4.13-1.0.1" "0.4.12-1.0.1" ) for version in ${list[@]}; do From 76140e66eace3431e8dbbed7cbc1f8a1585fca65 Mon Sep 17 00:00:00 2001 From: Danil Date: Tue, 15 Oct 2024 16:38:18 +0200 Subject: [PATCH 067/140] fix(ci): Do not build test contracts (#3100) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. Signed-off-by: Danil --- .github/workflows/build-core-template.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index 18b444a99ed..c11807c4dca 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -138,7 +138,7 @@ jobs: if: env.BUILD_CONTRACTS == 'true' run: | ci_run cp etc/tokens/{test,localhost}.json - ci_run zkstack dev contracts + ci_run zkstack dev contracts --system-contracts --l1-contracts --l2-contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} From d9284d669c9d23a876690b79f9bd6d97ca0c7789 Mon Sep 17 00:00:00 2001 From: Danil Date: Tue, 15 Oct 2024 18:22:48 +0200 Subject: [PATCH 068/140] fix(ci): Don't build zkstack if it's not necessary (#3101) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
Signed-off-by: Danil --- .github/workflows/build-core-template.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index c11807c4dca..fe1d2342764 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -130,6 +130,7 @@ jobs: ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true ci_run zkstackup -g --local From 331fe876705e708585f5b63add98a7eee9d76f25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Fran=C3=A7a?= Date: Tue, 15 Oct 2024 18:12:27 +0100 Subject: [PATCH 069/140] fix: Changed debug page address on config file (#3102) Debug page address was set to 127.0.0.1:5000 on the consensus config file for the docker node. That didn't let users access the page from outside the container. It was set to 0.0.0.0:5000 and it now works. --- .../configs/mainnet_consensus_config.yaml | 2 +- .../configs/testnet_consensus_config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml index c2bef23b2e4..f2a0ce31875 100644 --- a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml +++ b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml @@ -1,6 +1,6 @@ server_addr: '0.0.0.0:3054' public_addr: '127.0.0.1:3054' -debug_page_addr: '127.0.0.1:5000' +debug_page_addr: '0.0.0.0:5000' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 gossip_static_outbound: diff --git a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml index 7a82705990c..a5f752fe405 100644 --- a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml +++ b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml @@ -1,6 +1,6 @@ server_addr: '0.0.0.0:3054' public_addr: '127.0.0.1:3054' -debug_page_addr: '127.0.0.1:5000' +debug_page_addr: '0.0.0.0:5000' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 gossip_static_outbound: From 67475292ff770d2edd6884be27f976a4144778ae Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 16 Oct 2024 10:19:14 +0300 Subject: [PATCH 070/140] feat(api): Make acceptable values cache lag configurable (#3028) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Allows to configure acceptable values cache lag (measured in the number of L2 blocks). Increases the default value from 5 to 20 blocks. ## Why ❔ Currently, acceptable lag is hard-coded and is arguably too small at times. It can lead to the values cache getting reset during Postgres usage surges (e.g., when sealing an L1 batch). ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
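Operators can tune the new limit through the API server configuration; a sketch using the env-based settings added in this patch (when the variable is unset, the default of 20 blocks applies; the external node currently pins the same value in code):

```bash
# Larger values avoid spurious cache resets when Postgres lags (e.g. while sealing
# an L1 batch), at the cost of longer incremental cache updates.
API_WEB3_JSON_RPC_LATEST_VALUES_CACHE_SIZE_MB=256
API_WEB3_JSON_RPC_LATEST_VALUES_MAX_BLOCK_LAG=50
```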
--- core/bin/external_node/src/node_builder.rs | 1 + core/bin/zksync_server/src/node_builder.rs | 1 + core/lib/config/src/configs/api.rs | 34 ++-- core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/api.rs | 2 + core/lib/protobuf_config/src/api.rs | 8 +- .../src/proto/config/api.proto | 1 + core/lib/state/src/cache/lru_cache.rs | 7 + core/lib/state/src/postgres/mod.rs | 185 ++++++++++-------- core/lib/state/src/postgres/tests.rs | 4 +- .../layers/web3_api/tx_sender.rs | 12 +- 11 files changed, 157 insertions(+), 99 deletions(-) diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 14e09b9c2a7..7d848901353 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -378,6 +378,7 @@ impl ExternalNodeBuilder { factory_deps_cache_size: self.config.optional.factory_deps_cache_size() as u64, initial_writes_cache_size: self.config.optional.initial_writes_cache_size() as u64, latest_values_cache_size: self.config.optional.latest_values_cache_size() as u64, + latest_values_max_block_lag: 20, // reasonable default }; let max_vm_concurrency = self.config.optional.vm_concurrency_limit; let tx_sender_layer = TxSenderLayer::new( diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 9fdbc129b19..b04227965f8 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -305,6 +305,7 @@ impl MainNodeBuilder { factory_deps_cache_size: rpc_config.factory_deps_cache_size() as u64, initial_writes_cache_size: rpc_config.initial_writes_cache_size() as u64, latest_values_cache_size: rpc_config.latest_values_cache_size() as u64, + latest_values_max_block_lag: rpc_config.latest_values_max_block_lag(), }; // On main node we always use master pool sink. diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index 86c9ebd074d..ce0d9612958 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -189,6 +189,10 @@ pub struct Web3JsonRpcConfig { /// Latest values cache size in MiBs. The default value is 128 MiB. If set to 0, the latest /// values cache will be disabled. pub latest_values_cache_size_mb: Option, + /// Maximum lag in the number of blocks for the latest values cache after which the cache is reset. Greater values + /// lead to increased the cache update latency, i.e., less storage queries being processed by the cache. OTOH, smaller values + /// can lead to spurious resets when Postgres lags for whatever reason (e.g., when sealing L1 batches). + pub latest_values_max_block_lag: Option, /// Limit for fee history block range. pub fee_history_limit: Option, /// Maximum number of requests in a single batch JSON RPC request. Default is 500. 
@@ -243,20 +247,21 @@ impl Web3JsonRpcConfig { estimate_gas_acceptable_overestimation: 1000, estimate_gas_optimize_search: false, max_tx_size: 1000000, - vm_execution_cache_misses_limit: Default::default(), - vm_concurrency_limit: Default::default(), - factory_deps_cache_size_mb: Default::default(), - initial_writes_cache_size_mb: Default::default(), - latest_values_cache_size_mb: Default::default(), - fee_history_limit: Default::default(), - max_batch_request_size: Default::default(), - max_response_body_size_mb: Default::default(), + vm_execution_cache_misses_limit: None, + vm_concurrency_limit: None, + factory_deps_cache_size_mb: None, + initial_writes_cache_size_mb: None, + latest_values_cache_size_mb: None, + latest_values_max_block_lag: None, + fee_history_limit: None, + max_batch_request_size: None, + max_response_body_size_mb: None, max_response_body_size_overrides_mb: MaxResponseSizeOverrides::empty(), - websocket_requests_per_minute_limit: Default::default(), - mempool_cache_update_interval: Default::default(), - mempool_cache_size: Default::default(), + websocket_requests_per_minute_limit: None, + mempool_cache_update_interval: None, + mempool_cache_size: None, tree_api_url: None, - whitelisted_tokens_for_aa: Default::default(), + whitelisted_tokens_for_aa: vec![], api_namespaces: None, extended_api_tracing: false, } @@ -308,6 +313,11 @@ impl Web3JsonRpcConfig { self.latest_values_cache_size_mb.unwrap_or(128) * super::BYTES_IN_MEGABYTE } + /// Returns the maximum lag in the number of blocks for the latest values cache. + pub fn latest_values_max_block_lag(&self) -> u32 { + self.latest_values_max_block_lag.map_or(20, NonZeroU32::get) + } + pub fn fee_history_limit(&self) -> u64 { self.fee_history_limit.unwrap_or(1024) } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 960808aa6a6..0fdd927d19f 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -86,6 +86,7 @@ impl Distribution for EncodeDist { factory_deps_cache_size_mb: self.sample(rng), initial_writes_cache_size_mb: self.sample(rng), latest_values_cache_size_mb: self.sample(rng), + latest_values_max_block_lag: self.sample(rng), fee_history_limit: self.sample(rng), max_batch_request_size: self.sample(rng), max_response_body_size_mb: self.sample(rng), diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs index 53efea9a784..ecc2343d49f 100644 --- a/core/lib/env_config/src/api.rs +++ b/core/lib/env_config/src/api.rs @@ -76,6 +76,7 @@ mod tests { factory_deps_cache_size_mb: Some(128), initial_writes_cache_size_mb: Some(32), latest_values_cache_size_mb: Some(256), + latest_values_max_block_lag: Some(NonZeroU32::new(50).unwrap()), fee_history_limit: Some(100), max_batch_request_size: Some(200), max_response_body_size_mb: Some(10), @@ -136,6 +137,7 @@ mod tests { API_WEB3_JSON_RPC_FACTORY_DEPS_CACHE_SIZE_MB=128 API_WEB3_JSON_RPC_INITIAL_WRITES_CACHE_SIZE_MB=32 API_WEB3_JSON_RPC_LATEST_VALUES_CACHE_SIZE_MB=256 + API_WEB3_JSON_RPC_LATEST_VALUES_MAX_BLOCK_LAG=50 API_WEB3_JSON_RPC_FEE_HISTORY_LIMIT=100 API_WEB3_JSON_RPC_MAX_BATCH_REQUEST_SIZE=200 API_WEB3_JSON_RPC_WEBSOCKET_REQUESTS_PER_MINUTE_LIMIT=10 diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs index a0c3825228a..3db80c6d691 100644 --- a/core/lib/protobuf_config/src/api.rs +++ b/core/lib/protobuf_config/src/api.rs @@ -1,4 +1,4 @@ -use std::num::NonZeroUsize; +use std::num::{NonZeroU32, NonZeroUsize}; use anyhow::Context as _; use 
zksync_config::configs::{api, ApiConfig}; @@ -113,6 +113,11 @@ impl ProtoRepr for proto::Web3JsonRpc { .map(|x| x.try_into()) .transpose() .context("latest_values_cache_size_mb")?, + latest_values_max_block_lag: self + .latest_values_max_block_lag + .map(|x| x.try_into()) + .transpose() + .context("latest_values_max_block_lag")?, fee_history_limit: self.fee_history_limit, max_batch_request_size: self .max_batch_request_size @@ -183,6 +188,7 @@ impl ProtoRepr for proto::Web3JsonRpc { latest_values_cache_size_mb: this .latest_values_cache_size_mb .map(|x| x.try_into().unwrap()), + latest_values_max_block_lag: this.latest_values_max_block_lag.map(NonZeroU32::get), fee_history_limit: this.fee_history_limit, max_batch_request_size: this.max_batch_request_size.map(|x| x.try_into().unwrap()), max_response_body_size_mb: this diff --git a/core/lib/protobuf_config/src/proto/config/api.proto b/core/lib/protobuf_config/src/proto/config/api.proto index 68475e442fd..89ba0a6bcd2 100644 --- a/core/lib/protobuf_config/src/proto/config/api.proto +++ b/core/lib/protobuf_config/src/proto/config/api.proto @@ -41,6 +41,7 @@ message Web3JsonRpc { repeated string api_namespaces = 32; // Optional, if empty all namespaces are available optional bool extended_api_tracing = 33; // optional, default false optional bool estimate_gas_optimize_search = 34; // optional, default false + optional uint32 latest_values_max_block_lag = 35; // optional reserved 15; reserved "l1_to_l2_transactions_compatibility_mode"; reserved 11; reserved "request_timeout"; diff --git a/core/lib/state/src/cache/lru_cache.rs b/core/lib/state/src/cache/lru_cache.rs index fa37bdb3e22..55b037bbb8c 100644 --- a/core/lib/state/src/cache/lru_cache.rs +++ b/core/lib/state/src/cache/lru_cache.rs @@ -46,6 +46,13 @@ where Self { name, cache } } + /// Returns the capacity of this cache in bytes. + pub fn capacity(&self) -> u64 { + self.cache + .as_ref() + .map_or(0, |cache| cache.policy().max_capacity().unwrap_or(u64::MAX)) + } + /// Gets an entry and pulls it to the front if it exists. pub fn get(&self, key: &K) -> Option { let latency = METRICS.latency[&(self.name, Method::Get)].start(); diff --git a/core/lib/state/src/postgres/mod.rs b/core/lib/state/src/postgres/mod.rs index 67866634ee4..f689f1487f3 100644 --- a/core/lib/state/src/postgres/mod.rs +++ b/core/lib/state/src/postgres/mod.rs @@ -72,8 +72,7 @@ impl CacheValue for TimestampedStorageValue { #[allow(clippy::cast_possible_truncation)] // doesn't happen in practice fn cache_weight(&self) -> u32 { const WEIGHT: usize = mem::size_of::() + mem::size_of::(); - // ^ Since values are small in size, we want to account for key sizes as well - + // ^ Since values are small, we want to account for key sizes as well WEIGHT as u32 } } @@ -114,6 +113,14 @@ impl ValuesCache { Self(Arc::new(RwLock::new(inner))) } + fn capacity(&self) -> u64 { + self.0 + .read() + .expect("values cache is poisoned") + .values + .capacity() + } + /// *NB.* The returned value should be considered immediately stale; at best, it can be /// the lower boundary on the current `valid_for` value. fn valid_for(&self) -> L2BlockNumber { @@ -154,80 +161,86 @@ impl ValuesCache { } } + fn reset( + &self, + from_l2_block: L2BlockNumber, + to_l2_block: L2BlockNumber, + ) -> anyhow::Result<()> { + // We can spend too much time loading data from Postgres, so we opt for an easier "update" route: + // evict *everything* from cache and call it a day. This should not happen too often in practice. 
+ tracing::info!( + "Storage values cache is too far behind (current L2 block is {from_l2_block}; \ + requested update to {to_l2_block}); resetting the cache" + ); + let mut lock = self + .0 + .write() + .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; + anyhow::ensure!( + lock.valid_for == from_l2_block, + "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ + valid for L2 block #{}", + lock.valid_for + ); + lock.valid_for = to_l2_block; + lock.values.clear(); + + CACHE_METRICS.values_emptied.inc(); + CACHE_METRICS + .values_valid_for_miniblock + .set(u64::from(to_l2_block.0)); + Ok(()) + } + async fn update( &self, from_l2_block: L2BlockNumber, to_l2_block: L2BlockNumber, connection: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { - const MAX_L2_BLOCKS_LAG: u32 = 5; - tracing::debug!( "Updating storage values cache from L2 block {from_l2_block} to {to_l2_block}" ); - if to_l2_block.0 - from_l2_block.0 > MAX_L2_BLOCKS_LAG { - // We can spend too much time loading data from Postgres, so we opt for an easier "update" route: - // evict *everything* from cache and call it a day. This should not happen too often in practice. - tracing::info!( - "Storage values cache is too far behind (current L2 block is {from_l2_block}; \ - requested update to {to_l2_block}); resetting the cache" - ); - let mut lock = self - .0 - .write() - .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; - anyhow::ensure!( - lock.valid_for == from_l2_block, - "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ - valid for L2 block #{}", - lock.valid_for - ); - lock.valid_for = to_l2_block; - lock.values.clear(); + let update_latency = CACHE_METRICS.values_update[&ValuesUpdateStage::LoadKeys].start(); + let l2_blocks = (from_l2_block + 1)..=to_l2_block; + let modified_keys = connection + .storage_logs_dal() + .modified_keys_in_l2_blocks(l2_blocks.clone()) + .await?; - CACHE_METRICS.values_emptied.inc(); - } else { - let update_latency = CACHE_METRICS.values_update[&ValuesUpdateStage::LoadKeys].start(); - let l2_blocks = (from_l2_block + 1)..=to_l2_block; - let modified_keys = connection - .storage_logs_dal() - .modified_keys_in_l2_blocks(l2_blocks.clone()) - .await?; - - let elapsed = update_latency.observe(); - CACHE_METRICS - .values_update_modified_keys - .observe(modified_keys.len()); - tracing::debug!( - "Loaded {modified_keys_len} modified storage keys from L2 blocks {l2_blocks:?}; \ - took {elapsed:?}", - modified_keys_len = modified_keys.len() - ); + let elapsed = update_latency.observe(); + CACHE_METRICS + .values_update_modified_keys + .observe(modified_keys.len()); + tracing::debug!( + "Loaded {modified_keys_len} modified storage keys from L2 blocks {l2_blocks:?}; \ + took {elapsed:?}", + modified_keys_len = modified_keys.len() + ); - let update_latency = - CACHE_METRICS.values_update[&ValuesUpdateStage::RemoveStaleKeys].start(); - let mut lock = self - .0 - .write() - .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; - // The code below holding onto the write `lock` is the only code that can theoretically poison the `RwLock` - // (other than emptying the cache above). Thus, it's kept as simple and tight as possible. - // E.g., we load data from Postgres beforehand. 
- anyhow::ensure!( - lock.valid_for == from_l2_block, - "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ - valid for L2 block #{}", - lock.valid_for - ); - lock.valid_for = to_l2_block; - for modified_key in &modified_keys { - lock.values.remove(modified_key); - } - lock.values.report_size(); - drop(lock); - update_latency.observe(); + let update_latency = + CACHE_METRICS.values_update[&ValuesUpdateStage::RemoveStaleKeys].start(); + let mut lock = self + .0 + .write() + .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; + // The code below holding onto the write `lock` is the only code that can theoretically poison the `RwLock` + // (other than emptying the cache above). Thus, it's kept as simple and tight as possible. + // E.g., we load data from Postgres beforehand. + anyhow::ensure!( + lock.valid_for == from_l2_block, + "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ + valid for L2 block #{}", + lock.valid_for + ); + lock.valid_for = to_l2_block; + for modified_key in &modified_keys { + lock.values.remove(modified_key); } + lock.values.report_size(); + drop(lock); + update_latency.observe(); CACHE_METRICS .values_valid_for_miniblock @@ -298,6 +311,7 @@ impl PostgresStorageCaches { pub fn configure_storage_values_cache( &mut self, capacity: u64, + max_l2_blocks_lag: u32, connection_pool: ConnectionPool, ) -> PostgresStorageCachesTask { assert!( @@ -320,6 +334,7 @@ impl PostgresStorageCaches { PostgresStorageCachesTask { connection_pool, values_cache, + max_l2_blocks_lag, command_receiver, } } @@ -349,6 +364,7 @@ impl PostgresStorageCaches { pub struct PostgresStorageCachesTask { connection_pool: ConnectionPool, values_cache: ValuesCache, + max_l2_blocks_lag: u32, command_receiver: UnboundedReceiver, } @@ -359,32 +375,41 @@ impl PostgresStorageCachesTask { /// /// - Propagates Postgres errors. /// - Propagates errors from the cache update task. + #[tracing::instrument(name = "PostgresStorageCachesTask::run", skip_all)] pub async fn run(mut self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + tracing::info!( + max_l2_blocks_lag = self.max_l2_blocks_lag, + values_cache.capacity = self.values_cache.capacity(), + "Starting task" + ); + let mut current_l2_block = self.values_cache.valid_for(); loop { - tokio::select! { - _ = stop_receiver.changed() => { - break; - } - Some(to_l2_block) = self.command_receiver.recv() => { - if to_l2_block <= current_l2_block { - continue; - } - let mut connection = self - .connection_pool - .connection_tagged("values_cache_updater") - .await?; - self.values_cache - .update(current_l2_block, to_l2_block, &mut connection) - .await?; - current_l2_block = to_l2_block; - } + let to_l2_block = tokio::select! { + _ = stop_receiver.changed() => break, + Some(to_l2_block) = self.command_receiver.recv() => to_l2_block, else => { // The command sender has been dropped, which means that we must receive the stop signal soon. 
stop_receiver.changed().await?; break; } + }; + if to_l2_block <= current_l2_block { + continue; + } + + if to_l2_block.0 - current_l2_block.0 > self.max_l2_blocks_lag { + self.values_cache.reset(current_l2_block, to_l2_block)?; + } else { + let mut connection = self + .connection_pool + .connection_tagged("values_cache_updater") + .await?; + self.values_cache + .update(current_l2_block, to_l2_block, &mut connection) + .await?; } + current_l2_block = to_l2_block; } Ok(()) } diff --git a/core/lib/state/src/postgres/tests.rs b/core/lib/state/src/postgres/tests.rs index f88055fa047..029df60cb46 100644 --- a/core/lib/state/src/postgres/tests.rs +++ b/core/lib/state/src/postgres/tests.rs @@ -462,7 +462,7 @@ async fn wait_for_cache_update(values_cache: &ValuesCache, target_l2_block: L2Bl fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { let mut caches = PostgresStorageCaches::new(1_024, 1_024); - let task = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone()); + let task = caches.configure_storage_values_cache(1_024 * 1_024, 5, pool.clone()); let (stop_sender, stop_receiver) = watch::channel(false); let update_task_handle = tokio::task::spawn(task.run(stop_receiver)); @@ -595,7 +595,7 @@ fn mini_fuzz_values_cache_inner( mut rt_handle: Handle, ) { let mut caches = PostgresStorageCaches::new(1_024, 1_024); - let _ = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone()); + let _ = caches.configure_storage_values_cache(1_024 * 1_024, 5, pool.clone()); let values_cache = caches.values.as_ref().unwrap().cache.clone(); let mut connection = rt_handle.block_on(pool.connection()).unwrap(); diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index a09938055fa..ba1a69e23bb 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -32,6 +32,7 @@ pub struct PostgresStorageCachesConfig { pub factory_deps_cache_size: u64, pub initial_writes_cache_size: u64, pub latest_values_cache_size: u64, + pub latest_values_max_block_lag: u32, } /// Wiring layer for the `TxSender`. @@ -133,10 +134,13 @@ impl WiringLayer for TxSenderLayer { PostgresStorageCaches::new(factory_deps_capacity, initial_writes_capacity); let postgres_storage_caches_task = if values_capacity > 0 { - Some( - storage_caches - .configure_storage_values_cache(values_capacity, replica_pool.clone()), - ) + let update_task = storage_caches.configure_storage_values_cache( + values_capacity, + self.postgres_storage_caches_config + .latest_values_max_block_lag, + replica_pool.clone(), + ); + Some(update_task) } else { None }; From 6918180e558de42c0a9c5f008fa128255b16680f Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 16 Oct 2024 11:04:06 +0300 Subject: [PATCH 071/140] =?UTF-8?q?test(api):=20Add=20tests=20for=20EVM=20?= =?UTF-8?q?emulator=20=E2=80=93=20API=20(#3054)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Adds unit tests in the API server crate testing a mock EVM emulator. - Allows `to == None` for `eth_call` and `debug_traceCall` RPC methods if EVM emulation is enabled, to align with Ethereum node behavior. - Fixes an integer overflow when estimating gas for L1 / upgrade transactions. ## Why ❔ Ensures that EVM emulation will work as expected. 
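The overflow fix mentioned above replaces the unchecked `gas_limit * max_fee_per_gas + value` computation with a checked addition, since `value` is client-supplied and is not validated against any balance for L1 / upgrade transactions; a minimal sketch of the pattern (the multiplication stays unchecked, as in the original code):

```rust
use zksync_types::U256;

// Sketch of the `to_mint` computation used during gas estimation; a `None` result
// is surfaced to the caller as a submission error instead of panicking.
fn required_funds(gas_limit: U256, max_fee_per_gas: U256, value: U256) -> Option<U256> {
    (gas_limit * max_fee_per_gas).checked_add(value)
}
```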
## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. fix(api): Allow `to == None` for `eth_call` and `debug_traceCall` fix(api): Avoid integer overflow when estimating gas for L1 / upgrade transactions --- core/lib/vm_executor/src/oneshot/block.rs | 34 +- core/lib/vm_executor/src/oneshot/contracts.rs | 77 +++- core/lib/vm_executor/src/oneshot/env.rs | 80 +--- core/lib/vm_executor/src/oneshot/mod.rs | 6 +- .../src/execution_sandbox/execute.rs | 9 +- .../src/tx_sender/gas_estimation.rs | 42 +- core/node/api_server/src/tx_sender/mod.rs | 27 +- core/node/api_server/src/tx_sender/result.rs | 30 +- .../src/tx_sender/tests/gas_estimation.rs | 9 +- .../api_server/src/web3/namespaces/debug.rs | 2 +- .../api_server/src/web3/namespaces/eth.rs | 2 +- core/node/api_server/src/web3/testonly.rs | 140 +++--- .../node/api_server/src/web3/tests/filters.rs | 4 +- core/node/api_server/src/web3/tests/mod.rs | 56 ++- core/node/api_server/src/web3/tests/vm.rs | 419 ++++++++++++++++-- core/node/api_server/src/web3/tests/ws.rs | 6 +- core/node/consensus/src/vm.rs | 17 +- 17 files changed, 695 insertions(+), 265 deletions(-) diff --git a/core/lib/vm_executor/src/oneshot/block.rs b/core/lib/vm_executor/src/oneshot/block.rs index c820ea794fe..cc759c032fc 100644 --- a/core/lib/vm_executor/src/oneshot/block.rs +++ b/core/lib/vm_executor/src/oneshot/block.rs @@ -14,7 +14,7 @@ use zksync_types::{ }; use zksync_utils::{h256_to_u256, time::seconds_since_epoch}; -use super::env::OneshotEnvParameters; +use super::{env::OneshotEnvParameters, ContractsKind}; /// Block information necessary to execute a transaction / call. Unlike [`ResolvedBlockInfo`], this information is *partially* resolved, /// which is beneficial for some data workflows. 
@@ -178,7 +178,7 @@ impl ResolvedBlockInfo { } } -impl OneshotEnvParameters { +impl OneshotEnvParameters { pub(super) async fn to_env_inner( &self, connection: &mut Connection<'_, Core>, @@ -194,13 +194,15 @@ impl OneshotEnvParameters { ) .await?; - let (system, l1_batch) = self.prepare_env( - execution_mode, - resolved_block_info, - next_block, - fee_input, - enforced_base_fee, - ); + let (system, l1_batch) = self + .prepare_env( + execution_mode, + resolved_block_info, + next_block, + fee_input, + enforced_base_fee, + ) + .await?; Ok(OneshotEnv { system, l1_batch, @@ -208,14 +210,14 @@ impl OneshotEnvParameters { }) } - fn prepare_env( + async fn prepare_env( &self, execution_mode: TxExecutionMode, resolved_block_info: &ResolvedBlockInfo, next_block: L2BlockEnv, fee_input: BatchFeeInput, enforced_base_fee: Option, - ) -> (SystemEnv, L1BatchEnv) { + ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { let &Self { operator_account, validation_computational_gas_limit, @@ -228,11 +230,9 @@ impl OneshotEnvParameters { version: resolved_block_info.protocol_version, base_system_smart_contracts: self .base_system_contracts - .get_by_protocol_version( - resolved_block_info.protocol_version, - resolved_block_info.use_evm_emulator, - ) - .clone(), + .base_system_contracts(resolved_block_info) + .await + .context("failed getting base system contracts")?, bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, execution_mode, default_validation_computational_gas_limit: validation_computational_gas_limit, @@ -247,7 +247,7 @@ impl OneshotEnvParameters { enforced_base_fee, first_l2_block: next_block, }; - (system_env, l1_batch_env) + Ok((system_env, l1_batch_env)) } } diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index bc433a070b3..dc9ef0c0e8d 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -1,9 +1,52 @@ +use std::{fmt, marker::PhantomData}; + +use async_trait::async_trait; use zksync_contracts::BaseSystemContracts; use zksync_types::ProtocolVersionId; +use super::ResolvedBlockInfo; +use crate::shared::Sealed; + +/// Kind of base system contracts used as a marker in the [`BaseSystemContractsProvider`] trait. +pub trait ContractsKind: fmt::Debug + Sealed {} + +/// Marker for [`BaseSystemContracts`] used for gas estimation. +#[derive(Debug)] +pub struct EstimateGas(()); + +impl Sealed for EstimateGas {} +impl ContractsKind for EstimateGas {} + +/// Marker for [`BaseSystemContracts`] used for calls and transaction execution. +#[derive(Debug)] +pub struct CallOrExecute(()); + +impl Sealed for CallOrExecute {} +impl ContractsKind for CallOrExecute {} + +/// Provider of [`BaseSystemContracts`] for oneshot execution. +/// +/// The main implementation of this trait is [`MultiVMBaseSystemContracts`], which selects contracts +/// based on [`ProtocolVersionId`]. +#[async_trait] +pub trait BaseSystemContractsProvider: fmt::Debug + Send + Sync { + /// Returns base system contracts for executing a transaction on top of the provided block. + /// + /// Implementations are encouraged to cache returned contracts for performance; caching is **not** performed + /// by the caller. + /// + /// # Errors + /// + /// Returned errors are treated as unrecoverable for a particular execution, but further executions are not affected. 
+ async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result; +} + /// System contracts (bootloader and default account abstraction) for all supported VM versions. -#[derive(Debug, Clone)] -pub(super) struct MultiVMBaseSystemContracts { +#[derive(Debug)] +pub struct MultiVMBaseSystemContracts { /// Contracts to be used for pre-virtual-blocks protocol versions. pre_virtual_blocks: BaseSystemContracts, /// Contracts to be used for post-virtual-blocks protocol versions. @@ -24,11 +67,12 @@ pub(super) struct MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts, /// Contracts to be used after the protocol defense upgrade vm_protocol_defense: BaseSystemContracts, + // We use `fn() -> C` marker so that the `MultiVMBaseSystemContracts` unconditionally implements `Send + Sync`. + _contracts_kind: PhantomData C>, } -impl MultiVMBaseSystemContracts { - /// Gets contracts for a certain version. - pub fn get_by_protocol_version( +impl MultiVMBaseSystemContracts { + fn get_by_protocol_version( &self, version: ProtocolVersionId, use_evm_emulator: bool, @@ -71,8 +115,11 @@ impl MultiVMBaseSystemContracts { base } } +} - pub(super) fn load_estimate_gas_blocking() -> Self { +impl MultiVMBaseSystemContracts { + /// Returned system contracts (mainly the bootloader) are tuned to provide accurate execution metrics. + pub fn load_estimate_gas_blocking() -> Self { Self { pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), @@ -86,10 +133,14 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), vm_protocol_defense: BaseSystemContracts::estimate_gas_post_protocol_defense(), + _contracts_kind: PhantomData, } } +} - pub(super) fn load_eth_call_blocking() -> Self { +impl MultiVMBaseSystemContracts { + /// Returned system contracts (mainly the bootloader) are tuned to provide better UX (e.g. revert messages). + pub fn load_eth_call_blocking() -> Self { Self { pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), @@ -103,6 +154,18 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( ), vm_protocol_defense: BaseSystemContracts::playground_post_protocol_defense(), + _contracts_kind: PhantomData, } } } + +#[async_trait] +impl BaseSystemContractsProvider for MultiVMBaseSystemContracts { + async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result { + Ok(self + .get_by_protocol_version(block_info.protocol_version(), block_info.use_evm_emulator())) + } +} diff --git a/core/lib/vm_executor/src/oneshot/env.rs b/core/lib/vm_executor/src/oneshot/env.rs index 51154d561ec..6d70c3cfde9 100644 --- a/core/lib/vm_executor/src/oneshot/env.rs +++ b/core/lib/vm_executor/src/oneshot/env.rs @@ -1,19 +1,12 @@ -use std::marker::PhantomData; +use std::sync::Arc; -use anyhow::Context; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{OneshotEnv, TxExecutionMode}; use zksync_types::{fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId}; -use crate::oneshot::{contracts::MultiVMBaseSystemContracts, ResolvedBlockInfo}; - -/// Marker for [`OneshotEnvParameters`] used for gas estimation. 
-#[derive(Debug)] -pub struct EstimateGas(()); - -/// Marker for [`OneshotEnvParameters`] used for calls and/or transaction execution. -#[derive(Debug)] -pub struct CallOrExecute(()); +use super::{ + BaseSystemContractsProvider, CallOrExecute, ContractsKind, EstimateGas, ResolvedBlockInfo, +}; /// Oneshot environment parameters that are expected to be constant or rarely change during the program lifetime. /// These parameters can be used to create [a full environment](OneshotEnv) for transaction / call execution. @@ -21,15 +14,29 @@ pub struct CallOrExecute(()); /// Notably, these parameters include base system contracts (bootloader and default account abstraction) for all supported /// VM versions. #[derive(Debug)] -pub struct OneshotEnvParameters { +pub struct OneshotEnvParameters { pub(super) chain_id: L2ChainId, - pub(super) base_system_contracts: MultiVMBaseSystemContracts, + pub(super) base_system_contracts: Arc>, pub(super) operator_account: AccountTreeId, pub(super) validation_computational_gas_limit: u32, - _ty: PhantomData, } -impl OneshotEnvParameters { +impl OneshotEnvParameters { + /// Creates env parameters. + pub fn new( + base_system_contracts: Arc>, + chain_id: L2ChainId, + operator_account: AccountTreeId, + validation_computational_gas_limit: u32, + ) -> Self { + Self { + chain_id, + base_system_contracts, + operator_account, + validation_computational_gas_limit, + } + } + /// Returns gas limit for account validation of transactions. pub fn validation_computational_gas_limit(&self) -> u32 { self.validation_computational_gas_limit @@ -37,27 +44,6 @@ impl OneshotEnvParameters { } impl OneshotEnvParameters { - /// Creates env parameters for gas estimation. - /// - /// System contracts (mainly, bootloader) for these params are tuned to provide accurate - /// execution metrics. - pub async fn for_gas_estimation( - chain_id: L2ChainId, - operator_account: AccountTreeId, - ) -> anyhow::Result { - Ok(Self { - chain_id, - base_system_contracts: tokio::task::spawn_blocking( - MultiVMBaseSystemContracts::load_estimate_gas_blocking, - ) - .await - .context("failed loading system contracts for gas estimation")?, - operator_account, - validation_computational_gas_limit: u32::MAX, - _ty: PhantomData, - }) - } - /// Prepares environment for gas estimation. pub async fn to_env( &self, @@ -78,28 +64,6 @@ impl OneshotEnvParameters { } impl OneshotEnvParameters { - /// Creates env parameters for transaction / call execution. - /// - /// System contracts (mainly, bootloader) for these params tuned to provide better UX - /// experience (e.g. revert messages). - pub async fn for_execution( - chain_id: L2ChainId, - operator_account: AccountTreeId, - validation_computational_gas_limit: u32, - ) -> anyhow::Result { - Ok(Self { - chain_id, - base_system_contracts: tokio::task::spawn_blocking( - MultiVMBaseSystemContracts::load_eth_call_blocking, - ) - .await - .context("failed loading system contracts for calls")?, - operator_account, - validation_computational_gas_limit, - _ty: PhantomData, - }) - } - /// Prepares environment for a call. 
pub async fn to_call_env( &self, diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index cb75f396b5d..018e5abded6 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -40,7 +40,11 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; pub use self::{ block::{BlockInfo, ResolvedBlockInfo}, - env::{CallOrExecute, EstimateGas, OneshotEnvParameters}, + contracts::{ + BaseSystemContractsProvider, CallOrExecute, ContractsKind, EstimateGas, + MultiVMBaseSystemContracts, + }, + env::OneshotEnvParameters, mock::MockOneshotExecutor, }; diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 14ac37e5936..bdd57462588 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -119,9 +119,16 @@ impl SandboxExecutor { } pub(crate) async fn mock(executor: MockOneshotExecutor) -> Self { + Self::custom_mock(executor, SandboxExecutorOptions::mock().await) + } + + pub(crate) fn custom_mock( + executor: MockOneshotExecutor, + options: SandboxExecutorOptions, + ) -> Self { Self { engine: SandboxExecutorEngine::Mock(executor), - options: SandboxExecutorOptions::mock().await, + options, storage_caches: None, } } diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs index 44e568ce418..b4a05a0756b 100644 --- a/core/node/api_server/src/tx_sender/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/gas_estimation.rs @@ -131,10 +131,7 @@ impl TxSender { if let Some(pivot) = initial_pivot { let iteration_started_at = Instant::now(); - let (result, _) = estimator - .step(pivot) - .await - .context("estimate_gas step failed")?; + let (result, _) = estimator.step(pivot).await?; Self::adjust_search_bounds(&mut lower_bound, &mut upper_bound, pivot, &result); tracing::trace!( @@ -151,10 +148,7 @@ impl TxSender { // or normal execution errors, so we just hope that increasing the // gas limit will make the transaction successful let iteration_started_at = Instant::now(); - let (result, _) = estimator - .step(mid) - .await - .context("estimate_gas step failed")?; + let (result, _) = estimator.step(mid).await?; Self::adjust_search_bounds(&mut lower_bound, &mut upper_bound, mid, &result); tracing::trace!( @@ -206,7 +200,11 @@ impl TxSender { tx.initiator_account(), tx.execute.value ); - return Err(SubmitTxError::InsufficientFundsForTransfer); + return Err(SubmitTxError::NotEnoughBalanceForFeeValue( + balance, + 0.into(), + tx.execute.value, + )); } } Ok(()) @@ -393,10 +391,7 @@ impl<'a> GasEstimator<'a> { // For L2 transactions, we estimate the amount of gas needed to cover for the pubdata by creating a transaction with infinite gas limit, // and getting how much pubdata it used. - let (result, _) = self - .unadjusted_step(self.max_gas_limit) - .await - .context("estimate_gas step failed")?; + let (result, _) = self.unadjusted_step(self.max_gas_limit).await?; // If the transaction has failed with such a large gas limit, we return an API error here right away, // since the inferred gas bounds would be unreliable in this case. 
result.check_api_call_result()?; @@ -430,7 +425,7 @@ impl<'a> GasEstimator<'a> { async fn step( &self, tx_gas_limit: u64, - ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> { + ) -> Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics), SubmitTxError> { let gas_limit_with_overhead = tx_gas_limit + self.tx_overhead(tx_gas_limit); // We need to ensure that we never use a gas limit that is higher than the maximum allowed let forced_gas_limit = @@ -441,13 +436,16 @@ impl<'a> GasEstimator<'a> { pub(super) async fn unadjusted_step( &self, forced_gas_limit: u64, - ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> { + ) -> Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics), SubmitTxError> { let mut tx = self.transaction.clone(); match &mut tx.common_data { ExecuteTransactionCommon::L1(l1_common_data) => { l1_common_data.gas_limit = forced_gas_limit.into(); - let required_funds = - l1_common_data.gas_limit * l1_common_data.max_fee_per_gas + tx.execute.value; + // Since `tx.execute.value` is supplied by the client and is not checked against the current balance (unlike for L2 transactions), + // we may hit an integer overflow. Ditto for protocol upgrade transactions below. + let required_funds = (l1_common_data.gas_limit * l1_common_data.max_fee_per_gas) + .checked_add(tx.execute.value) + .ok_or(SubmitTxError::MintedAmountOverflow)?; l1_common_data.to_mint = required_funds; } ExecuteTransactionCommon::L2(l2_common_data) => { @@ -455,8 +453,9 @@ impl<'a> GasEstimator<'a> { } ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { common_data.gas_limit = forced_gas_limit.into(); - let required_funds = - common_data.gas_limit * common_data.max_fee_per_gas + tx.execute.value; + let required_funds = (common_data.gas_limit * common_data.max_fee_per_gas) + .checked_add(tx.execute.value) + .ok_or(SubmitTxError::MintedAmountOverflow)?; common_data.to_mint = required_funds; } } @@ -485,10 +484,7 @@ impl<'a> GasEstimator<'a> { suggested_gas_limit: u64, estimated_fee_scale_factor: f64, ) -> Result { - let (result, tx_metrics) = self - .step(suggested_gas_limit) - .await - .context("final estimate_gas step failed")?; + let (result, tx_metrics) = self.step(suggested_gas_limit).await?; result.into_api_call_result()?; self.sender .ensure_tx_executable(&self.transaction, &tx_metrics, false)?; diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 2dbc0d5a0dd..38794fe7137 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -29,7 +29,9 @@ use zksync_types::{ MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::h256_to_u256; -use zksync_vm_executor::oneshot::{CallOrExecute, EstimateGas, OneshotEnvParameters}; +use zksync_vm_executor::oneshot::{ + CallOrExecute, EstimateGas, MultiVMBaseSystemContracts, OneshotEnvParameters, +}; pub(super) use self::{gas_estimation::BinarySearchKind, result::SubmitTxError}; use self::{master_pool_sink::MasterPoolSink, result::ApiCallResult, tx_sink::TxSink}; @@ -102,15 +104,28 @@ impl SandboxExecutorOptions { operator_account: AccountTreeId, validation_computational_gas_limit: u32, ) -> anyhow::Result { + let estimate_gas_contracts = + tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_estimate_gas_blocking) + .await + .context("failed loading base contracts for gas estimation")?; + let call_contracts = + tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_eth_call_blocking) + .await + 
.context("failed loading base contracts for calls / tx execution")?; + Ok(Self { - estimate_gas: OneshotEnvParameters::for_gas_estimation(chain_id, operator_account) - .await?, - eth_call: OneshotEnvParameters::for_execution( + estimate_gas: OneshotEnvParameters::new( + Arc::new(estimate_gas_contracts), + chain_id, + operator_account, + u32::MAX, + ), + eth_call: OneshotEnvParameters::new( + Arc::new(call_contracts), chain_id, operator_account, validation_computational_gas_limit, - ) - .await?, + ), }) } diff --git a/core/node/api_server/src/tx_sender/result.rs b/core/node/api_server/src/tx_sender/result.rs index a49313f0dd6..e2a51ae8e9a 100644 --- a/core/node/api_server/src/tx_sender/result.rs +++ b/core/node/api_server/src/tx_sender/result.rs @@ -24,8 +24,6 @@ pub enum SubmitTxError { GasLimitIsTooBig, #[error("{0}")] Unexecutable(String), - #[error("too many transactions")] - RateLimitExceeded, #[error("server shutting down")] ServerShuttingDown, #[error("failed to include transaction in the system. reason: {0}")] @@ -49,29 +47,23 @@ pub enum SubmitTxError { that caused this error. Error description: {0}" )] UnexpectedVMBehavior(String), - #[error("pubdata price limit is too low, ensure that the price limit is correct")] - UnrealisticPubdataPriceLimit, #[error( "too many factory dependencies in the transaction. {0} provided, while only {1} allowed" )] TooManyFactoryDependencies(usize, usize), - #[error("max fee per gas higher than 2^32")] - FeePerGasTooHigh, - #[error("max fee per pubdata byte higher than 2^32")] - FeePerPubdataByteTooHigh, - /// InsufficientFundsForTransfer is returned if the transaction sender doesn't - /// have enough funds for transfer. - #[error("insufficient balance for transfer")] - InsufficientFundsForTransfer, /// IntrinsicGas is returned if the transaction is specified to use less gas /// than required to start the invocation. #[error("intrinsic gas too low")] IntrinsicGas, - /// Error returned from main node - #[error("{0}")] - ProxyError(#[from] EnrichedClientError), #[error("not enough gas to publish compressed bytecodes")] FailedToPublishCompressedBytecodes, + /// Currently only triggered during gas estimation for L1 and protocol upgrade transactions. + #[error("integer overflow computing base token amount to mint")] + MintedAmountOverflow, + + /// Error returned from main node. + #[error("{0}")] + ProxyError(#[from] EnrichedClientError), /// Catch-all internal error (e.g., database error) that should not be exposed to the caller. 
#[error("internal error")] Internal(#[from] anyhow::Error), @@ -88,7 +80,6 @@ impl SubmitTxError { Self::ExecutionReverted(_, _) => "execution-reverted", Self::GasLimitIsTooBig => "gas-limit-is-too-big", Self::Unexecutable(_) => "unexecutable", - Self::RateLimitExceeded => "rate-limit-exceeded", Self::ServerShuttingDown => "shutting-down", Self::BootloaderFailure(_) => "bootloader-failure", Self::ValidationFailed(_) => "validation-failed", @@ -99,14 +90,11 @@ impl SubmitTxError { Self::MaxFeePerGasTooLow => "max-fee-per-gas-too-low", Self::MaxPriorityFeeGreaterThanMaxFee => "max-priority-fee-greater-than-max-fee", Self::UnexpectedVMBehavior(_) => "unexpected-vm-behavior", - Self::UnrealisticPubdataPriceLimit => "unrealistic-pubdata-price-limit", Self::TooManyFactoryDependencies(_, _) => "too-many-factory-dependencies", - Self::FeePerGasTooHigh => "gas-price-limit-too-high", - Self::FeePerPubdataByteTooHigh => "pubdata-price-limit-too-high", - Self::InsufficientFundsForTransfer => "insufficient-funds-for-transfer", Self::IntrinsicGas => "intrinsic-gas", - Self::ProxyError(_) => "proxy-error", Self::FailedToPublishCompressedBytecodes => "failed-to-publish-compressed-bytecodes", + Self::MintedAmountOverflow => "minted-amount-overflow", + Self::ProxyError(_) => "proxy-error", Self::Internal(_) => "internal", } } diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs index 3fd5fcb5188..4528d9cda12 100644 --- a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -298,7 +298,8 @@ async fn insufficient_funds_error_for_transfer() { let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); - let tx = alice.create_transfer(1_000_000_000.into()); + let transferred_value = 1_000_000_000.into(); + let tx = alice.create_transfer(transferred_value); let fee_scale_factor = 1.0; // Without overrides, the transaction should fail because of insufficient balance. let err = tx_sender @@ -312,7 +313,11 @@ async fn insufficient_funds_error_for_transfer() { ) .await .unwrap_err(); - assert_matches!(err, SubmitTxError::InsufficientFundsForTransfer); + assert_matches!( + err, + SubmitTxError::NotEnoughBalanceForFeeValue(balance, fee, value) + if balance.is_zero() && fee.is_zero() && value == transferred_value + ); } async fn test_estimating_gas( diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 7e99808dbc7..e296fe87faa 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -262,7 +262,7 @@ impl DebugNamespace { let call = L2Tx::from_request( request.into(), MAX_ENCODED_TX_SIZE, - false, // Even with EVM emulation enabled, calls must specify `to` field + block_args.use_evm_emulator(), )?; let vm_permit = self diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 4439fc257cf..5206cd3bc2b 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -81,7 +81,7 @@ impl EthNamespace { let tx = L2Tx::from_request( request.into(), self.state.api_config.max_tx_size, - false, // Even with EVM emulation enabled, calls must specify `to` field + block_args.use_evm_emulator(), )?; // It is assumed that the previous checks has already enforced that the `max_fee_per_gas` is at most u64. 
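// A hedged sketch (not part of the patch): the `BaseSystemContractsProvider` trait
// introduced in `oneshot/contracts.rs` above also admits simple custom providers,
// e.g. a fixed one for tests that ignores the resolved block. The generic layout
// over `ContractsKind` is assumed from the trait's usage elsewhere in this patch.
#[derive(Debug)]
struct FixedContracts(BaseSystemContracts);

#[async_trait]
impl<C: ContractsKind> BaseSystemContractsProvider<C> for FixedContracts {
    async fn base_system_contracts(
        &self,
        _block_info: &ResolvedBlockInfo,
    ) -> anyhow::Result<BaseSystemContracts> {
        Ok(self.0.clone())
    }
}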
diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 3b05e235c6d..2d642b9a04b 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -13,7 +13,10 @@ use zksync_types::L2ChainId; use zksync_vm_executor::oneshot::MockOneshotExecutor; use super::{metrics::ApiTransportLabel, *}; -use crate::{execution_sandbox::SandboxExecutor, tx_sender::TxSenderConfig}; +use crate::{ + execution_sandbox::SandboxExecutor, + tx_sender::{SandboxExecutorOptions, TxSenderConfig}, +}; const TEST_TIMEOUT: Duration = Duration::from_secs(90); const POLL_INTERVAL: Duration = Duration::from_millis(50); @@ -103,6 +106,7 @@ pub struct TestServerBuilder { pool: ConnectionPool, api_config: InternalApiConfig, tx_executor: MockOneshotExecutor, + executor_options: Option, method_tracer: Arc, } @@ -113,6 +117,7 @@ impl TestServerBuilder { api_config, pool, tx_executor: MockOneshotExecutor::default(), + executor_options: None, method_tracer: Arc::default(), } } @@ -131,19 +136,17 @@ impl TestServerBuilder { self } + #[must_use] + pub fn with_executor_options(mut self, options: SandboxExecutorOptions) -> Self { + self.executor_options = Some(options); + self + } + /// Builds an HTTP server. pub async fn build_http(self, stop_receiver: watch::Receiver) -> ApiServerHandles { - spawn_server( - ApiTransportLabel::Http, - self.api_config, - self.pool, - None, - self.tx_executor, - self.method_tracer, - stop_receiver, - ) - .await - .0 + self.spawn_server(ApiTransportLabel::Http, None, stop_receiver) + .await + .0 } /// Builds a WS server. @@ -152,64 +155,73 @@ impl TestServerBuilder { websocket_requests_per_minute_limit: Option, stop_receiver: watch::Receiver, ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { - spawn_server( + self.spawn_server( ApiTransportLabel::Ws, - self.api_config, - self.pool, websocket_requests_per_minute_limit, - self.tx_executor, - self.method_tracer, stop_receiver, ) .await } -} -async fn spawn_server( - transport: ApiTransportLabel, - api_config: InternalApiConfig, - pool: ConnectionPool, - websocket_requests_per_minute_limit: Option, - tx_executor: MockOneshotExecutor, - method_tracer: Arc, - stop_receiver: watch::Receiver, -) -> (ApiServerHandles, mpsc::UnboundedReceiver) { - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, vm_barrier) = - create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor).await; - let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); - - let mut namespaces = Namespace::DEFAULT.to_vec(); - namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); - let sealed_l2_block_handle = SealedL2BlockNumber::default(); - let bridge_addresses_handle = BridgeAddressesHandle::new(api_config.bridge_addresses.clone()); - - let server_builder = match transport { - ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), - ApiTransportLabel::Ws => { - let mut builder = ApiBuilder::jsonrpsee_backend(api_config, pool) - .ws(0) - .with_subscriptions_limit(100); - if let Some(websocket_requests_per_minute_limit) = websocket_requests_per_minute_limit { - builder = builder - .with_websocket_requests_per_minute_limit(websocket_requests_per_minute_limit); + async fn spawn_server( + self, + transport: ApiTransportLabel, + websocket_requests_per_minute_limit: Option, + stop_receiver: watch::Receiver, + ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { + let Self { + tx_executor, + 
executor_options, + pool, + api_config, + method_tracer, + } = self; + + let tx_executor = if let Some(options) = executor_options { + SandboxExecutor::custom_mock(tx_executor, options) + } else { + SandboxExecutor::mock(tx_executor).await + }; + let (tx_sender, vm_barrier) = + create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor).await; + let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); + + let mut namespaces = Namespace::DEFAULT.to_vec(); + namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = + BridgeAddressesHandle::new(api_config.bridge_addresses.clone()); + + let server_builder = match transport { + ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), + ApiTransportLabel::Ws => { + let mut builder = ApiBuilder::jsonrpsee_backend(api_config, pool) + .ws(0) + .with_subscriptions_limit(100); + if let Some(websocket_requests_per_minute_limit) = + websocket_requests_per_minute_limit + { + builder = builder.with_websocket_requests_per_minute_limit( + websocket_requests_per_minute_limit, + ); + } + builder } - builder - } - }; - let server_handles = server_builder - .with_polling_interval(POLL_INTERVAL) - .with_tx_sender(tx_sender) - .with_vm_barrier(vm_barrier) - .with_pub_sub_events(pub_sub_events_sender) - .with_method_tracer(method_tracer) - .enable_api_namespaces(namespaces) - .with_sealed_l2_block_handle(sealed_l2_block_handle) - .with_bridge_addresses_handle(bridge_addresses_handle) - .build() - .expect("Unable to build API server") - .run(stop_receiver) - .await - .expect("Failed spawning JSON-RPC server"); - (server_handles, pub_sub_events_receiver) + }; + let server_handles = server_builder + .with_polling_interval(POLL_INTERVAL) + .with_tx_sender(tx_sender) + .with_vm_barrier(vm_barrier) + .with_pub_sub_events(pub_sub_events_sender) + .with_method_tracer(method_tracer) + .enable_api_namespaces(namespaces) + .with_sealed_l2_block_handle(sealed_l2_block_handle) + .with_bridge_addresses_handle(bridge_addresses_handle) + .build() + .expect("Unable to build API server") + .run(stop_receiver) + .await + .expect("Failed spawning JSON-RPC server"); + (server_handles, pub_sub_events_receiver) + } } diff --git a/core/node/api_server/src/web3/tests/filters.rs b/core/node/api_server/src/web3/tests/filters.rs index 7342ce7e979..c865526815d 100644 --- a/core/node/api_server/src/web3/tests/filters.rs +++ b/core/node/api_server/src/web3/tests/filters.rs @@ -23,7 +23,7 @@ impl HttpTest for BasicFilterChangesTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } @@ -109,7 +109,7 @@ impl HttpTest for LogFilterChangesTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index a8d90c281a7..c83279709a3 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -16,6 +16,7 @@ use zksync_config::{ }, GenesisConfig, }; +use zksync_contracts::BaseSystemContracts; use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, CoreDal}; use zksync_multivm::interface::{ TransactionExecutionMetrics, TransactionExecutionResult, 
TxExecutionStatus, VmEvent, @@ -66,6 +67,7 @@ use zksync_web3_decl::{ use super::*; use crate::{ testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}, + tx_sender::SandboxExecutorOptions, web3::testonly::TestServerBuilder, }; @@ -143,13 +145,18 @@ async fn setting_response_size_limits() { trait HttpTest: Send + Sync { /// Prepares the storage before the server is started. The default implementation performs genesis. fn storage_initialization(&self) -> StorageInitialization { - StorageInitialization::Genesis + StorageInitialization::genesis() } fn transaction_executor(&self) -> MockOneshotExecutor { MockOneshotExecutor::default() } + /// Allows to override sandbox executor options. + fn executor_options(&self) -> Option { + None + } + fn method_tracer(&self) -> Arc { Arc::default() } @@ -166,7 +173,9 @@ trait HttpTest: Send + Sync { /// Storage initialization strategy. #[derive(Debug)] enum StorageInitialization { - Genesis, + Genesis { + evm_emulator: bool, + }, Recovery { logs: Vec, factory_deps: HashMap>, @@ -177,6 +186,16 @@ impl StorageInitialization { const SNAPSHOT_RECOVERY_BATCH: L1BatchNumber = L1BatchNumber(23); const SNAPSHOT_RECOVERY_BLOCK: L2BlockNumber = L2BlockNumber(23); + const fn genesis() -> Self { + Self::Genesis { + evm_emulator: false, + } + } + + const fn genesis_with_evm() -> Self { + Self::Genesis { evm_emulator: true } + } + fn empty_recovery() -> Self { Self::Recovery { logs: vec![], @@ -190,12 +209,29 @@ impl StorageInitialization { storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { match self { - Self::Genesis => { - let params = GenesisParams::load_genesis_params(GenesisConfig { + Self::Genesis { evm_emulator } => { + let mut config = GenesisConfig { l2_chain_id: network_config.zksync_network_id, ..mock_genesis_config() - }) + }; + let mut base_system_contracts = BaseSystemContracts::load_from_disk(); + if evm_emulator { + config.evm_emulator_hash = Some(config.default_aa_hash.unwrap()); + base_system_contracts.evm_emulator = + Some(base_system_contracts.default_aa.clone()); + } else { + assert!(config.evm_emulator_hash.is_none()); + } + + let params = GenesisParams::from_genesis_config( + config, + base_system_contracts, + // We cannot load system contracts with EVM emulator yet because these contracts are missing. + // This doesn't matter for tests because the EVM emulator won't be invoked. + get_system_smart_contracts(false), + ) .unwrap(); + if storage.blocks_dal().is_genesis_needed().await? 
{ insert_genesis_batch(storage, ¶ms).await?; } @@ -254,11 +290,13 @@ async fn test_http_server(test: impl HttpTest) { let genesis = GenesisConfig::for_tests(); let mut api_config = InternalApiConfig::new(&web3_config, &contracts_config, &genesis); api_config.filters_disabled = test.filters_disabled(); - let mut server_handles = TestServerBuilder::new(pool.clone(), api_config) + let mut server_builder = TestServerBuilder::new(pool.clone(), api_config) .with_tx_executor(test.transaction_executor()) - .with_method_tracer(test.method_tracer()) - .build_http(stop_receiver) - .await; + .with_method_tracer(test.method_tracer()); + if let Some(executor_options) = test.executor_options() { + server_builder = server_builder.with_executor_options(executor_options); + } + let mut server_handles = server_builder.build_http(stop_receiver).await; let local_addr = server_handles.wait_until_ready().await; let client = Client::http(format!("http://{local_addr}/").parse().unwrap()) diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 1f843e06fab..45128f579cd 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -9,16 +9,21 @@ use std::{ }; use api::state_override::{OverrideAccount, StateOverride}; +use test_casing::test_casing; +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_multivm::interface::{ - ExecutionResult, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, + ExecutionResult, OneshotEnv, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, }; use zksync_types::{ api::ApiStorageLog, fee_model::BatchFeeInput, get_intrinsic_constants, transaction_request::CallRequest, K256PrivateKey, L2ChainId, PackedEthSignature, - StorageLogKind, StorageLogWithPreviousValue, U256, + StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, }; use zksync_utils::u256_to_h256; -use zksync_vm_executor::oneshot::MockOneshotExecutor; +use zksync_vm_executor::oneshot::{ + BaseSystemContractsProvider, ContractsKind, MockOneshotExecutor, OneshotEnvParameters, + ResolvedBlockInfo, +}; use zksync_web3_decl::namespaces::DebugNamespaceClient; use super::*; @@ -61,6 +66,59 @@ impl ExpectedFeeInput { } } +/// Mock base contracts provider. Necessary to use with EVM emulator because bytecode of the real emulator is not available yet. +#[derive(Debug)] +struct BaseContractsWithMockEvmEmulator(BaseSystemContracts); + +impl Default for BaseContractsWithMockEvmEmulator { + fn default() -> Self { + let mut contracts = BaseSystemContracts::load_from_disk(); + contracts.evm_emulator = Some(contracts.default_aa.clone()); + Self(contracts) + } +} + +#[async_trait] +impl BaseSystemContractsProvider for BaseContractsWithMockEvmEmulator { + async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result { + assert!(block_info.use_evm_emulator()); + Ok(self.0.clone()) + } +} + +fn executor_options_with_evm_emulator() -> SandboxExecutorOptions { + let base_contracts = Arc::::default(); + SandboxExecutorOptions { + estimate_gas: OneshotEnvParameters::new( + base_contracts.clone(), + L2ChainId::default(), + AccountTreeId::default(), + u32::MAX, + ), + eth_call: OneshotEnvParameters::new( + base_contracts, + L2ChainId::default(), + AccountTreeId::default(), + u32::MAX, + ), + } +} + +/// Fetches base contract hashes from the genesis block. 
+async fn genesis_contract_hashes( + connection: &mut Connection<'_, Core>, +) -> anyhow::Result { + Ok(connection + .blocks_dal() + .get_l2_block_header(L2BlockNumber(0)) + .await? + .context("no genesis block")? + .base_system_contracts_hashes) +} + #[derive(Debug, Default)] struct CallTest { fee_input: ExpectedFeeInput, @@ -161,19 +219,104 @@ impl HttpTest for CallTest { store_custom_l2_block(&mut connection, &block_header, &[]).await?; // Fee input is not scaled further as per `ApiFeeInputProvider` implementation self.fee_input.expect_custom(block_header.batch_fee_input); - let call_request = CallTest::call_request(b"block=3"); - let call_result = client.call(call_request.clone(), None, None).await?; + let call_request = Self::call_request(b"block=3"); + let call_result = client.call(call_request, None, None).await?; assert_eq!(call_result.0, b"output"); + let call_request_without_target = CallRequest { + to: None, + ..Self::call_request(b"block=3") + }; + let err = client + .call(call_request_without_target, None, None) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) } } +fn assert_null_to_address_error(error: &ClientError) { + if let ClientError::Call(error) = error { + assert_eq!(error.code(), 3); + assert!(error.message().contains("toAddressIsNull"), "{error:?}"); + assert!(error.data().is_none(), "{error:?}"); + } else { + panic!("Unexpected error: {error:?}"); + } +} + #[tokio::test] async fn call_method_basics() { test_http_server(CallTest::default()).await; } +fn evm_emulator_responses(tx: &Transaction, env: &OneshotEnv) -> ExecutionResult { + assert!(env + .system + .base_system_smart_contracts + .evm_emulator + .is_some()); + match tx.execute.calldata.as_slice() { + b"no_target" => assert_eq!(tx.recipient_account(), None), + _ => assert!(tx.recipient_account().is_some()), + } + ExecutionResult::Success { + output: b"output".to_vec(), + } +} + +#[derive(Debug)] +struct CallTestWithEvmEmulator; + +#[async_trait] +impl HttpTest for CallTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_call_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. 
+ let mut connection = pool.connection().await?; + let block_header = L2BlockHeader { + base_system_contracts_hashes: genesis_contract_hashes(&mut connection).await?, + ..create_l2_block(1) + }; + store_custom_l2_block(&mut connection, &block_header, &[]).await?; + + let call_result = client.call(CallTest::call_request(&[]), None, None).await?; + assert_eq!(call_result.0, b"output"); + + let call_request_without_target = CallRequest { + to: None, + ..CallTest::call_request(b"no_target") + }; + let call_result = client.call(call_request_without_target, None, None).await?; + assert_eq!(call_result.0, b"output"); + Ok(()) + } +} + +#[tokio::test] +async fn call_method_with_evm_emulator() { + test_http_server(CallTestWithEvmEmulator).await; +} + #[derive(Debug, Default)] struct CallTestAfterSnapshotRecovery { fee_input: ExpectedFeeInput, @@ -253,7 +396,11 @@ impl SendRawTransactionTest { value: 123_456.into(), gas: (get_intrinsic_constants().l2_tx_intrinsic_gas * 2).into(), gas_price: StateKeeperConfig::for_tests().minimal_l2_gas_price.into(), - input: vec![1, 2, 3, 4].into(), + input: if include_to { + vec![1, 2, 3, 4].into() + } else { + b"no_target".to_vec().into() + }, ..api::TransactionRequest::default() }; let data = tx_request.get_rlp().unwrap(); @@ -288,7 +435,7 @@ impl HttpTest for SendRawTransactionTest { factory_deps: HashMap::default(), } } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } @@ -344,16 +491,6 @@ async fn send_raw_transaction_after_snapshot_recovery() { .await; } -fn assert_null_to_address_error(error: &ClientError) { - if let ClientError::Call(error) = error { - assert_eq!(error.code(), 3); - assert!(error.message().contains("toAddressIsNull"), "{error:?}"); - assert!(error.data().is_none(), "{error:?}"); - } else { - panic!("Unexpected error: {error:?}"); - } -} - #[derive(Debug)] struct SendRawTransactionWithoutToAddressTest; @@ -388,6 +525,56 @@ async fn send_raw_transaction_fails_without_to_address() { test_http_server(SendRawTransactionWithoutToAddressTest).await; } +#[derive(Debug)] +struct SendRawTransactionTestWithEvmEmulator; + +#[async_trait] +impl HttpTest for SendRawTransactionTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_tx_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + // Manually set sufficient balance for the transaction account. 
+ let mut storage = pool.connection().await?; + storage + .storage_logs_dal() + .append_storage_logs( + L2BlockNumber(0), + &[SendRawTransactionTest::balance_storage_log()], + ) + .await?; + + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(true); + let send_result = client.send_raw_transaction(tx_bytes.into()).await?; + assert_eq!(send_result, tx_hash); + + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(false); + let send_result = client.send_raw_transaction(tx_bytes.into()).await?; + assert_eq!(send_result, tx_hash); + Ok(()) + } +} + +#[tokio::test] +async fn send_raw_transaction_with_evm_emulator() { + test_http_server(SendRawTransactionTestWithEvmEmulator).await; +} + #[derive(Debug)] struct SendTransactionWithDetailedOutputTest; @@ -602,6 +789,16 @@ impl HttpTest for TraceCallTest { let call_result = client.trace_call(call_request.clone(), None, None).await?; Self::assert_debug_call(&call_request, &call_result.unwrap_default()); + let call_request_without_target = CallRequest { + to: None, + ..CallTest::call_request(b"block=3") + }; + let err = client + .call(call_request_without_target, None, None) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) } } @@ -678,16 +875,96 @@ async fn trace_call_after_snapshot_recovery() { test_http_server(TraceCallTestAfterSnapshotRecovery::default()).await; } +#[derive(Debug)] +struct TraceCallTestWithEvmEmulator; + +#[async_trait] +impl HttpTest for TraceCallTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_call_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. 
+ let mut connection = pool.connection().await?; + let block_header = L2BlockHeader { + base_system_contracts_hashes: genesis_contract_hashes(&mut connection).await?, + ..create_l2_block(1) + }; + store_custom_l2_block(&mut connection, &block_header, &[]).await?; + + client + .trace_call(CallTest::call_request(&[]), None, None) + .await?; + + let call_request_without_target = CallRequest { + to: None, + ..CallTest::call_request(b"no_target") + }; + client + .trace_call(call_request_without_target, None, None) + .await?; + Ok(()) + } +} + +#[tokio::test] +async fn trace_call_method_with_evm_emulator() { + test_http_server(TraceCallTestWithEvmEmulator).await; +} + +#[derive(Debug, Clone, Copy)] +enum EstimateMethod { + EthEstimateGas, + ZksEstimateFee, + ZksEstimateGasL1ToL2, +} + +impl EstimateMethod { + const ALL: [Self; 3] = [ + Self::EthEstimateGas, + Self::ZksEstimateFee, + Self::ZksEstimateGasL1ToL2, + ]; + + async fn query(self, client: &DynClient, req: CallRequest) -> Result { + match self { + Self::EthEstimateGas => client.estimate_gas(req, None, None).await, + Self::ZksEstimateFee => client + .estimate_fee(req, None) + .await + .map(|fee| fee.gas_limit), + Self::ZksEstimateGasL1ToL2 => client.estimate_gas_l1_to_l2(req, None).await, + } + } +} + #[derive(Debug)] struct EstimateGasTest { gas_limit_threshold: Arc, + method: EstimateMethod, snapshot_recovery: bool, } impl EstimateGasTest { - fn new(snapshot_recovery: bool) -> Self { + fn new(method: EstimateMethod, snapshot_recovery: bool) -> Self { Self { gas_limit_threshold: Arc::default(), + method, snapshot_recovery, } } @@ -708,9 +985,12 @@ impl HttpTest for EstimateGasTest { L2BlockNumber(1) }; let gas_limit_threshold = self.gas_limit_threshold.clone(); + let should_set_nonce = !matches!(self.method, EstimateMethod::ZksEstimateGasL1ToL2); tx_executor.set_tx_responses(move |tx, env| { assert_eq!(tx.execute.calldata(), [] as [u8; 0]); - assert_eq!(tx.nonce(), Some(Nonce(0))); + if should_set_nonce { + assert_eq!(tx.nonce(), Some(Nonce(0))); + } assert_eq!(env.l1_batch.first_l2_block.number, pending_block_number.0); let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst); @@ -733,8 +1013,9 @@ impl HttpTest for EstimateGasTest { let l2_transaction = create_l2_transaction(10, 100); for threshold in [10_000, 50_000, 100_000, 1_000_000] { self.gas_limit_threshold.store(threshold, Ordering::Relaxed); - let output = client - .estimate_gas(l2_transaction.clone().into(), None, None) + let output = self + .method + .query(client, l2_transaction.clone().into()) .await?; assert!( output >= U256::from(threshold), @@ -759,19 +1040,17 @@ impl HttpTest for EstimateGasTest { let mut call_request = CallRequest::from(l2_transaction); call_request.from = Some(SendRawTransactionTest::private_key().address()); call_request.value = Some(1_000_000.into()); - client - .estimate_gas(call_request.clone(), None, None) - .await?; + + self.method.query(client, call_request.clone()).await?; call_request.value = Some(U256::max_value()); - let error = client - .estimate_gas(call_request, None, None) - .await - .unwrap_err(); + let error = self.method.query(client, call_request).await.unwrap_err(); if let ClientError::Call(error) = error { let error_msg = error.message(); + // L1 and L2 transactions have differing error messages in this case. 
assert!( - error_msg.to_lowercase().contains("insufficient"), + error_msg.to_lowercase().contains("insufficient") + || error_msg.to_lowercase().contains("overflow"), "{error_msg}" ); } else { @@ -781,14 +1060,16 @@ impl HttpTest for EstimateGasTest { } } +#[test_casing(3, EstimateMethod::ALL)] #[tokio::test] -async fn estimate_gas_basics() { - test_http_server(EstimateGasTest::new(false)).await; +async fn estimate_gas_basics(method: EstimateMethod) { + test_http_server(EstimateGasTest::new(method, false)).await; } +#[test_casing(3, EstimateMethod::ALL)] #[tokio::test] -async fn estimate_gas_after_snapshot_recovery() { - test_http_server(EstimateGasTest::new(true)).await; +async fn estimate_gas_after_snapshot_recovery(method: EstimateMethod) { + test_http_server(EstimateGasTest::new(method, true)).await; } #[derive(Debug)] @@ -845,9 +1126,7 @@ impl HttpTest for EstimateGasWithStateOverrideTest { if let ClientError::Call(error) = error { let error_msg = error.message(); assert!( - error_msg - .to_lowercase() - .contains("insufficient balance for transfer"), + error_msg.to_lowercase().contains("insufficient funds"), "{error_msg}" ); } else { @@ -859,15 +1138,17 @@ impl HttpTest for EstimateGasWithStateOverrideTest { #[tokio::test] async fn estimate_gas_with_state_override() { - let inner = EstimateGasTest::new(false); + let inner = EstimateGasTest::new(EstimateMethod::EthEstimateGas, false); test_http_server(EstimateGasWithStateOverrideTest { inner }).await; } #[derive(Debug)] -struct EstimateGasWithoutToAddessTest; +struct EstimateGasWithoutToAddressTest { + method: EstimateMethod, +} #[async_trait] -impl HttpTest for EstimateGasWithoutToAddessTest { +impl HttpTest for EstimateGasWithoutToAddressTest { async fn test( &self, client: &DynClient, @@ -876,8 +1157,9 @@ impl HttpTest for EstimateGasWithoutToAddessTest { let mut l2_transaction = create_l2_transaction(10, 100); l2_transaction.execute.contract_address = None; l2_transaction.common_data.signature = vec![]; // Remove invalidated signature so that it doesn't trip estimation logic - let err = client - .estimate_gas(l2_transaction.clone().into(), None, None) + let err = self + .method + .query(client, l2_transaction.into()) .await .unwrap_err(); assert_null_to_address_error(&err); @@ -885,7 +1167,58 @@ impl HttpTest for EstimateGasWithoutToAddessTest { } } +#[test_casing(3, EstimateMethod::ALL)] +#[tokio::test] +async fn estimate_gas_fails_without_to_address(method: EstimateMethod) { + test_http_server(EstimateGasWithoutToAddressTest { method }).await; +} + +#[derive(Debug)] +struct EstimateGasTestWithEvmEmulator { + method: EstimateMethod, +} + +#[async_trait] +impl HttpTest for EstimateGasTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_tx_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let call_request = CallRequest { + from: Some(Address::repeat_byte(1)), + to: Some(Address::repeat_byte(2)), + ..CallRequest::default() + }; + self.method.query(client, call_request).await?; + + let call_request = CallRequest { + from: Some(Address::repeat_byte(1)), + to: None, + data: Some(b"no_target".to_vec().into()), + ..CallRequest::default() + 
}; + self.method.query(client, call_request).await?; + Ok(()) + } +} + +#[test_casing(3, EstimateMethod::ALL)] #[tokio::test] -async fn estimate_gas_fails_without_to_address() { - test_http_server(EstimateGasWithoutToAddessTest).await; +async fn estimate_gas_with_evm_emulator(method: EstimateMethod) { + test_http_server(EstimateGasTestWithEvmEmulator { method }).await; } diff --git a/core/node/api_server/src/web3/tests/ws.rs b/core/node/api_server/src/web3/tests/ws.rs index 28b2e2beb55..008747a63bc 100644 --- a/core/node/api_server/src/web3/tests/ws.rs +++ b/core/node/api_server/src/web3/tests/ws.rs @@ -147,7 +147,7 @@ async fn notifiers_start_after_snapshot_recovery() { trait WsTest: Send + Sync { /// Prepares the storage before the server is started. The default implementation performs genesis. fn storage_initialization(&self) -> StorageInitialization { - StorageInitialization::Genesis + StorageInitialization::genesis() } async fn test( @@ -234,7 +234,7 @@ impl WsTest for BasicSubscriptionsTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } @@ -403,7 +403,7 @@ impl WsTest for LogSubscriptionsTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 149e6b3ccb0..46b84c34061 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -1,11 +1,15 @@ +use std::sync::Arc; + use anyhow::Context as _; use tokio::runtime::Handle; -use zksync_concurrency::{ctx, error::Wrap as _}; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; use zksync_consensus_roles::attester; use zksync_state::PostgresStorage; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ethabi, fee::Fee, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256}; -use zksync_vm_executor::oneshot::{CallOrExecute, MainOneshotExecutor, OneshotEnvParameters}; +use zksync_vm_executor::oneshot::{ + CallOrExecute, MainOneshotExecutor, MultiVMBaseSystemContracts, OneshotEnvParameters, +}; use zksync_vm_interface::{ executor::OneshotExecutor, ExecutionResult, OneshotTracingParams, TxExecutionArgs, }; @@ -23,16 +27,17 @@ pub(crate) struct VM { impl VM { /// Constructs a new `VM` instance. pub async fn new(pool: ConnectionPool) -> Self { + let base_system_contracts = + scope::wait_blocking(MultiVMBaseSystemContracts::load_eth_call_blocking).await; Self { pool, // L2 chain ID and fee account don't seem to matter for calls, hence the use of default values. - options: OneshotEnvParameters::for_execution( + options: OneshotEnvParameters::new( + Arc::new(base_system_contracts), L2ChainId::default(), AccountTreeId::default(), u32::MAX, - ) - .await - .expect("OneshotExecutorOptions"), + ), executor: MainOneshotExecutor::new(usize::MAX), } } From 93d2575e2a122d1395536da4403ccd570c85cf88 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Wed, 16 Oct 2024 11:42:26 +0300 Subject: [PATCH 072/140] feat(contract-verifier): add compiler 1.5.6 (#3104) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ add zksolc 1.5.6 ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
- [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- docker/contract-verifier/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7ed18626a1b..80938e4ef83 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -47,7 +47,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 5); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 6); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ From 0edd7962429b3530ae751bd7cc947c97193dd0ca Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Wed, 16 Oct 2024 15:11:57 +0300 Subject: [PATCH 073/140] feat: Prover e2e test (#2975) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add workflow that runs proving for a genesis batch. Update dockerfiles and docker compose for GPU Add circuit prover to zkstack CLI. Fix HTTP URL for prover gateway. ## Why ❔ To detect possible runtime issues. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/ci-prover-e2e.yml | 127 ++++++++++++++++++ .github/workflows/ci.yml | 6 + .github/workflows/zk-environment-publish.yml | 6 +- .../batch_availability_checker | 40 ++++++ bin/prover_checkers/batch_l1_status_checker | 54 ++++++++ bin/prover_checkers/kill_prover | 12 ++ .../prover_jobs_status_checker | 42 ++++++ core/node/proof_data_handler/src/lib.rs | 2 +- docker-compose-gpu-runner-cuda-12-0.yml | 13 +- docker-compose-gpu-runner.yml | 7 +- ...rfile => 22.04_amd64_cuda_11_8.Dockerfile} | 23 ++-- ...rfile => 22.04_amd64_cuda_12_0.Dockerfile} | 21 +-- prover/crates/lib/prover_fri_types/src/lib.rs | 4 +- .../src/commands/chain/init/configs.rs | 12 ++ .../zkstack/src/commands/prover/args/init.rs | 62 +++++++-- .../commands/prover/args/init_bellman_cuda.rs | 33 +++-- .../zkstack/src/commands/prover/args/run.rs | 69 +++++++++- .../crates/zkstack/src/commands/prover/run.rs | 9 +- zkstack_cli/crates/zkstack/src/consts.rs | 2 + zkstack_cli/crates/zkstack/src/messages.rs | 2 + 20 files changed, 490 insertions(+), 56 deletions(-) create mode 100644 .github/workflows/ci-prover-e2e.yml create mode 100644 bin/prover_checkers/batch_availability_checker create mode 100755 bin/prover_checkers/batch_l1_status_checker create mode 100644 bin/prover_checkers/kill_prover create mode 100755 bin/prover_checkers/prover_jobs_status_checker rename docker/zk-environment/{20.04_amd64_cuda_11_8.Dockerfile => 22.04_amd64_cuda_11_8.Dockerfile} (95%) rename docker/zk-environment/{20.04_amd64_cuda_12_0.Dockerfile => 22.04_amd64_cuda_12_0.Dockerfile} (96%) diff --git a/.github/workflows/ci-prover-e2e.yml b/.github/workflows/ci-prover-e2e.yml new file mode 100644 index 00000000000..105ae1f1485 --- /dev/null +++ b/.github/workflows/ci-prover-e2e.yml @@ -0,0 +1,127 @@ +name: Workflow for testing prover component end-to-end +on: + workflow_call: + +jobs: + e2e-test: + runs-on: [ 
matterlabs-ci-gpu-l4-runner-prover-tests ] + env: + RUNNER_COMPOSE_FILE: "docker-compose-gpu-runner-cuda-12-0.yml" + + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + with: + submodules: "recursive" + fetch-depth: 0 + + - name: Setup environment + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env + + mkdir -p prover_logs + + - name: Start services + run: | + run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull + mkdir -p ./volumes/postgres ./volumes/reth/data + docker-compose -f ${RUNNER_COMPOSE_FILE} --profile runner up -d --wait + ci_run sccache --start-server + + - name: Init + run: | + ci_run git config --global --add safe.directory "*" + ci_run chmod -R +x ./bin + + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + + ci_run zkstack chain create \ + --chain-name proving_chain \ + --chain-id sequential \ + --prover-mode gpu \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode rollup \ + --base-token-address 0x0000000000000000000000000000000000000001 \ + --base-token-price-nominator 1 \ + --base-token-price-denominator 1 \ + --set-as-default true \ + --ignore-prerequisites + + ci_run zkstack ecosystem init --dev --verbose + ci_run zkstack prover init --dev --verbose + + echo "URL=$(grep "http_url" ./chains/proving_chain/configs/general.yaml | awk '{ print $2 }')" >> $GITHUB_ENV + - name: Build prover binaries + run: | + ci_run cargo build --release --workspace --manifest-path=prover/Cargo.toml + - name: Prepare prover subsystem + run: | + ci_run zkstack prover init-bellman-cuda --clone --verbose + ci_run zkstack prover setup-keys --mode=download --region=us --verbose + - name: Run server + run: | + ci_run zkstack server --uring --chain=proving_chain --components=api,tree,eth,state_keeper,commitment_generator,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip &>prover_logs/server.log & + - name: Run Gateway + run: | + ci_run zkstack prover run --component=gateway --docker=false &>prover_logs/gateway.log & + - name: Run Prover Job Monitor + run: | + ci_run zkstack prover run --component=prover-job-monitor --docker=false &>prover_logs/prover-job-monitor.log & + - name: Wait for batch to be passed through gateway + env: + DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain + BATCH_NUMBER: 1 + INTERVAL: 30 + TIMEOUT: 300 + run: | + PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \ + ci_run ./bin/prover_checkers/batch_availability_checker + - name: Run Witness Generator + run: | + ci_run zkstack prover run --component=witness-generator --round=all-rounds --docker=false &>prover_logs/witness-generator.log & + - name: Run Circuit Prover + run: | + ci_run zkstack prover run --component=circuit-prover --witness-vector-generator-count=10 --docker=false &>prover_logs/circuit_prover.log & + - name: Wait for prover jobs to finish + env: + DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain + BATCH_NUMBER: 1 + INTERVAL: 30 + TIMEOUT: 1200 + run: | + PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \ + ci_run 
./bin/prover_checkers/prover_jobs_status_checker + + - name: Kill prover & start compressor + run: | + sudo ./bin/prover_checkers/kill_prover + + ci_run zkstack prover run --component=compressor --docker=false &>prover_logs/compressor.log & + - name: Wait for batch to be executed on L1 + env: + DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain + BATCH_NUMBER: 1 + INTERVAL: 30 + TIMEOUT: 600 + run: | + PASSED_ENV_VARS="BATCH_NUMBER,DATABASE_URL,URL,INTERVAL,TIMEOUT" \ + ci_run ./bin/prover_checkers/batch_l1_status_checker + + - name: Upload logs + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + if: always() + with: + name: prover_logs + path: prover_logs + + - name: Show sccache logs + if: always() + run: | + ci_run sccache --show-stats || true + ci_run cat /tmp/sccache_log.txt || true diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fd9dedf8af4..47ae3c51751 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -94,6 +94,12 @@ jobs: name: CI for Prover Components uses: ./.github/workflows/ci-prover-reusable.yml + e2e-for-prover: + name: E2E Test for Prover Components + needs: changed_files + if: ${{(needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} + uses: ./.github/workflows/ci-prover-e2e.yml + ci-for-docs: needs: changed_files if: needs.changed_files.outputs.docs == 'true' diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index 73303d15cb3..b9321c8f5d6 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -49,10 +49,10 @@ jobs: - docker/zk-environment/Dockerfile - .github/workflows/zk-environment-publish.yml zk_env_cuda_11_8: - - docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile + - docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile - .github/workflows/zk-environment-publish.yml zk_env_cuda_12: - - docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile + - docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile - .github/workflows/zk-environment-publish.yml get_short_sha: @@ -245,7 +245,7 @@ jobs: if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 with: - file: docker/zk-environment/20.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile + file: docker/zk-environment/22.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile push: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/main' ) || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-${{ matrix.cuda_version }}:latest diff --git a/bin/prover_checkers/batch_availability_checker b/bin/prover_checkers/batch_availability_checker new file mode 100644 index 00000000000..ae7aade2f68 --- /dev/null +++ b/bin/prover_checkers/batch_availability_checker @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Configuration +# DATABASE_URL - The URL of the prover database to connect to +# BATCH_NUMBER - The batch number to check availability for +# INTERVAL - Time interval for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +# Loop to query periodically +while true; do 
+ # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." + exit 1 # Exit with non-zero status to fail CI + fi + + # Run the SQL query and capture the result + RESULT=$(psql $DATABASE_URL -c "SELECT count(*) FROM witness_inputs_fri WHERE l1_batch_number = $BATCH_NUMBER;" -t -A) + + # Check if the result is 1 + if [ "$RESULT" -eq 1 ]; then + echo "Query result is 1. Success!" + exit 0 # Exit with zero status to succeed CI + else + echo "Batch is not available yet. Retrying in $INTERVAL seconds..." + fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/bin/prover_checkers/batch_l1_status_checker b/bin/prover_checkers/batch_l1_status_checker new file mode 100755 index 00000000000..24f26e354ea --- /dev/null +++ b/bin/prover_checkers/batch_l1_status_checker @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Needs following configuration +# URL - URL of the API endpoint +# INTERVAL - Time interval for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +echo "URL: $URL" + +# Loop to query periodically +while true; do + # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." + exit 1 # Exit with non-zero status to fail CI + fi + + # Run the curl request and capture the response + RESPONSE=$(curl --silent --request POST \ + --url $URL \ + --header 'Content-Type: application/json' \ + --data '{ + "jsonrpc": "2.0", + "id": 1, + "method": "zks_getBlockDetails", + "params": [1] + }') + + # Parse the executedAt field using jq + EXECUTED_AT=$(echo $RESPONSE | jq -r '.result.executedAt') + + # Check if executedAt is not null + if [ "$EXECUTED_AT" != "null" ] && [ -n "$EXECUTED_AT" ]; then + echo "executedAt is not null: $EXECUTED_AT" + echo "true" + exit 0 # Exit with zero status to succeed CI + else + DATABASE_STATUS=$(psql $DATABASE_URL -c "SELECT status FROM proof_compression_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER;" -t -A) + echo "executedAt is null, database status is $DATABASE_STATUS, retrying in $INTERVAL seconds..." + fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/bin/prover_checkers/kill_prover b/bin/prover_checkers/kill_prover new file mode 100644 index 00000000000..2a65aea2d67 --- /dev/null +++ b/bin/prover_checkers/kill_prover @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Use pkill to find and kill processes using circuit prover +if ! pkill -f 'zksync_circuit_prover|zkstack prover run --component=circuit-prover'; then + echo "No processes are currently using the GPU." + exit 0 +fi + +echo "All GPU-related processes have been killed." 
diff --git a/bin/prover_checkers/prover_jobs_status_checker b/bin/prover_checkers/prover_jobs_status_checker new file mode 100755 index 00000000000..6816d9a2d14 --- /dev/null +++ b/bin/prover_checkers/prover_jobs_status_checker @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Configuration +# DATABASE_URL - The URL of the prover database to connect to +# BATCH_NUMBER - The batch number to check readiness for +# INTERVAL - Time interval for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +# Loop to query periodically +while true; do + # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." + exit 1 # Exit with non-zero status to fail CI + fi + + # Run the SQL query and capture the result + RESULT=$(psql $DATABASE_URL -c "SELECT count(*) FROM proof_compression_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER AND status = 'queued';" -t -A) + + # Check if the result is 1 + if [ "$RESULT" -eq 1 ]; then + echo "Query result is 1. Success!" + exit 0 # Exit with zero status to succeed CI + else + STATUS=$(psql $DATABASE_URL -c "SELECT COUNT(*), status FROM prover_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER GROUP BY status;" -t -A) + echo "Current status is $STATUS" + echo "Retrying in $INTERVAL seconds..." + fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 618a786ea65..51780f03230 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -30,7 +30,7 @@ pub async fn run_server( mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); - tracing::debug!("Starting proof data handler server on {bind_address}"); + tracing::info!("Starting proof data handler server on {bind_address}"); let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode); let listener = tokio::net::TcpListener::bind(bind_address) diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index 35a0faeb962..c930fa376f5 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -3,6 +3,8 @@ services: reth: restart: always image: "ghcr.io/paradigmxyz/reth:v1.0.6" + ports: + - 127.0.0.1:8545:8545 volumes: - type: bind source: ./volumes/reth/data @@ -12,11 +14,9 @@ services: target: /chaindata command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config - ports: - - 127.0.0.1:8545:8545 zk: - image: ghcr.io/matter-labs/zk-environment:cuda-12-0-latest + image: ghcr.io/matter-labs/zk-environment:cuda-12_0-latest depends_on: - reth - postgres @@ -49,11 +49,18 @@ services: - /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools env_file: - ./.env + extra_hosts: + - "host:host-gateway" + profiles: + - runner + network_mode: host + pid: host deploy: resources: reservations: devices: - capabilities: [ gpu ] + postgres: image: "postgres:14" command: postgres -c 'max_connections=200' diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index f95ae0d5f54..32665eb7010 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml 
@@ -16,7 +16,7 @@ services: - 127.0.0.1:8545:8545 zk: - image: "ghcr.io/matter-labs/zk-environment:cuda-11-8-latest" + image: "ghcr.io/matter-labs/zk-environment:cuda-11_8-latest" container_name: zk depends_on: - reth @@ -40,6 +40,11 @@ services: - GITHUB_WORKSPACE=$GITHUB_WORKSPACE env_file: - ./.env + extra_hosts: + - "host:host-gateway" + profiles: + - runner + network_mode: host deploy: resources: reservations: diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile similarity index 95% rename from docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile rename to docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile index 90f089ba8bd..fe44d55acbb 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04@sha256:3246518d9735254519e1b2ff35f95686e4a5011c90c85344c1f38df7bae9dd37 as base +FROM ubuntu:22.04@sha256:3d1556a8a18cf5307b121e0a98e93f1ddf1f3f8e092f1fddfd941254785b95d7 as base # Link Docker Image with repository # https://docs.github.com/en/packages/learn-github-packages/connecting-a-repository-to-a-package#connecting-a-repository-to-a-container-image-using-the-command-line @@ -16,7 +16,7 @@ RUN apt-get update && apt-get install -y \ git \ openssl \ libssl-dev \ - gcc \ + gcc-10 \ g++ \ curl \ pkg-config \ @@ -31,19 +31,19 @@ RUN apt-get update && apt-get install -y \ wget \ bzip2 \ unzip \ - hub + hub \ + curl \ + gnutls-bin git \ + build-essential \ + clang \ + lldb \ + lld # Install dependencies for RocksDB. `liburing` is not available for Ubuntu 20.04, # so we use a PPA with the backport RUN add-apt-repository ppa:savoury1/virtualisation && \ apt-get update && \ apt-get install -y \ - curl \ - gnutls-bin git \ - build-essential \ - clang \ - lldb \ - lld \ liburing-dev \ libclang-dev @@ -83,6 +83,11 @@ RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. 
RUN wget -c https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \ diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile similarity index 96% rename from docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile rename to docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile index b6b023a5b7f..da041b12181 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 as base +FROM ubuntu:22.04@sha256:3d1556a8a18cf5307b121e0a98e93f1ddf1f3f8e092f1fddfd941254785b95d7 as base # Link Docker Image with repository # https://docs.github.com/en/packages/learn-github-packages/connecting-a-repository-to-a-package#connecting-a-repository-to-a-container-image-using-the-command-line @@ -16,7 +16,7 @@ RUN apt-get update && apt-get install -y \ git \ openssl \ libssl-dev \ - gcc \ + gcc-10 \ g++ \ curl \ pkg-config \ @@ -30,18 +30,18 @@ RUN apt-get update && apt-get install -y \ gnupg2 \ postgresql-client \ hub \ - unzip + unzip \ + gnutls-bin \ + build-essential \ + clang \ + lldb\ + lld # Install dependencies for RocksDB. `liburing` is not available for Ubuntu 20.04, # so we use a PPA with the backport RUN add-apt-repository ppa:savoury1/virtualisation && \ apt-get update && \ apt-get install -y \ - gnutls-bin \ - build-essential \ - clang \ - lldb\ - lld \ liburing-dev \ libclang-dev @@ -81,6 +81,11 @@ RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. 
RUN wget -c https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \ diff --git a/prover/crates/lib/prover_fri_types/src/lib.rs b/prover/crates/lib/prover_fri_types/src/lib.rs index 4a8a1b3e406..37e004d54ec 100644 --- a/prover/crates/lib/prover_fri_types/src/lib.rs +++ b/prover/crates/lib/prover_fri_types/src/lib.rs @@ -28,8 +28,8 @@ pub mod keys; pub mod queue; // THESE VALUES SHOULD BE UPDATED ON ANY PROTOCOL UPGRADE OF PROVERS -pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24; -pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(2); +pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version25; +pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(0); pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSemanticVersion { minor: PROVER_PROTOCOL_VERSION, patch: PROVER_PROTOCOL_PATCH, diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs index 37ee2e076ab..82986d9b41a 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs @@ -57,6 +57,18 @@ pub async fn init_configs( } let mut general_config = chain_config.get_general_config()?; + + if general_config.proof_data_handler_config.is_some() && general_config.prover_gateway.is_some() + { + let proof_data_handler_config = general_config.proof_data_handler_config.clone().unwrap(); + let mut prover_gateway = general_config.prover_gateway.clone().unwrap(); + + prover_gateway.api_url = + format!("http://127.0.0.1:{}", proof_data_handler_config.http_port); + + general_config.prover_gateway = Some(prover_gateway); + } + let mut consensus_config = general_config .consensus_config .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs index 94fea1389d2..280b5b2e91d 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs @@ -33,6 +33,9 @@ use crate::{ #[derive(Debug, Clone, Parser, Default)] pub struct ProverInitArgs { + #[clap(long)] + pub dev: bool, + // Proof store object #[clap(long)] pub proof_store_dir: Option, @@ -228,6 +231,10 @@ impl ProverInitArgs { ) -> anyhow::Result { logger::info(MSG_GETTING_PROOF_STORE_CONFIG); + if self.dev { + return Ok(self.handle_file_backed_config(Some(DEFAULT_PROOF_STORE_DIR.to_string()))); + } + if self.proof_store_dir.is_some() { return Ok(self.handle_file_backed_config(self.proof_store_dir.clone())); } @@ -277,6 +284,11 @@ impl ProverInitArgs { shell: &Shell, ) -> anyhow::Result> { logger::info(MSG_GETTING_PUBLIC_STORE_CONFIG); + + if self.dev { + return Ok(None); + } + let shall_save_to_public_bucket = self .shall_save_to_public_bucket .unwrap_or_else(|| PromptConfirm::new(MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT).ask()); @@ -345,6 +357,12 @@ impl ProverInitArgs { &self, default_path: &str, ) -> Option { + if self.dev { + return Some(CompressorKeysArgs { + path: Some(default_path.to_string()), + }); + } + let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| { PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT) .default(false) @@ -363,6 +381,9 @@ impl ProverInitArgs { } fn fill_setup_keys_values_with_prompt(&self) -> Option { + if self.dev { + return None; + } let args = 
self.setup_keys_args.clone(); if self.setup_keys.unwrap_or_else(|| { @@ -475,6 +496,10 @@ impl ProverInitArgs { } fn fill_bellman_cuda_values_with_prompt(&self) -> Option { + if self.dev { + return None; + } + let args = self.bellman_cuda_config.clone(); if self.bellman_cuda.unwrap_or_else(|| { PromptConfirm::new(MSG_INITIALIZE_BELLMAN_CUDA_PROMPT) @@ -488,6 +513,10 @@ impl ProverInitArgs { } fn get_cloud_type_with_prompt(&self) -> CloudConnectionMode { + if self.dev { + return CloudConnectionMode::Local; + } + let cloud_type = self.cloud_type.clone().unwrap_or_else(|| { PromptSelect::new( MSG_CLOUD_TYPE_PROMPT, @@ -503,25 +532,32 @@ impl ProverInitArgs { &self, config: &ChainConfig, ) -> Option { - let setup_database = self - .setup_database - .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask()); + let setup_database = self.dev + || self + .setup_database + .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask()); if setup_database { let DBNames { prover_name, .. } = generate_db_names(config); let chain_name = config.name.clone(); - let dont_drop = self.dont_drop.unwrap_or_else(|| { - !PromptConfirm::new("Do you want to drop the database?") - .default(true) - .ask() - }); + let dont_drop = if !self.dev { + self.dont_drop.unwrap_or_else(|| { + !PromptConfirm::new("Do you want to drop the database?") + .default(true) + .ask() + }) + } else { + false + }; - if self.use_default.unwrap_or_else(|| { - PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP) - .default(true) - .ask() - }) { + if self.dev + || self.use_default.unwrap_or_else(|| { + PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP) + .default(true) + .ask() + }) + { Some(ProverDatabaseConfig { database_config: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), dont_drop, diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs index ba204b0be9e..98a5c78be2a 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs @@ -10,7 +10,9 @@ use crate::messages::{ #[derive(Debug, Clone, Parser, Default, Serialize, Deserialize)] pub struct InitBellmanCudaArgs { - #[clap(long)] + #[clap(long, conflicts_with_all(["bellman_cuda_dir"]))] + pub clone: bool, + #[clap(long, conflicts_with_all(["clone"]))] pub bellman_cuda_dir: Option, } @@ -31,19 +33,26 @@ impl std::fmt::Display for BellmanCudaPathSelection { impl InitBellmanCudaArgs { pub fn fill_values_with_prompt(self) -> InitBellmanCudaArgs { - let bellman_cuda_dir = self.bellman_cuda_dir.unwrap_or_else(|| { - match PromptSelect::new( - MSG_BELLMAN_CUDA_ORIGIN_SELECT, - BellmanCudaPathSelection::iter(), - ) - .ask() - { - BellmanCudaPathSelection::Clone => "".to_string(), - BellmanCudaPathSelection::Path => Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask(), - } - }); + let bellman_cuda_dir = if self.clone { + "".to_string() + } else { + self.bellman_cuda_dir.unwrap_or_else(|| { + match PromptSelect::new( + MSG_BELLMAN_CUDA_ORIGIN_SELECT, + BellmanCudaPathSelection::iter(), + ) + .ask() + { + BellmanCudaPathSelection::Clone => "".to_string(), + BellmanCudaPathSelection::Path => { + Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask() + } + } + }) + }; InitBellmanCudaArgs { + clone: self.clone, bellman_cuda_dir: Some(bellman_cuda_dir), } } diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs 
b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs index 59a82152f1f..d7600ba2d31 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs @@ -8,7 +8,8 @@ use strum::{EnumIter, IntoEnumIterator}; use crate::{ consts::{ - COMPRESSOR_BINARY_NAME, COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE, + CIRCUIT_PROVER_BINARY_NAME, CIRCUIT_PROVER_DOCKER_IMAGE, COMPRESSOR_BINARY_NAME, + COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE, PROVER_GATEWAY_BINARY_NAME, PROVER_GATEWAY_DOCKER_IMAGE, PROVER_JOB_MONITOR_BINARY_NAME, PROVER_JOB_MONITOR_DOCKER_IMAGE, WITNESS_GENERATOR_BINARY_NAME, WITNESS_GENERATOR_DOCKER_IMAGE, WITNESS_VECTOR_GENERATOR_BINARY_NAME, @@ -30,6 +31,8 @@ pub struct ProverRunArgs { pub witness_vector_generator_args: WitnessVectorGeneratorArgs, #[clap(flatten)] pub fri_prover_args: FriProverRunArgs, + #[clap(flatten)] + pub circuit_prover_args: CircuitProverArgs, #[clap(long)] pub docker: Option, } @@ -46,6 +49,8 @@ pub enum ProverComponent { WitnessVectorGenerator, #[strum(to_string = "Prover")] Prover, + #[strum(to_string = "CircuitProver")] + CircuitProver, #[strum(to_string = "Compressor")] Compressor, #[strum(to_string = "ProverJobMonitor")] @@ -59,6 +64,7 @@ impl ProverComponent { Self::WitnessGenerator => WITNESS_GENERATOR_DOCKER_IMAGE, Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE, Self::Prover => PROVER_DOCKER_IMAGE, + Self::CircuitProver => CIRCUIT_PROVER_DOCKER_IMAGE, Self::Compressor => COMPRESSOR_DOCKER_IMAGE, Self::ProverJobMonitor => PROVER_JOB_MONITOR_DOCKER_IMAGE, } @@ -70,6 +76,7 @@ impl ProverComponent { Self::WitnessGenerator => WITNESS_GENERATOR_BINARY_NAME, Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_BINARY_NAME, Self::Prover => PROVER_BINARY_NAME, + Self::CircuitProver => CIRCUIT_PROVER_BINARY_NAME, Self::Compressor => COMPRESSOR_BINARY_NAME, Self::ProverJobMonitor => PROVER_JOB_MONITOR_BINARY_NAME, } @@ -78,10 +85,10 @@ impl ProverComponent { pub fn get_application_args(&self, in_docker: bool) -> anyhow::Result> { let mut application_args = vec![]; - if self == &Self::Prover || self == &Self::Compressor { + if self == &Self::Prover || self == &Self::Compressor || self == &Self::CircuitProver { if in_docker { application_args.push("--gpus=all".to_string()); - } else { + } else if self != &Self::CircuitProver { application_args.push("--features=gpu".to_string()); } } @@ -160,6 +167,26 @@ impl ProverComponent { )); }; } + Self::CircuitProver => { + if args.circuit_prover_args.max_allocation.is_some() { + additional_args.push(format!( + "--max-allocation={}", + args.fri_prover_args.max_allocation.unwrap() + )); + }; + if args + .circuit_prover_args + .witness_vector_generator_count + .is_some() + { + additional_args.push(format!( + "--witness-vector-generator-count={}", + args.circuit_prover_args + .witness_vector_generator_count + .unwrap() + )); + }; + } _ => {} }; @@ -211,6 +238,37 @@ impl WitnessVectorGeneratorArgs { } } +#[derive(Debug, Clone, Parser, Default)] +pub struct CircuitProverArgs { + #[clap(long)] + pub witness_vector_generator_count: Option, + #[clap(long)] + pub max_allocation: Option, +} + +impl CircuitProverArgs { + pub fn fill_values_with_prompt( + self, + component: ProverComponent, + ) -> anyhow::Result { + if component != ProverComponent::CircuitProver { + return Ok(Self::default()); + } + + let witness_vector_generator_count = + self.witness_vector_generator_count.unwrap_or_else(|| { 
+ Prompt::new("Number of WVG jobs to run in parallel") + .default("1") + .ask() + }); + + Ok(CircuitProverArgs { + witness_vector_generator_count: Some(witness_vector_generator_count), + max_allocation: self.max_allocation, + }) + } +} + #[derive(Debug, Clone, Parser, Default)] pub struct FriProverRunArgs { /// Memory allocation limit in bytes (for prover component) @@ -232,6 +290,10 @@ impl ProverRunArgs { .witness_vector_generator_args .fill_values_with_prompt(component)?; + let circuit_prover_args = self + .circuit_prover_args + .fill_values_with_prompt(component)?; + let docker = self.docker.unwrap_or_else(|| { Prompt::new("Do you want to run Docker image for the component?") .default("false") @@ -243,6 +305,7 @@ impl ProverRunArgs { witness_generator_args, witness_vector_generator_args, fri_prover_args: self.fri_prover_args, + circuit_prover_args, docker: Some(docker), }) } diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs index ed2f5b41a86..863816b9ae6 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs @@ -8,7 +8,8 @@ use xshell::{cmd, Shell}; use super::args::run::{ProverComponent, ProverRunArgs}; use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, - MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, + MSG_RUNNING_CIRCUIT_PROVER, MSG_RUNNING_CIRCUIT_PROVER_ERR, MSG_RUNNING_COMPRESSOR, + MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_PROVER_JOB_MONITOR, MSG_RUNNING_PROVER_JOB_MONITOR_ERR, MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR, @@ -49,6 +50,12 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() } (MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR) } + ProverComponent::CircuitProver => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + } + (MSG_RUNNING_CIRCUIT_PROVER, MSG_RUNNING_CIRCUIT_PROVER_ERR) + } ProverComponent::Compressor => { if !in_docker { check_prerequisites(shell, &GPU_PREREQUISITES, false); diff --git a/zkstack_cli/crates/zkstack/src/consts.rs b/zkstack_cli/crates/zkstack/src/consts.rs index df27d2f02d2..ba00af77b5a 100644 --- a/zkstack_cli/crates/zkstack/src/consts.rs +++ b/zkstack_cli/crates/zkstack/src/consts.rs @@ -22,6 +22,7 @@ pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:l pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-vector-generator:latest2.0"; pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; +pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu:latest2.0"; pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; @@ -29,6 +30,7 @@ pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_generator"; pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri"; +pub const CIRCUIT_PROVER_BINARY_NAME: &str = "zksync_circuit_prover"; pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor"; pub const 
PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor"; diff --git a/zkstack_cli/crates/zkstack/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs index d1d86db8398..6d6a1ceb566 100644 --- a/zkstack_cli/crates/zkstack/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -351,6 +351,7 @@ pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job moni pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator"; pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; pub(super) const MSG_RUNNING_PROVER: &str = "Running prover"; +pub(super) const MSG_RUNNING_CIRCUIT_PROVER: &str = "Running circuit prover"; pub(super) const MSG_RUNNING_COMPRESSOR: &str = "Running compressor"; pub(super) const MSG_RUN_COMPONENT_PROMPT: &str = "What component do you want to run?"; pub(super) const MSG_RUNNING_PROVER_GATEWAY_ERR: &str = "Failed to run prover gateway"; @@ -359,6 +360,7 @@ pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR: &str = "Failed to run witness vector generator"; pub(super) const MSG_RUNNING_COMPRESSOR_ERR: &str = "Failed to run compressor"; pub(super) const MSG_RUNNING_PROVER_ERR: &str = "Failed to run prover"; +pub(super) const MSG_RUNNING_CIRCUIT_PROVER_ERR: &str = "Failed to run circuit prover"; pub(super) const MSG_PROOF_STORE_CONFIG_PROMPT: &str = "Select where you would like to store the proofs"; pub(super) const MSG_PROOF_STORE_DIR_PROMPT: &str = From 525ba9f22e3ad8aad0242b27daed4c1f67b293c1 Mon Sep 17 00:00:00 2001 From: Vlad Bochok <41153528+vladbochok@users.noreply.github.com> Date: Wed, 16 Oct 2024 17:53:29 +0400 Subject: [PATCH 074/140] fix(protocol upgrade tool): Remove legacy from protocol upgrade tool (#3064) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Remove the calldata generated for `Governance.sol`. We don't use `Governance.sol` anymore and so this calldata is not needed. ## Why ❔ ## Checklist - [ x PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. --- infrastructure/protocol-upgrade/README.md | 18 +- .../protocol-upgrade/src/transaction.ts | 410 +----------------- 2 files changed, 16 insertions(+), 412 deletions(-) diff --git a/infrastructure/protocol-upgrade/README.md b/infrastructure/protocol-upgrade/README.md index da5ee313dab..c7998b96123 100644 --- a/infrastructure/protocol-upgrade/README.md +++ b/infrastructure/protocol-upgrade/README.md @@ -25,13 +25,15 @@ If not provided as arguments, the tool can retrieve certain values from environm 2. `l2rpc` - `API_WEB3_JSON_RPC_HTTP_URL` 3. `create2-address` - `CONTRACTS_CREATE2_FACTORY_ADDR` 4. `zksync-address` - `CONTRACTS_DIAMOND_PROXY_ADDR` -5. `nonce` - Taken from the node via `l1rpc` -6. `gas-price` - Taken from the node via `l1rpc` -7. `environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`, +5. `upgrade-address` - `CONTRACTS_DEFAULT_UPGRADE_ADDR` +6. `l2-upgrader-address` - `CONTRACTS_L2_DEFAULT_UPGRADE_ADDR` +7. `nonce` - Taken from the node via `l1rpc` +8. `gas-price` - Taken from the node via `l1rpc` +9. `environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`, `testnet2`, `stage2`, `mainnet2`. 
Each upgrade on different environments is performed separately since the contract addresses differ between environments. -8. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it - explicitly. +10. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it + explicitly. ### Create a Protocol Upgrade Proposal @@ -215,8 +217,7 @@ $ zk f yarn start transactions build-default \ --l2-upgrader-address \ --diamond-upgrade-proposal-id \ --l1rpc \ ---zksync-address \ ---use-new-governance +--zksync-address ``` To execute the `proposeTransparentUpgrade` transaction on L1, use the following command: @@ -228,7 +229,6 @@ $ zk f yarn start transactions propose-upgrade \ --gas-price \ --nonce \ --zksync-address \ ---new-governance \ --environment ``` @@ -241,7 +241,6 @@ $ zk f yarn start transactions execute-upgrade \ --gas-price \ --nonce \ --zksync-address \ ---new-governance \ --environment ``` @@ -254,6 +253,5 @@ $ zk f yarn start transactions cancel-upgrade \ --zksync-address \ --gas-price \ --nonce \ ---new-governance \ --environment ``` diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index dfea3a3bfc3..e7a3f32b322 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -3,12 +3,10 @@ import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-c import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1, AdminFacetFactory, - GovernanceFactory, StateTransitionManagerFactory, ChainAdminFactory } from 'l1-contracts/typechain'; import { FacetCut } from 'l1-contracts/src.ts/diamondCut'; -import { IZkSyncFactory } from '../pre-boojum/IZkSyncFactory'; import { ComplexUpgraderFactory } from 'system-contracts/typechain'; import { getCommonDataFileName, @@ -89,7 +87,6 @@ export interface ProposedUpgrade { postUpgradeCalldata: BytesLike; upgradeTimestamp: ethers.BigNumber; newProtocolVersion: BigNumberish; - newAllowList: string; } function buildNoopL2UpgradeTx(): L2CanonicalTransaction { @@ -123,10 +120,8 @@ export function buildProposeUpgrade( bootloaderHash?: BytesLike, defaultAccountHash?: BytesLike, verifier?: string, - newAllowList?: string, l2ProtocolUpgradeTx?: L2CanonicalTransaction ): ProposedUpgrade { - newAllowList = newAllowList ?? ethers.constants.AddressZero; bootloaderHash = bootloaderHash ?? ethers.constants.HashZero; defaultAccountHash = defaultAccountHash ?? ethers.constants.HashZero; l1ContractsUpgradeCalldata = l1ContractsUpgradeCalldata ?? 
'0x'; @@ -142,8 +137,7 @@ export function buildProposeUpgrade( postUpgradeCalldata, upgradeTimestamp, factoryDeps: [], - newProtocolVersion, - newAllowList + newProtocolVersion }; } @@ -171,43 +165,6 @@ export function prepareDefaultCalldataForL2upgrade(forcedDeployments: ForceDeplo return complexUpgraderCalldata; } -interface GovernanceTx { - scheduleCalldata: string; - executeCalldata: string; - operation: any; -} - -function prepareGovernanceTxs(target: string, data: BytesLike): GovernanceTx { - const govCall = { - target: target, - value: 0, - data: data - }; - - const operation = { - calls: [govCall], - predecessor: ethers.constants.HashZero, - salt: ethers.constants.HashZero - }; - - const governance = new GovernanceFactory(); - - // Get transaction data of the `scheduleTransparent` - const scheduleCalldata = governance.interface.encodeFunctionData('scheduleTransparent', [ - operation, - 0 // delay - ]); - - // Get transaction data of the `execute` - const executeCalldata = governance.interface.encodeFunctionData('execute', [operation]); - - return { - scheduleCalldata, - executeCalldata, - operation - }; -} - function prepareChainAdminCalldata(target: string, data: BytesLike): string { const call = { target: target, @@ -221,14 +178,13 @@ function prepareChainAdminCalldata(target: string, data: BytesLike): string { return calldata; } -export function prepareTransparentUpgradeCalldataForNewGovernance( +export function prepareUpgradeCalldata( oldProtocolVersion, oldProtocolVersionDeadline, newProtocolVersion, initCalldata, upgradeAddress: string, facetCuts: FacetCut[], - stmAddress: string, zksyncAddress: string, prepareDirectOperation?: boolean, chainId?: string @@ -247,9 +203,6 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( newProtocolVersion ]); - const { scheduleCalldata: stmScheduleTransparentOperation, executeCalldata: stmExecuteOperation } = - prepareGovernanceTxs(stmAddress, stmUpgradeCalldata); - // Prepare calldata for upgrading diamond proxy let adminFacet = new AdminFacetFactory(); const diamondProxyUpgradeCalldata = adminFacet.interface.encodeFunctionData('upgradeChainFromVersion', [ @@ -257,30 +210,12 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( diamondCut ]); - const { - scheduleCalldata: scheduleTransparentOperation, - executeCalldata: executeOperation, - operation: governanceOperation - } = prepareGovernanceTxs(zksyncAddress, diamondProxyUpgradeCalldata); - - const newExecuteChainUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); - - const legacyScheduleTransparentOperation = adminFacet.interface.encodeFunctionData('executeUpgrade', [diamondCut]); - const { scheduleCalldata: legacyScheduleOperation, executeCalldata: legacyExecuteOperation } = prepareGovernanceTxs( - zksyncAddress, - legacyScheduleTransparentOperation - ); + const chainAdminUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); let result: any = { - stmScheduleTransparentOperation, - stmExecuteOperation, - scheduleTransparentOperation, - executeOperation, - newExecuteChainUpgradeCalldata, - diamondCut, - governanceOperation, - legacyScheduleOperation, - legacyExecuteOperation + stmUpgradeCalldata, + chainAdminUpgradeCalldata, + diamondCut }; if (prepareDirectOperation) { @@ -290,13 +225,9 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( const stmDirecUpgradeCalldata = stm.interface.encodeFunctionData('executeUpgrade', [chainId, diamondCut]); - const { 
scheduleCalldata: stmScheduleOperationDirect, executeCalldata: stmExecuteOperationDirect } = - prepareGovernanceTxs(stmAddress, stmDirecUpgradeCalldata); - result = { ...result, - stmScheduleOperationDirect, - stmExecuteOperationDirect + stmDirecUpgradeCalldata }; } @@ -305,14 +236,10 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( export function buildDefaultUpgradeTx( environment, - diamondUpgradeProposalId, upgradeAddress, - l2UpgraderAddress, oldProtocolVersion, oldProtocolVersionDeadline, upgradeTimestamp, - newAllowList, - stmAddress, zksyncAddress, postUpgradeCalldataFlag, prepareDirectOperation?, @@ -389,20 +316,18 @@ export function buildDefaultUpgradeTx( bootloaderHash, defaultAAHash, cryptoVerifierAddress, - newAllowList, l2UpgradeTx ); let l1upgradeCalldata = prepareDefaultCalldataForL1upgrade(proposeUpgradeTx); - let upgradeData = prepareTransparentUpgradeCalldataForNewGovernance( + let upgradeData = prepareUpgradeCalldata( oldProtocolVersion, oldProtocolVersionDeadline, packedNewProtocolVersion, l1upgradeCalldata, upgradeAddress, facetCuts, - stmAddress, zksyncAddress, prepareDirectOperation, chainId @@ -414,7 +339,6 @@ export function buildDefaultUpgradeTx( upgradeAddress, protocolVersionSemVer: newProtocolVersionSemVer, packedProtocolVersion: packedNewProtocolVersion, - diamondUpgradeProposalId, upgradeTimestamp, ...upgradeData }; @@ -423,31 +347,6 @@ export function buildDefaultUpgradeTx( console.log('Default upgrade transactions are generated'); } -async function sendTransaction( - calldata: BytesLike, - privateKey: string, - l1rpc: string, - to: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number -) { - const wallet = getWallet(l1rpc, privateKey); - gasPrice = gasPrice ?? (await wallet.provider.getGasPrice()); - nonce = nonce ?? 
(await wallet.getTransactionCount()); - const tx = await wallet.sendTransaction({ - to, - data: calldata, - value: 0, - gasLimit: 10_000_000, - gasPrice, - nonce - }); - console.log('Transaction hash: ', tx.hash); - await tx.wait(); - console.log('Transaction is executed'); -} - export function getWallet(l1rpc, privateKey) { if (!l1rpc) { l1rpc = web3Url(); @@ -462,99 +361,6 @@ export function getWallet(l1rpc, privateKey) { ).connect(provider); } -async function sendPreparedTx( - privateKey: string, - l1rpc: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number, - governanceAddr: string, - transactionsJsonField: string, - logText: string -) { - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - const calldata = transactions[transactionsJsonField]; - - console.log(`${logText} for protocolVersion ${transactions.protocolVersion}`); - await sendTransaction(calldata, privateKey, l1rpc, governanceAddr, environment, gasPrice, nonce); -} - -async function cancelUpgrade( - privateKey: string, - l1rpc: string, - zksyncAddress: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number, - execute: boolean, - newGovernanceAddress: string -) { - if (newGovernanceAddress != null) { - let wallet = getWallet(l1rpc, privateKey); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - - let governance = GovernanceFactory.connect(newGovernanceAddress, wallet); - const operation = transactions.governanceOperation; - - const operationId = await governance.hashOperation(operation); - - console.log(`Cancel upgrade operation with id: ${operationId}`); - if (execute) { - const tx = await governance.cancel(operationId); - await tx.wait(); - console.log('Operation canceled'); - } else { - const calldata = governance.interface.encodeFunctionData('cancel', [operationId]); - console.log(`Cancel upgrade calldata: ${calldata}`); - } - } else { - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - let wallet = getWallet(l1rpc, privateKey); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - - const transparentUpgrade = transactions.transparentUpgrade; - const diamondUpgradeProposalId = transactions.diamondUpgradeProposalId; - - const proposalHash = await zkSync.upgradeProposalHash( - transparentUpgrade, - diamondUpgradeProposalId, - ethers.constants.HashZero - ); - - console.log(`Cancel upgrade with hash: ${proposalHash}`); - let cancelUpgradeCalldata = zkSync.interface.encodeFunctionData('cancelUpgradeProposal', [proposalHash]); - if (execute) { - await sendTransaction( - cancelUpgradeCalldata, - privateKey, - l1rpc, - zksyncAddress, - environment, - gasPrice, - nonce - ); - } else { - console.log(`Cancel upgrade calldata: ${cancelUpgradeCalldata}`); - } - } -} - -async function getNewDiamondUpgradeProposalId(l1rpc: string, zksyncAddress: string) { - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - // We don't care about the wallet here, we just need to make a get call. 
- let wallet = getWallet(l1rpc, undefined); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - let proposalId = await zkSync.getCurrentProposalId(); - proposalId = proposalId.add(1); - console.log( - `New proposal id: ${proposalId} for ${zksyncAddress} network: ${JSON.stringify( - await wallet.provider.getNetwork() - )}` - ); - return proposalId; -} - export const command = new Command('transactions').description( 'prepare the transactions and their calldata for the upgrade' ); @@ -564,223 +370,23 @@ command .requiredOption('--upgrade-timestamp ') .option('--upgrade-address ') .option('--environment ') - .option('--new-allow-list ') - .option('--l2-upgrader-address ') - .option('--diamond-upgrade-proposal-id ') .option('--old-protocol-version ') .option('--old-protocol-version-deadline ') .option('--l1rpc ') .option('--zksync-address ') - .option('--state-transition-manager-address ') .option('--chain-id ') .option('--prepare-direct-operation ') - .option('--use-new-governance') .option('--post-upgrade-calldata') .action(async (options) => { - if (!options.useNewGovernance) { - // TODO(X): remove old governance functionality from the protocol upgrade tool - throw new Error('Old governance is not supported anymore'); - } - - let diamondUpgradeProposalId = options.diamondUpgradeProposalId; - if (!diamondUpgradeProposalId && !options.useNewGovernance) { - diamondUpgradeProposalId = await getNewDiamondUpgradeProposalId(options.l1rpc, options.zksyncAddress); - } - buildDefaultUpgradeTx( options.environment, - diamondUpgradeProposalId, options.upgradeAddress, - options.l2UpgraderAddress, options.oldProtocolVersion, options.oldProtocolVersionDeadline, options.upgradeTimestamp, - options.newAllowList, - options.stateTransitionManagerAddress, options.zksyncAddress, options.postUpgradeCalldata, options.prepareDirectOperation, options.chainId ); }); - -command - .command('propose-upgrade-stm') - .option('--environment ') - .option('--private-key ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmScheduleTransparentOperation', - 'Proposing upgrade for STM' - ); - }); - -command - .command('execute-upgrade-stm') - .option('--environment ') - .option('--private-key ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmExecuteOperation', - 'Executing upgrade for STM' - ); - }); - -command - .command('propose-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'scheduleTransparentOperation', - 'Proposing 
"upgradeChainFromVersion" upgrade' - ); - }); - -command - .command('execute-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'executeOperation', - 'Executing "upgradeChainFromVersion" upgrade' - ); - }); - -command - .command('propose-upgrade-direct') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmScheduleOperationDirect', - 'Executing direct upgrade via STM' - ); - }); - -command - .command('execute-upgrade-direct') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmExecuteOperationDirect', - 'Executing direct upgrade via STM' - ); - }); - -command - .command('cancel-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--execute') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await cancelUpgrade( - options.privateKey, - options.l1rpc, - options.zksyncAddress, - options.environment, - options.gasPrice, - options.nonce, - options.execute, - options.newGovernance - ); - }); From 8790240f95211b586df6ac5a9a0c1d948b425aa7 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Wed, 16 Oct 2024 21:26:22 +0700 Subject: [PATCH 075/140] fix(ci): Conditional to build contracts in new CI (#3106) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Do not build contracts if not needed in new CI ## Why ❔ Speed up CI ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .github/workflows/ci.yml | 5 ++++ .../new-build-contract-verifier-template.yml | 25 ++++++------------- .github/workflows/new-build-core-template.yml | 25 ++++++------------- 3 files changed, 21 insertions(+), 34 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 47ae3c51751..2f29fe98f0e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,6 +42,9 @@ jobs: - '!prover/extract-setup-data-keys.sh' - 'docker/prover*/**' - '.github/workflows/build-prover-template.yml' + - '.github/workflows/new-build-prover-template.yml' + - '.github/workflows/build-witness-generator-template.yml' + - '.github/workflows/new-build-witness-generator-template.yml' - '.github/workflows/ci-prover-reusable.yml' - 'docker-compose-runner-nightly.yml' - '!**/*.md' @@ -53,7 +56,9 @@ jobs: - 'docker/external-node/**' - 'docker/server/**' - '.github/workflows/build-core-template.yml' + - '.github/workflows/new-build-core-template.yml' - '.github/workflows/build-contract-verifier-template.yml' + - '.github/workflows/new-build-contract-verifier-template.yml' - '.github/workflows/ci-core-reusable.yml' - '.github/workflows/ci-core-lint-reusable.yml' - 'Cargo.toml' diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml index b5286782fad..9b23cda6f02 100644 --- a/.github/workflows/new-build-contract-verifier-template.yml +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -74,36 +74,30 @@ jobs: tar -C ./contracts -zxf system-contracts.tar.gz - name: Install Apt dependencies + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config - name: Install Node + if: env.BUILD_CONTRACTS == 'true' uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 20 cache: 'npm' - name: Install Yarn + if: env.BUILD_CONTRACTS == 'true' run: npm install -g yarn - name: Setup rust + if: env.BUILD_CONTRACTS == 'true' uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 with: toolchain: nightly-2024-08-01 - - name: Install cargo-nextest from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: cargo-nextest - - - name: Install sqlx-cli from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: sqlx-cli - tag: 0.8.1 - - name: Install foundry-zksync + if: env.BUILD_CONTRACTS == 'true' run: | mkdir ./foundry-zksync curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz @@ -112,6 +106,7 @@ jobs: echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | # Download needed versions of vyper compiler @@ -129,18 +124,14 @@ jobs: chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" done - - name: init - shell: bash - run: | - mkdir -p ./volumes/postgres - docker compose up -d postgres - - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup zkstackup --local || true - name: build contracts + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | cp etc/tokens/{test,localhost}.json diff --git a/.github/workflows/new-build-core-template.yml 
b/.github/workflows/new-build-core-template.yml index e8a41a7e064..c4aeb9180fd 100644 --- a/.github/workflows/new-build-core-template.yml +++ b/.github/workflows/new-build-core-template.yml @@ -79,36 +79,30 @@ jobs: tar -C ./contracts -zxf system-contracts.tar.gz - name: Install Apt dependencies + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config - name: Install Node + if: env.BUILD_CONTRACTS == 'true' uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 20 cache: 'npm' - name: Install Yarn + if: env.BUILD_CONTRACTS == 'true' run: npm install -g yarn - name: Setup rust + if: env.BUILD_CONTRACTS == 'true' uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 with: toolchain: nightly-2024-08-01 - - name: Install cargo-nextest from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: cargo-nextest - - - name: Install sqlx-cli from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: sqlx-cli - tag: 0.8.1 - - name: Install foundry-zksync + if: env.BUILD_CONTRACTS == 'true' run: | mkdir ./foundry-zksync curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz @@ -117,6 +111,7 @@ jobs: echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | # Download needed versions of vyper compiler @@ -134,18 +129,14 @@ jobs: chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" done - - name: init - shell: bash - run: | - mkdir -p ./volumes/postgres - docker compose up -d postgres - - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup zkstackup --local || true - name: build contracts + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | cp etc/tokens/{test,localhost}.json From 223e4dd59414904f2b26afffc4b72bb78266b783 Mon Sep 17 00:00:00 2001 From: Danil Date: Wed, 16 Oct 2024 16:53:02 +0200 Subject: [PATCH 076/140] fix(zkstack): Move installation always to .local/bin (#3108) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- zkstack_cli/zkstackup/install | 3 +-- zkstack_cli/zkstackup/zkstackup | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/zkstack_cli/zkstackup/install b/zkstack_cli/zkstackup/install index f20ba4dd545..849f0699bc3 100755 --- a/zkstack_cli/zkstackup/install +++ b/zkstack_cli/zkstackup/install @@ -3,8 +3,7 @@ set -eo pipefail BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/zkstackup" -HOME_DIR=${XDG_CONFIG_HOME:-$HOME} -BIN_DIR="$HOME_DIR/.local/bin" +BIN_DIR="$HOME/.local/bin" BIN_PATH="$BIN_DIR/zkstackup" main() { diff --git a/zkstack_cli/zkstackup/zkstackup b/zkstack_cli/zkstackup/zkstackup index 20a061620f9..e91bbc17905 100755 --- a/zkstack_cli/zkstackup/zkstackup +++ b/zkstack_cli/zkstackup/zkstackup @@ -1,8 +1,7 @@ #!/usr/bin/env bash set -eo pipefail -HOME_DIR=${XDG_CONFIG_HOME:-$HOME} -LOCAL_DIR=${LOCAL_DIR:-"$HOME_DIR/.local"} +LOCAL_DIR="$HOME/.local/" BIN_DIR="$LOCAL_DIR/bin" BINS=() From 30ddb292977340beab37a81f75c35480cbdd59d3 Mon Sep 17 00:00:00 2001 From: Danil Date: Wed, 16 Oct 2024 16:53:26 +0200 Subject: [PATCH 077/140] fix(call_tracer): Flat call tracer fixes for blocks (#3095) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Make flat call tracer more compatible ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --------- Signed-off-by: Danil --- ...16fe37110ebc3fb3981b2626a0bf2edd00e69.json | 40 +++++++++ ...9027b18d108a05f5855115ba36045e3b1850.json} | 12 ++- ...c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json | 28 ------- core/lib/dal/src/blocks_web3_dal.rs | 32 ++++++-- core/lib/dal/src/transactions_dal.rs | 23 ++++-- core/lib/types/src/api/mod.rs | 9 +- core/lib/types/src/debug_flat_call.rs | 17 ++++ .../api_server/src/web3/namespaces/debug.rs | 82 +++++++++---------- core/node/api_server/src/web3/tests/debug.rs | 29 +++---- 9 files changed, 163 insertions(+), 109 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json rename core/lib/dal/.sqlx/{query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json => query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json} (50%) delete mode 100644 core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json diff --git a/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json b/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json new file mode 100644 index 00000000000..189e28f565d --- /dev/null +++ b/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version,\n index_in_block,\n miniblocks.number AS \"miniblock_number!\",\n miniblocks.hash AS \"miniblocks_hash!\"\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "miniblock_number!", + 
"type_info": "Int8" + }, + { + "ordinal": 3, + "name": "miniblocks_hash!", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + true, + true, + false, + false + ] + }, + "hash": "0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69" +} diff --git a/core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json b/core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json similarity index 50% rename from core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json rename to core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json index 06d3461c3fa..4f44879b6ec 100644 --- a/core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json +++ b/core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json @@ -1,12 +1,17 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n protocol_version,\n hash\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { "ordinal": 0, "name": "protocol_version", "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "hash", + "type_info": "Bytea" } ], "parameters": { @@ -15,8 +20,9 @@ ] }, "nullable": [ - true + true, + false ] }, - "hash": "894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2" + "hash": "2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850" } diff --git a/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json b/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json deleted file mode 100644 index 3b8accb4fda..00000000000 --- a/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version,\n index_in_block\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "index_in_block", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Bytea" - ] - }, - "nullable": [ - true, - true - ] - }, - "hash": "96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d" -} diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 829e15b5710..4cb57798638 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -5,6 +5,7 @@ use zksync_db_connection::{ use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, + debug_flat_call::CallTraceMeta, fee_model::BatchFeeInput, l2_to_l1_log::L2ToL1Log, web3::{BlockHeader, Bytes}, @@ -531,11 +532,12 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_traces_for_l2_block( &mut self, block_number: L2BlockNumber, - ) -> DalResult> { - let protocol_version = sqlx::query!( + ) -> DalResult> { + let row = sqlx::query!( r#" SELECT - protocol_version + protocol_version, + hash FROM miniblocks WHERE @@ -543,14 +545,20 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(block_number.0) ) - .try_map(|row| row.protocol_version.map(parse_protocol_version).transpose()) + .try_map(|row| { + row.protocol_version + 
.map(parse_protocol_version) + .transpose() + .map(|val| (val, H256::from_slice(&row.hash))) + }) .instrument("get_traces_for_l2_block#get_l2_block_protocol_version_id") .with_arg("l2_block_number", &block_number) .fetch_optional(self.storage) .await?; - let Some(protocol_version) = protocol_version else { + let Some((protocol_version, block_hash)) = row else { return Ok(Vec::new()); }; + let protocol_version = protocol_version.unwrap_or_else(ProtocolVersionId::last_potentially_undefined); @@ -577,9 +585,15 @@ impl BlocksWeb3Dal<'_, '_> { .await? .into_iter() .map(|call_trace| { - let hash = H256::from_slice(&call_trace.tx_hash); + let tx_hash = H256::from_slice(&call_trace.tx_hash); let index = call_trace.tx_index_in_block.unwrap_or_default() as usize; - (call_trace.into_call(protocol_version), hash, index) + let meta = CallTraceMeta { + index_in_block: index, + tx_hash, + block_number: block_number.0, + block_hash, + }; + (call_trace.into_call(protocol_version), meta) }) .collect()) } @@ -1105,9 +1119,9 @@ mod tests { .await .unwrap(); assert_eq!(traces.len(), 2); - for ((trace, hash, _index), tx_result) in traces.iter().zip(&tx_results) { + for ((trace, meta), tx_result) in traces.iter().zip(&tx_results) { let expected_trace = tx_result.call_trace().unwrap(); - assert_eq!(&tx_result.hash, hash); + assert_eq!(tx_result.hash, meta.tx_hash); assert_eq!(*trace, expected_trace); } } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 67c965312bd..5314e9799b3 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -10,9 +10,10 @@ use zksync_db_connection::{ utils::pg_interval_from_duration, }; use zksync_types::{ - block::L2BlockExecutionData, l1::L1Tx, l2::L2Tx, protocol_upgrade::ProtocolUpgradeTx, Address, - ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, L2BlockNumber, PriorityOpId, - ProtocolVersionId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, + block::L2BlockExecutionData, debug_flat_call::CallTraceMeta, l1::L1Tx, l2::L2Tx, + protocol_upgrade::ProtocolUpgradeTx, Address, ExecuteTransactionCommon, L1BatchNumber, + L1BlockNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, Transaction, H256, + PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::u256_to_big_decimal; use zksync_vm_interface::{ @@ -2131,12 +2132,17 @@ impl TransactionsDal<'_, '_> { Ok(data) } - pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { + pub async fn get_call_trace( + &mut self, + tx_hash: H256, + ) -> DalResult> { let row = sqlx::query!( r#" SELECT protocol_version, - index_in_block + index_in_block, + miniblocks.number AS "miniblock_number!", + miniblocks.hash AS "miniblocks_hash!" 
FROM transactions INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number @@ -2177,7 +2183,12 @@ impl TransactionsDal<'_, '_> { .map(|call_trace| { ( parse_call_trace(&call_trace.call_trace, protocol_version), - row.index_in_block.unwrap_or_default() as usize, + CallTraceMeta { + index_in_block: row.index_in_block.unwrap_or_default() as usize, + tx_hash, + block_number: row.miniblock_number as u32, + block_hash: H256::from_slice(&row.miniblocks_hash), + }, ) })) } diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 103b6de1fb3..1c7672264cb 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -14,8 +14,9 @@ pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; use crate::{ - debug_flat_call::DebugCallFlat, protocol_version::L1VerifierConfig, Address, L2BlockNumber, - ProtocolVersionId, + debug_flat_call::{DebugCallFlat, ResultDebugCallFlat}, + protocol_version::L1VerifierConfig, + Address, L2BlockNumber, ProtocolVersionId, }; pub mod en; @@ -763,11 +764,11 @@ pub enum BlockStatus { #[serde(untagged)] pub enum CallTracerBlockResult { CallTrace(Vec), - FlatCallTrace(Vec), + FlatCallTrace(Vec), } impl CallTracerBlockResult { - pub fn unwrap_flat(self) -> Vec { + pub fn unwrap_flat(self) -> Vec { match self { Self::CallTrace(_) => panic!("Result is a FlatCallTrace"), Self::FlatCallTrace(trace) => trace, diff --git a/core/lib/types/src/debug_flat_call.rs b/core/lib/types/src/debug_flat_call.rs index 89a008b5fb5..5809026e521 100644 --- a/core/lib/types/src/debug_flat_call.rs +++ b/core/lib/types/src/debug_flat_call.rs @@ -3,6 +3,13 @@ use zksync_basic_types::{web3::Bytes, U256}; use crate::{api::DebugCallType, Address, H256}; +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ResultDebugCallFlat { + pub tx_hash: H256, + pub result: Vec, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct DebugCallFlat { @@ -12,6 +19,8 @@ pub struct DebugCallFlat { pub trace_address: Vec, pub transaction_position: usize, pub transaction_hash: H256, + pub block_number: u32, + pub block_hash: H256, pub r#type: DebugCallType, } @@ -32,3 +41,11 @@ pub struct CallResult { pub output: Bytes, pub gas_used: U256, } + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct CallTraceMeta { + pub index_in_block: usize, + pub tx_hash: H256, + pub block_number: u32, + pub block_hash: H256, +} diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index e296fe87faa..726f35ac29a 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -7,7 +7,7 @@ use zksync_types::{ BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, DebugCall, DebugCallType, ResultDebugCall, SupportedTracers, TracerConfig, }, - debug_flat_call::{Action, CallResult, DebugCallFlat}, + debug_flat_call::{Action, CallResult, CallTraceMeta, DebugCallFlat, ResultDebugCallFlat}, l2::L2Tx, transaction_request::CallRequest, web3, H256, U256, @@ -31,8 +31,7 @@ impl DebugNamespace { pub(crate) fn map_call( call: Call, - index: usize, - transaction_hash: H256, + meta: CallTraceMeta, tracer_option: TracerConfig, ) -> CallTracerResult { match tracer_option.tracer { @@ -42,14 +41,13 @@ impl DebugNamespace { )), SupportedTracers::FlatCallTracer => { let mut calls = vec![]; - let mut traces = vec![index]; 
+ let mut traces = vec![meta.index_in_block]; Self::flatten_call( call, &mut calls, &mut traces, tracer_option.tracer_config.only_top_call, - index, - transaction_hash, + &meta, ); CallTracerResult::FlatCallTrace(calls) } @@ -89,8 +87,7 @@ impl DebugNamespace { calls: &mut Vec, trace_address: &mut Vec, only_top_call: bool, - transaction_position: usize, - transaction_hash: H256, + meta: &CallTraceMeta, ) { let subtraces = call.calls.len(); let debug_type = match call.r#type { @@ -120,22 +117,17 @@ impl DebugNamespace { result, subtraces, trace_address: trace_address.clone(), // Clone the current trace address - transaction_position, - transaction_hash, + transaction_position: meta.index_in_block, + transaction_hash: meta.tx_hash, + block_number: meta.block_number, + block_hash: meta.block_hash, r#type: DebugCallType::Call, }); if !only_top_call { for (number, call) in call.calls.into_iter().enumerate() { trace_address.push(number); - Self::flatten_call( - call, - calls, - trace_address, - false, - transaction_position, - transaction_hash, - ); + Self::flatten_call(call, calls, trace_address, false, meta); trace_address.pop(); } } @@ -158,6 +150,7 @@ impl DebugNamespace { let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; + // let block_hash = block_hash self.state. self.current_method() .set_block_diff(self.state.last_sealed_l2_block.diff(block_number)); @@ -172,25 +165,31 @@ impl DebugNamespace { SupportedTracers::CallTracer => CallTracerBlockResult::CallTrace( call_traces .into_iter() - .map(|(call, _, _)| ResultDebugCall { + .map(|(call, _)| ResultDebugCall { result: Self::map_default_call(call, options.tracer_config.only_top_call), }) .collect(), ), SupportedTracers::FlatCallTracer => { - let mut flat_calls = vec![]; - for (call, tx_hash, tx_index) in call_traces { - let mut traces = vec![tx_index]; - Self::flatten_call( - call, - &mut flat_calls, - &mut traces, - options.tracer_config.only_top_call, - tx_index, - tx_hash, - ); - } - CallTracerBlockResult::FlatCallTrace(flat_calls) + let res = call_traces + .into_iter() + .map(|(call, meta)| { + let mut traces = vec![meta.index_in_block]; + let mut flat_calls = vec![]; + Self::flatten_call( + call, + &mut flat_calls, + &mut traces, + options.tracer_config.only_top_call, + &meta, + ); + ResultDebugCallFlat { + tx_hash: meta.tx_hash, + result: flat_calls, + } + }) + .collect(); + CallTracerBlockResult::FlatCallTrace(res) } }; Ok(result) @@ -207,13 +206,8 @@ impl DebugNamespace { .get_call_trace(tx_hash) .await .map_err(DalError::generalize)?; - Ok(call_trace.map(|(call_trace, index_in_block)| { - Self::map_call( - call_trace, - index_in_block, - tx_hash, - options.unwrap_or_default(), - ) + Ok(call_trace.map(|(call_trace, meta)| { + Self::map_call(call_trace, meta, options.unwrap_or_default()) })) } @@ -305,8 +299,6 @@ impl DebugNamespace { )) } }; - // It's a call request, it's safe to keep it zero - let hash = H256::zero(); let call = Call::new_high_level( call.common_data.fee.gas_limit.as_u64(), result.vm.statistics.gas_used, @@ -316,6 +308,12 @@ impl DebugNamespace { revert_reason, result.call_traces, ); - Ok(Self::map_call(call, 0, hash, options)) + let number = block_args.resolved_block_number(); + let meta = CallTraceMeta { + block_number: number.0, + // It's a call request, it's safe to everything as default + ..Default::default() + }; + Ok(Self::map_call(call, meta, options)) } } diff --git a/core/node/api_server/src/web3/tests/debug.rs 
b/core/node/api_server/src/web3/tests/debug.rs index 4f021b777ae..28a22511fa9 100644 --- a/core/node/api_server/src/web3/tests/debug.rs +++ b/core/node/api_server/src/web3/tests/debug.rs @@ -139,32 +139,27 @@ impl HttpTest for TraceBlockFlatTest { .await? .unwrap_flat(); - // A transaction with 2 nested calls will convert into 3 Flattened calls. - // Also in this test, all tx have the same # of nested calls - assert_eq!( - block_traces.len(), - tx_results.len() * (tx_results[0].call_traces.len() + 1) - ); + assert_eq!(block_traces.len(), tx_results.len()); + + let tx_traces = &block_traces.first().unwrap().result; // First tx has 2 nested calls, thus 2 sub-traces - assert_eq!(block_traces[0].subtraces, 2); - assert_eq!(block_traces[0].trace_address, [0]); + assert_eq!(tx_traces[0].subtraces, 2); + assert_eq!(tx_traces[0].trace_address, [0]); // Second flat-call (fist nested call) do not have nested calls - assert_eq!(block_traces[1].subtraces, 0); - assert_eq!(block_traces[1].trace_address, [0, 0]); + assert_eq!(tx_traces[1].subtraces, 0); + assert_eq!(tx_traces[1].trace_address, [0, 0]); - let top_level_call_indexes = [0, 3, 6]; + let top_level_call_indexes = [0, 1, 2]; let top_level_traces = top_level_call_indexes .iter() .map(|&i| block_traces[i].clone()); for (top_level_trace, tx_result) in top_level_traces.zip(&tx_results) { - assert_eq!(top_level_trace.action.from, Address::zero()); - assert_eq!(top_level_trace.action.to, BOOTLOADER_ADDRESS); - assert_eq!( - top_level_trace.action.gas, - tx_result.transaction.gas_limit() - ); + let trace = top_level_trace.result.first().unwrap(); + assert_eq!(trace.action.from, Address::zero()); + assert_eq!(trace.action.to, BOOTLOADER_ADDRESS); + assert_eq!(trace.action.gas, tx_result.transaction.gas_limit()); } // TODO: test inner calls } From 899ffc074ac21cf0c3c0bb1e0c876cfd3d8fda72 Mon Sep 17 00:00:00 2001 From: koloz193 Date: Wed, 16 Oct 2024 14:43:07 -0400 Subject: [PATCH 078/140] feat(upgrade): update tool to generate calldata for setting new chain creation params (#3117) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .../protocol-upgrade/src/transaction.ts | 58 ++++++++++++++++++- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index e7a3f32b322..bd7df8ab456 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -27,12 +27,26 @@ import * as path from 'path'; const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); +export enum Action { + Add = 0, + Replace = 1, + Remove = 2 +} + export interface DiamondCutData { facetCuts: FacetCut[]; initAddress: string; initCalldata: string; } +export interface ChainCreationParams { + genesisUpgrade: string; + genesisBatchHash: string; + genesisIndexRepeatedStorageChanges: number; + genesisBatchCommitment: string; + diamondCut: DiamondCutData; +} + export interface ForceDeployment { // The bytecode hash to put on an address bytecodeHash: BytesLike; @@ -186,6 +200,10 @@ export function prepareUpgradeCalldata( upgradeAddress: string, facetCuts: FacetCut[], zksyncAddress: string, + genesisUpgradeAddress: string, + genesisBatchHash: string, + genesisIndexRepeatedStorageChanges: number, + genesisBatchCommitment: string, prepareDirectOperation?: boolean, chainId?: string ) { @@ -194,6 +212,21 @@ export function prepareUpgradeCalldata( initAddress: upgradeAddress, initCalldata }; + + let chainCreationDiamondCut: DiamondCutData = { + facetCuts: facetCuts.filter((cut) => cut.action == Action.Add), + initAddress: genesisUpgradeAddress, + initCalldata: '0x' + }; + + let chainCreationParams: ChainCreationParams = { + genesisUpgrade: genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, + diamondCut: chainCreationDiamondCut + }; + // Prepare calldata for STM let stm = new StateTransitionManagerFactory(); const stmUpgradeCalldata = stm.interface.encodeFunctionData('setNewVersionUpgrade', [ @@ -203,6 +236,10 @@ export function prepareUpgradeCalldata( newProtocolVersion ]); + const stmSetChainCreationCalldata = stm.interface.encodeFunctionData('setChainCreationParams', [ + chainCreationParams + ]); + // Prepare calldata for upgrading diamond proxy let adminFacet = new AdminFacetFactory(); const diamondProxyUpgradeCalldata = adminFacet.interface.encodeFunctionData('upgradeChainFromVersion', [ @@ -215,7 +252,8 @@ export function prepareUpgradeCalldata( let result: any = { stmUpgradeCalldata, chainAdminUpgradeCalldata, - diamondCut + diamondCut, + stmSetChainCreationCalldata }; if (prepareDirectOperation) { @@ -242,6 +280,10 @@ export function buildDefaultUpgradeTx( upgradeTimestamp, zksyncAddress, postUpgradeCalldataFlag, + genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, prepareDirectOperation?, chainId? 
) { @@ -329,6 +371,10 @@ export function buildDefaultUpgradeTx( upgradeAddress, facetCuts, zksyncAddress, + genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, prepareDirectOperation, chainId ); @@ -376,7 +422,11 @@ command .option('--zksync-address ') .option('--chain-id ') .option('--prepare-direct-operation ') - .option('--post-upgrade-calldata') + .option('--post-upgrade-calldata ') + .option('--genesis-upgrade-address ') + .option('--genesis-batch-hash ') + .option('--genesis-index-repeated-storage-changes ') + .option('--genesis-batch-commitment ') .action(async (options) => { buildDefaultUpgradeTx( options.environment, @@ -386,6 +436,10 @@ command options.upgradeTimestamp, options.zksyncAddress, options.postUpgradeCalldata, + options.genesisUpgradeAddress, + options.genesisBatchHash, + options.genesisIndexRepeatedStorageChanges, + options.genesisBatchCommitment, options.prepareDirectOperation, options.chainId ); From 4cb28b16a77ff94ebcf613e42d6e3e17cce4330b Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 17 Oct 2024 11:16:49 +0300 Subject: [PATCH 079/140] feat(contract-verifier): add zkvyper 1.5.6 (#3118) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- docker/contract-verifier/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 80938e4ef83..e9d83903d11 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -68,7 +68,7 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ done # install zkvyper 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 5); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 6); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ From bbe191937fa5c5711a7164fd4f0c2ae65cda0833 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Thu, 17 Oct 2024 10:17:20 +0200 Subject: [PATCH 080/140] fix(prover): Run for zero queue to allow scaling down to 0 (#3115) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ * Run evaluation if queue is 0, but there some pods running in the namespace. * Add check if clusters data is ready. * Add max_provers config. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
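Below is a minimal sketch of the scaling gate described in the bullets above. It is an illustration of the intent only, assuming simplified stand-in types rather than the real `Clusters`/`Scaler` structures from the autoscaler crate; the helper names (`is_namespace_running`, `check_is_ready`) mirror the diff further down.

```rust
// Sketch of the new gating logic: evaluate a namespace when work is queued
// OR when pods are still up, and only act once every agent reported data.
// Types here are simplified stand-ins, not the real autoscaler structures.

use std::collections::HashMap;

/// Simplified view of one namespace: deployment name -> (running, desired) pod counts.
type NamespaceState = HashMap<String, (u64, u64)>;

/// A namespace still "counts" if anything is running or expected to run,
/// so the scaler keeps evaluating it and can eventually scale it down to 0.
fn is_namespace_running(ns: &NamespaceState) -> bool {
    ns.values()
        .map(|(running, desired)| running + desired)
        .sum::<u64>()
        > 0
}

/// Cluster data must have been reported by every agent before acting on it.
fn check_is_ready(is_ready: &[bool]) -> Result<(), String> {
    if is_ready.iter().all(|ready| *ready) {
        Ok(())
    } else {
        Err("Clusters data is not ready".to_string())
    }
}

/// Per-namespace gate: a drained queue no longer short-circuits evaluation,
/// which is what allows the desired prover count to drop to zero.
fn should_evaluate(queue_len: u64, ns: &NamespaceState) -> bool {
    queue_len > 0 || is_namespace_running(ns)
}

fn main() {
    let ns: NamespaceState = HashMap::from([("circuit-prover-gpu".to_string(), (3, 3))]);
    assert!(check_is_ready(&[true, true]).is_ok());
    // Queue drained, but pods still running: the scaler must still run and scale down.
    assert!(should_evaluate(0, &ns));
    // Nothing queued and nothing running: nothing to do.
    assert!(!should_evaluate(0, &HashMap::new()));
}
```

The effect of the `queue_len > 0 || is_namespace_running(..)` clause is that an empty queue no longer skips evaluation for a namespace that still has pods, so the autoscaler can converge the prover count to zero instead of leaving stale pods behind.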
ref ZKD-1855 --- .../config/src/configs/prover_autoscaler.rs | 2 + .../src/proto/config/prover_autoscaler.proto | 6 ++ .../protobuf_config/src/prover_autoscaler.rs | 38 ++++++++++++ .../prover_autoscaler/src/cluster_types.rs | 10 +--- .../prover_autoscaler/src/global/scaler.rs | 59 +++++++++++++++---- .../prover_autoscaler/src/global/watcher.rs | 49 ++++++++++----- .../crates/bin/prover_autoscaler/src/main.rs | 14 ++--- 7 files changed, 134 insertions(+), 44 deletions(-) diff --git a/core/lib/config/src/configs/prover_autoscaler.rs b/core/lib/config/src/configs/prover_autoscaler.rs index 41131fc1b8c..6f83f0d2d18 100644 --- a/core/lib/config/src/configs/prover_autoscaler.rs +++ b/core/lib/config/src/configs/prover_autoscaler.rs @@ -51,6 +51,8 @@ pub struct ProverAutoscalerScalerConfig { pub cluster_priorities: HashMap, /// Prover speed per GPU. Used to calculate desired number of provers for queue size. pub prover_speed: HashMap, + /// Maximum number of provers which can be run per cluster/GPU. + pub max_provers: HashMap>, /// Duration after which pending pod considered long pending. #[serde(default = "ProverAutoscalerScalerConfig::default_long_pending_duration")] pub long_pending_duration: Duration, diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto index e1d11b94d8f..8363b625119 100644 --- a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto +++ b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto @@ -34,6 +34,11 @@ message ProverSpeed { optional uint32 speed = 2; // required } +message MaxProver { + optional string cluster_and_gpu = 1; // required, format: / + optional uint32 max = 2; // required +} + message ProverAutoscalerScalerConfig { optional uint32 prometheus_port = 1; // required optional std.Duration scaler_run_interval = 2; // optional @@ -43,4 +48,5 @@ message ProverAutoscalerScalerConfig { repeated ClusterPriority cluster_priorities = 6; // optional repeated ProverSpeed prover_speed = 7; // optional optional uint32 long_pending_duration_s = 8; // optional + repeated MaxProver max_provers = 9; // optional } diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs b/core/lib/protobuf_config/src/prover_autoscaler.rs index f7da099cb82..e95e4003972 100644 --- a/core/lib/protobuf_config/src/prover_autoscaler.rs +++ b/core/lib/protobuf_config/src/prover_autoscaler.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use anyhow::Context as _; use time::Duration; use zksync_config::configs::{self, prover_autoscaler::Gpu}; @@ -92,6 +94,15 @@ impl ProtoRepr for proto::ProverAutoscalerScalerConfig { Some(s) => Duration::seconds(s.into()), None => Self::Type::default_long_pending_duration(), }, + max_provers: self.max_provers.iter().fold(HashMap::new(), |mut acc, e| { + let (cluster_and_gpu, max) = e.read().expect("max_provers"); + if let Some((cluster, gpu)) = cluster_and_gpu.split_once('/') { + acc.entry(cluster.to_string()) + .or_default() + .insert(gpu.parse().expect("max_provers/gpu"), max); + } + acc + }), }) } @@ -117,6 +128,15 @@ impl ProtoRepr for proto::ProverAutoscalerScalerConfig { .map(|(k, v)| proto::ProverSpeed::build(&(*k, *v))) .collect(), long_pending_duration_s: Some(this.long_pending_duration.whole_seconds() as u32), + max_provers: this + .max_provers + .iter() + .flat_map(|(cluster, inner_map)| { + inner_map.iter().map(move |(gpu, max)| { + proto::MaxProver::build(&(format!("{}/{}", cluster, gpu), *max)) + }) + }) + .collect(), } } } @@ 
-170,3 +190,21 @@ impl ProtoRepr for proto::ProverSpeed { } } } + +impl ProtoRepr for proto::MaxProver { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.cluster_and_gpu) + .context("cluster_and_gpu")? + .parse()?, + *required(&self.max).context("max")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + cluster_and_gpu: Some(this.0.to_string()), + max: Some(this.1), + } + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs index b074e0774c9..c25b624b5d4 100644 --- a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs +++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs @@ -36,19 +36,11 @@ pub struct Namespace { pub pods: HashMap, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct Cluster { pub name: String, pub namespaces: HashMap, } -impl Default for Cluster { - fn default() -> Self { - Self { - name: "".to_string(), - namespaces: HashMap::new(), - } - } -} #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct Clusters { diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs index 9f37c4d1167..75c9e2e3e42 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -56,6 +56,7 @@ pub struct Scaler { /// Which cluster to use first. cluster_priorities: HashMap, + max_provers: HashMap>, prover_speed: HashMap, long_pending_duration: chrono::Duration, } @@ -87,6 +88,7 @@ impl Scaler { watcher, queuer, cluster_priorities: config.cluster_priorities, + max_provers: config.max_provers, prover_speed: config.prover_speed, long_pending_duration: chrono::Duration::seconds( config.long_pending_duration.whole_seconds(), @@ -112,7 +114,12 @@ impl Scaler { let e = gp_map.entry(gpu).or_insert(GPUPool { name: cluster.name.clone(), gpu, - max_pool_size: 100, // TODO: get from the agent. + max_pool_size: self + .max_provers + .get(&cluster.name) + .and_then(|inner_map| inner_map.get(&gpu)) + .copied() + .unwrap_or(0), ..Default::default() }); @@ -265,23 +272,46 @@ impl Scaler { } } - tracing::debug!("run result: provers {:?}, total: {}", &provers, total); + tracing::debug!( + "run result for namespace {}: provers {:?}, total: {}", + namespace, + &provers, + total + ); provers } } +/// is_namespace_running returns true if there are some pods running in it. +fn is_namespace_running(namespace: &str, clusters: &Clusters) -> bool { + clusters + .clusters + .values() + .flat_map(|v| v.namespaces.iter()) + .filter_map(|(k, v)| if k == namespace { Some(v) } else { None }) + .flat_map(|v| v.deployments.values()) + .map( + |d| d.running + d.desired, // If there is something running or expected to run, we + // should consider the namespace. + ) + .sum::() + > 0 +} + #[async_trait::async_trait] impl Task for Scaler { async fn invoke(&self) -> anyhow::Result<()> { let queue = self.queuer.get_queue().await.unwrap(); - // TODO: Check that clusters data is ready. 
- let clusters = self.watcher.clusters.lock().await; + let guard = self.watcher.data.lock().await; + watcher::check_is_ready(&guard.is_ready)?; + for (ns, ppv) in &self.namespaces { let q = queue.queue.get(ppv).cloned().unwrap_or(0); - if q > 0 { - let provers = self.run(ns, q, &clusters); + tracing::debug!("Running eval for namespace {ns} and PPV {ppv} found queue {q}"); + if q > 0 || is_namespace_running(ns, &guard.clusters) { + let provers = self.run(ns, q, &guard.clusters); for (k, num) in &provers { AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] .set(*num as u64); @@ -302,7 +332,7 @@ mod tests { use super::*; use crate::{ - cluster_types::{self, Deployment, Namespace, Pod}, + cluster_types::{Deployment, Namespace, Pod}, global::{queuer, watcher}, }; @@ -310,14 +340,19 @@ mod tests { fn test_run() { let watcher = watcher::Watcher { cluster_agents: vec![], - clusters: Arc::new(Mutex::new(cluster_types::Clusters { - ..Default::default() - })), + data: Arc::new(Mutex::new(watcher::WatchedData::default())), }; let queuer = queuer::Queuer { prover_job_monitor_url: "".to_string(), }; - let scaler = Scaler::new(watcher, queuer, ProverAutoscalerScalerConfig::default()); + let scaler = Scaler::new( + watcher, + queuer, + ProverAutoscalerScalerConfig { + max_provers: HashMap::from([("foo".to_string(), HashMap::from([(Gpu::L4, 100)]))]), + ..Default::default() + }, + ); let got = scaler.run( &"prover".to_string(), 1499, @@ -355,6 +390,6 @@ mod tests { }, 3, )]); - assert!(got == want); + assert_eq!(got, want); } } diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs index ef3ebd3b819..01fa68c60f8 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, sync::Arc}; -use anyhow::{Context, Ok}; +use anyhow::{anyhow, Context, Ok, Result}; use futures::future; use reqwest::Method; use tokio::sync::Mutex; @@ -12,15 +12,31 @@ use crate::{ task_wiring::Task, }; +#[derive(Default)] +pub struct WatchedData { + pub clusters: Clusters, + pub is_ready: Vec, +} + +pub fn check_is_ready(v: &Vec) -> Result<()> { + for b in v { + if !b { + return Err(anyhow!("Clusters data is not ready")); + } + } + Ok(()) +} + #[derive(Clone)] pub struct Watcher { /// List of base URLs of all agents. pub cluster_agents: Vec>, - pub clusters: Arc>, + pub data: Arc>, } impl Watcher { pub fn new(agent_urls: Vec) -> Self { + let size = agent_urls.len(); Self { cluster_agents: agent_urls .into_iter() @@ -31,8 +47,11 @@ impl Watcher { ) }) .collect(), - clusters: Arc::new(Mutex::new(Clusters { - clusters: HashMap::new(), + data: Arc::new(Mutex::new(WatchedData { + clusters: Clusters { + clusters: HashMap::new(), + }, + is_ready: vec![false; size], })), } } @@ -45,7 +64,8 @@ impl Task for Watcher { .cluster_agents .clone() .into_iter() - .map(|a| { + .enumerate() + .map(|(i, a)| { tracing::debug!("Getting cluster data from agent {}.", a); tokio::spawn(async move { let url: String = a @@ -55,13 +75,14 @@ impl Task for Watcher { .to_string(); let response = send_request_with_retries(&url, 5, Method::GET, None, None).await; - response + let res = response .map_err(|err| { anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}") })? 
.json::() .await - .context("Failed to read response as json") + .context("Failed to read response as json"); + Ok((i, res)) }) }) .collect(); @@ -71,18 +92,16 @@ impl Task for Watcher { .await .into_iter() .map(|h| async move { - let c = h.unwrap().unwrap(); - self.clusters - .lock() - .await - .clusters - .insert(c.name.clone(), c); + let (i, res) = h??; + let c = res?; + let mut guard = self.data.lock().await; + guard.clusters.clusters.insert(c.name.clone(), c); + guard.is_ready[i] = true; Ok(()) }) .collect::>(), ) - .await - .unwrap(); + .await?; Ok(()) } diff --git a/prover/crates/bin/prover_autoscaler/src/main.rs b/prover/crates/bin/prover_autoscaler/src/main.rs index 196bd6deb81..e3aec1fbd39 100644 --- a/prover/crates/bin/prover_autoscaler/src/main.rs +++ b/prover/crates/bin/prover_autoscaler/src/main.rs @@ -80,24 +80,21 @@ async fn main() -> anyhow::Result<()> { let _ = rustls::crypto::ring::default_provider().install_default(); let client = kube::Client::try_default().await?; - tracing::info!("Starting ProverAutoscaler"); - let mut tasks = vec![]; match opt.job { AutoscalerType::Agent => { + let cluster = opt + .cluster_name + .context("cluster_name is required for Agent")?; + tracing::info!("Starting ProverAutoscaler Agent for cluster {}", cluster); let agent_config = general_config.agent_config.context("agent_config")?; let exporter_config = PrometheusExporterConfig::pull(agent_config.prometheus_port); tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); // TODO: maybe get cluster name from curl -H "Metadata-Flavor: Google" // http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name - let watcher = Watcher::new( - client.clone(), - opt.cluster_name - .context("cluster_name is required for Agent")?, - agent_config.namespaces, - ); + let watcher = Watcher::new(client.clone(), cluster, agent_config.namespaces); let scaler = Scaler { client }; tasks.push(tokio::spawn(watcher.clone().run())); tasks.push(tokio::spawn(agent::run_server( @@ -108,6 +105,7 @@ async fn main() -> anyhow::Result<()> { ))) } AutoscalerType::Scaler => { + tracing::info!("Starting ProverAutoscaler Scaler"); let scaler_config = general_config.scaler_config.context("scaler_config")?; let interval = scaler_config.scaler_run_interval.unsigned_abs(); let exporter_config = PrometheusExporterConfig::pull(scaler_config.prometheus_port); From 18ae3f9dfef5840377b01a46a9a6685aa1b536d6 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Thu, 17 Oct 2024 11:03:44 +0200 Subject: [PATCH 081/140] ci: Add prover-autoscaler into old release workflow (#3116) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add prover-autoscaler into old release workflow ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
ref ZKD-1855 --- .github/workflows/build-prover-template.yml | 4 ++-- infrastructure/zk/src/docker.ts | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index d6ec61114c7..2dcb5dadb17 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -45,7 +45,7 @@ jobs: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" ERA_BELLMAN_CUDA_RELEASE: ${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: ${{ inputs.CUDA_ARCH }} - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] strategy: matrix: component: @@ -56,6 +56,7 @@ jobs: - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor + - prover-autoscaler outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} steps: @@ -91,7 +92,6 @@ jobs: run: | ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^24.key - - name: login to Docker registries if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) run: | diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 063777a671b..dc716a0b257 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -16,7 +16,8 @@ const IMAGES = [ 'prover-job-monitor', 'proof-fri-gpu-compressor', 'snapshots-creator', - 'verified-sources-fetcher' + 'verified-sources-fetcher', + 'prover-autoscaler' ]; const DOCKER_REGISTRIES = ['us-docker.pkg.dev/matterlabs-infra/matterlabs-docker', 'matterlabs']; @@ -76,7 +77,8 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin 'contract-verifier', 'prover-fri-gateway', 'prover-job-monitor', - 'snapshots-creator' + 'snapshots-creator', + 'prover-autoscaler' ].includes(image) ? ['latest', 'latest2.0', `2.0-${imageTagSha}`, `${imageTagSha}`, `2.0-${imageTagShaTS}`, `${imageTagShaTS}`] : [`latest2.0`, 'latest']; From 76ed6d966051c56f8e894c18461c5ea284b1a74b Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Thu, 17 Oct 2024 12:15:41 +0200 Subject: [PATCH 082/140] fix(prover): Do not exit on missing watcher data. (#3119) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Do not exit on missing watcher data. ## Why ❔ To fix always exiting in `scaler_period` after start due to missing data. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
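In short, incomplete watcher data is now treated as a soft condition: the scaler logs a warning and skips the iteration instead of returning an error from the task. A self-contained sketch (the function name `scaler_iteration` is illustrative; `check_is_ready` matches the watcher helper, and the real one-hunk change follows in the diff below):

```rust
fn check_is_ready(is_ready: &[bool]) -> Result<(), String> {
    if is_ready.iter().all(|ready| *ready) {
        Ok(())
    } else {
        Err("Clusters data is not ready".to_string())
    }
}

fn scaler_iteration(is_ready: &[bool]) -> Result<(), String> {
    if let Err(err) = check_is_ready(is_ready) {
        // Previously this error was propagated with `?`, which made the scaler
        // exit roughly one scaler_run_interval after startup.
        eprintln!("Skipping Scaler run: {err}");
        return Ok(());
    }
    // ... queue evaluation and pod scaling would happen here ...
    Ok(())
}
```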
ref ZKD-1855 --- prover/crates/bin/prover_autoscaler/src/global/scaler.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs index 75c9e2e3e42..dd3f3cf1ad3 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -305,7 +305,10 @@ impl Task for Scaler { let queue = self.queuer.get_queue().await.unwrap(); let guard = self.watcher.data.lock().await; - watcher::check_is_ready(&guard.is_ready)?; + if let Err(err) = watcher::check_is_ready(&guard.is_ready) { + tracing::warn!("Skipping Scaler run: {}", err); + return Ok(()); + } for (ns, ppv) in &self.namespaces { let q = queue.queue.get(ppv).cloned().unwrap_or(0); From 3b532f5e363e0cb093221e6d064ce1a7d7428b52 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 17 Oct 2024 15:27:58 +0300 Subject: [PATCH 083/140] feat: Add option to run prover images from tag (#3120) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add option to run prover images from tag, but not only latest. ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- zkstack_cli/crates/zkstack/README.md | 10 +++++++++- .../zkstack/src/commands/prover/args/init.rs | 4 ++-- .../zkstack/src/commands/prover/args/run.rs | 5 +++++ .../crates/zkstack/src/commands/prover/run.rs | 6 ++++-- zkstack_cli/crates/zkstack/src/consts.rs | 15 +++++++-------- 5 files changed, 27 insertions(+), 13 deletions(-) diff --git a/zkstack_cli/crates/zkstack/README.md b/zkstack_cli/crates/zkstack/README.md index 6e529efc200..f352d96fec4 100644 --- a/zkstack_cli/crates/zkstack/README.md +++ b/zkstack_cli/crates/zkstack/README.md @@ -508,7 +508,11 @@ Initialize prover - `--public-location ` - `--public-project-id ` - `--bellman-cuda-dir ` -- `--download-key ` +- `--bellman-cuda` + + Possible values: `true`, `false` + +- `--setup-compressor-key ` Possible values: `true`, `false` @@ -564,6 +568,10 @@ Run prover Possible values: `true`, `false` +- `--tag' - Tag of the docker image to run. + + Default value is `latest2.0` but you can specify your prefered one. 
+ - `--round ` Possible values: `all-rounds`, `basic-circuits`, `leaf-aggregation`, `node-aggregation`, `recursion-tip`, `scheduler` diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs index 280b5b2e91d..fab79899302 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs @@ -61,7 +61,7 @@ pub struct ProverInitArgs { pub bellman_cuda: Option, #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub setup_compressor_keys: Option, + pub setup_compressor_key: Option, #[clap(flatten)] pub compressor_keys_args: CompressorKeysArgs, @@ -363,7 +363,7 @@ impl ProverInitArgs { }); } - let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| { + let download_key = self.clone().setup_compressor_key.unwrap_or_else(|| { PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT) .default(false) .ask() diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs index d7600ba2d31..b79af777673 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs @@ -35,6 +35,8 @@ pub struct ProverRunArgs { pub circuit_prover_args: CircuitProverArgs, #[clap(long)] pub docker: Option, + #[clap(long)] + pub tag: Option, } #[derive( @@ -300,6 +302,8 @@ impl ProverRunArgs { .ask() }); + let tag = self.tag.unwrap_or("latest2.0".to_string()); + Ok(ProverRunArgs { component: Some(component), witness_generator_args, @@ -307,6 +311,7 @@ impl ProverRunArgs { fri_prover_args: self.fri_prover_args, circuit_prover_args, docker: Some(docker), + tag: Some(tag), }) } } diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs index 863816b9ae6..85495d12404 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs @@ -33,7 +33,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() let application_args = component.get_application_args(in_docker)?; let additional_args = - component.get_additional_args(in_docker, args, &chain, &path_to_ecosystem)?; + component.get_additional_args(in_docker, args.clone(), &chain, &path_to_ecosystem)?; let (message, error) = match component { ProverComponent::WitnessGenerator => ( @@ -83,6 +83,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() run_dockerized_component( shell, component.image_name(), + &args.tag.unwrap(), &application_args, &additional_args, message, @@ -110,6 +111,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() fn run_dockerized_component( shell: &Shell, image_name: &str, + tag: &str, application_args: &[String], args: &[String], message: &'static str, @@ -124,7 +126,7 @@ fn run_dockerized_component( let mut cmd = Cmd::new(cmd!( shell, - "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name} {args...}" + "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name}:{tag} {args...}" )); cmd = cmd.with_force_run(); diff --git a/zkstack_cli/crates/zkstack/src/consts.rs 
b/zkstack_cli/crates/zkstack/src/consts.rs index ba00af77b5a..b7c4d2a2070 100644 --- a/zkstack_cli/crates/zkstack/src/consts.rs +++ b/zkstack_cli/crates/zkstack/src/consts.rs @@ -17,14 +17,13 @@ pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; pub const PORTAL_DOCKER_CONFIG_PATH: &str = "/usr/src/app/dist/config.js"; pub const PORTAL_DOCKER_IMAGE: &str = "matterlabs/dapp-portal"; -pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway:latest2.0"; -pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:latest2.0"; -pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = - "matterlabs/witness-vector-generator:latest2.0"; -pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; -pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu:latest2.0"; -pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; -pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; +pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway"; +pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator"; +pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-vector-generator"; +pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri"; +pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu"; +pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor"; +pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor"; pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; From 9d88373f1b745c489e98e5ef542644a70e815498 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Thu, 17 Oct 2024 15:06:51 +0200 Subject: [PATCH 084/140] fix(tee_verifier): correctly initialize storage for re-execution (#3017) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ With this commit, the TEE verifier uses `WitnessStorageState` of `VMRunWitnessInputData` to initialize the storage. This requires waiting for the BasicWitnessInputProducer to complete and therefore the TEE verifier input producer can be removed. The input for the TEE verifier is now collected in the `proof_data_handler`, which enables to remove the whole job queue for the TEE verifier input producer. ## Why ❔ Previously the storage for VM re-execution was initialized just from `WitnessInputMerklePaths`. This although misses the storage values for slots, which are only read/written to by rolled back transactions. This led to failed verification of blocks, which would normally pass. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
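To make the storage-initialization point concrete, here is a simplified, hypothetical sketch (the struct and field names are illustrative stand-ins, not the actual definitions from the zksync crates): merkle paths only cover slots that reached the final state diff, while the witness storage state also records reads made by transactions that were later rolled back, which is exactly what re-execution needs.

```rust
use std::collections::HashMap;

// Illustrative stand-ins; the real types live in the zksync workspace.
type StorageKey = [u8; 32];
type StorageValue = [u8; 32];

struct WitnessStorageState {
    // Every slot read while originally executing the batch, including reads made
    // by transactions that were rolled back and therefore never show up in the
    // final state diff or its merkle paths.
    read_storage_key: HashMap<StorageKey, StorageValue>,
    // Whether the first write to a slot was an initial write.
    is_write_initial: HashMap<StorageKey, bool>,
}

/// Seed the in-memory storage for TEE re-execution from the recorded reads
/// rather than from merkle paths alone, so rolled-back reads resolve correctly.
fn seed_reexecution_storage(state: &WitnessStorageState) -> HashMap<StorageKey, StorageValue> {
    state.read_storage_key.clone()
}
```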
--------- Signed-off-by: Harald Hoyer Co-authored-by: Patrick Beza --- Cargo.lock | 25 +- Cargo.toml | 2 - core/bin/zksync_server/src/main.rs | 2 +- core/bin/zksync_server/src/node_builder.rs | 13 +- core/bin/zksync_tee_prover/src/api_client.rs | 36 ++- core/bin/zksync_tee_prover/src/tee_prover.rs | 9 +- ...ad2574cd1310dff1d1bf06825d5634ba25f04.json | 30 -- ...94ec52b3eb68c346492a8fed98f20f2a0381d.json | 36 --- ...62af196d586cc08ea0f23d2c568527e94b41d.json | 12 - ...e494ce1d8e3b6cfb0b897745fb596f283be79.json | 52 ---- ...7dc982c8cfb0e2277aff8dfaa9654255451ac.json | 26 ++ ...6f1a607a0bcc6864490c5961dd4e2ee12ed78.json | 22 -- ...5aea6710351dea1f1e57d73447476c3fcd199.json | 28 -- ...3c39e774c405508e73e77cdd0c01f924c97c0.json | 40 --- ...729b9149fee37c5ef7d69e259ee33cb8ca860.json | 65 ----- ...9542f0dba42101b32e026751362e169381662.json | 22 ++ ...e_tee_verifier_input_producer_job.down.sql | 20 ++ ...ove_tee_verifier_input_producer_job.up.sql | 3 + core/lib/dal/src/lib.rs | 10 +- core/lib/dal/src/tee_proof_generation_dal.rs | 185 +++++++------ .../src/tee_verifier_input_producer_dal.rs | 234 ---------------- core/lib/object_store/src/file.rs | 1 - core/lib/object_store/src/raw.rs | 2 - core/lib/prover_interface/src/api.rs | 2 +- core/lib/prover_interface/src/inputs.rs | 29 +- core/lib/tee_verifier/Cargo.toml | 7 +- core/lib/tee_verifier/src/lib.rs | 154 +++++++---- .../types/src/storage/witness_block_state.rs | 2 +- core/lib/zksync_core_leftovers/src/lib.rs | 6 - .../api_server/src/web3/tests/unstable.rs | 7 +- core/node/metadata_calculator/src/updater.rs | 4 - core/node/node_framework/Cargo.toml | 1 - .../src/implementations/layers/mod.rs | 1 - .../layers/proof_data_handler.rs | 8 +- .../layers/tee_verifier_input_producer.rs | 69 ----- core/node/proof_data_handler/Cargo.toml | 2 + core/node/proof_data_handler/src/errors.rs | 25 +- core/node/proof_data_handler/src/lib.rs | 26 +- .../src/tee_request_processor.rs | 115 ++++++-- core/node/proof_data_handler/src/tests.rs | 152 +++------- core/node/shared_metrics/src/lib.rs | 2 - .../tee_verifier_input_producer/Cargo.toml | 27 -- .../tee_verifier_input_producer/README.md | 3 - .../tee_verifier_input_producer/src/lib.rs | 261 ------------------ .../src/metrics.rs | 18 -- etc/env/base/rust.toml | 3 +- etc/env/file_based/general.yaml | 2 +- prover/docs/03_launch.md | 2 +- 48 files changed, 487 insertions(+), 1316 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json delete mode 100644 core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json delete mode 100644 core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json delete mode 100644 core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json create mode 100644 core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json delete mode 100644 core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json delete mode 100644 core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json delete mode 100644 core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json delete mode 100644 core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json create mode 100644 core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json create 
mode 100644 core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql create mode 100644 core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql delete mode 100644 core/lib/dal/src/tee_verifier_input_producer_dal.rs delete mode 100644 core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs delete mode 100644 core/node/tee_verifier_input_producer/Cargo.toml delete mode 100644 core/node/tee_verifier_input_producer/README.md delete mode 100644 core/node/tee_verifier_input_producer/src/lib.rs delete mode 100644 core/node/tee_verifier_input_producer/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index 774471d3d6c..887b71c39ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10710,7 +10710,6 @@ dependencies = [ "zksync_state", "zksync_state_keeper", "zksync_storage", - "zksync_tee_verifier_input_producer", "zksync_types", "zksync_utils", "zksync_vlog", @@ -10878,6 +10877,8 @@ dependencies = [ "zksync_object_store", "zksync_prover_interface", "zksync_types", + "zksync_utils", + "zksync_vm_executor", ] [[package]] @@ -11187,6 +11188,8 @@ name = "zksync_tee_verifier" version = "0.1.0" dependencies = [ "anyhow", + "bincode", + "once_cell", "serde", "tracing", "zksync_config", @@ -11194,31 +11197,11 @@ dependencies = [ "zksync_crypto_primitives", "zksync_merkle_tree", "zksync_multivm", - "zksync_object_store", "zksync_prover_interface", "zksync_types", "zksync_utils", ] -[[package]] -name = "zksync_tee_verifier_input_producer" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "tokio", - "tracing", - "vise", - "zksync_dal", - "zksync_object_store", - "zksync_prover_interface", - "zksync_queued_job_processor", - "zksync_tee_verifier", - "zksync_types", - "zksync_utils", - "zksync_vm_executor", -] - [[package]] name = "zksync_test_account" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 60b5628f419..940d5dd036b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,6 @@ members = [ "core/node/consensus", "core/node/contract_verification_server", "core/node/api_server", - "core/node/tee_verifier_input_producer", "core/node/base_token_adjuster", "core/node/external_proof_integration_api", "core/node/logs_bloom_backfill", @@ -309,6 +308,5 @@ zksync_node_storage_init = { version = "0.1.0", path = "core/node/node_storage_i zksync_node_consensus = { version = "0.1.0", path = "core/node/consensus" } zksync_contract_verification_server = { version = "0.1.0", path = "core/node/contract_verification_server" } zksync_node_api_server = { version = "0.1.0", path = "core/node/api_server" } -zksync_tee_verifier_input_producer = { version = "0.1.0", path = "core/node/tee_verifier_input_producer" } zksync_base_token_adjuster = { version = "0.1.0", path = "core/node/base_token_adjuster" } zksync_logs_bloom_backfill = { version = "0.1.0", path = "core/node/logs_bloom_backfill" } diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index da0a93f624d..9e1a1b5948c 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -45,7 +45,7 @@ struct Cli { /// Comma-separated list of components to launch. #[arg( long, - default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,vm_runner_protective_reads" + default_value = "api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,vm_runner_protective_reads" )] components: ComponentsToRun, /// Path to the yaml config. 
If set, it will be used instead of env vars. diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index b04227965f8..c87bf3ce2dd 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -55,7 +55,6 @@ use zksync_node_framework::{ main_batch_executor::MainBatchExecutorLayer, mempool_io::MempoolIOLayer, output_handler::OutputHandlerLayer, RocksdbStorageOptions, StateKeeperLayer, }, - tee_verifier_input_producer::TeeVerifierInputProducerLayer, vm_runner::{ bwip::BasicWitnessInputProducerLayer, playground::VmPlaygroundLayer, protective_reads::ProtectiveReadsWriterLayer, @@ -288,6 +287,7 @@ impl MainNodeBuilder { self.node.add_layer(ProofDataHandlerLayer::new( try_load_config!(self.configs.proof_data_handler_config), self.genesis_config.l1_batch_commit_data_generator_mode, + self.genesis_config.l2_chain_id, )); Ok(self) } @@ -493,14 +493,6 @@ impl MainNodeBuilder { Ok(self) } - fn add_tee_verifier_input_producer_layer(mut self) -> anyhow::Result { - self.node.add_layer(TeeVerifierInputProducerLayer::new( - self.genesis_config.l2_chain_id, - )); - - Ok(self) - } - fn add_da_client_layer(mut self) -> anyhow::Result { let Some(da_client_config) = self.configs.da_client_config.clone() else { tracing::warn!("No config for DA client, using the NoDA client"); @@ -727,9 +719,6 @@ impl MainNodeBuilder { Component::EthTxManager => { self = self.add_eth_tx_manager_layer()?; } - Component::TeeVerifierInputProducer => { - self = self.add_tee_verifier_input_producer_layer()?; - } Component::Housekeeper => { self = self .add_house_keeper_layer()? diff --git a/core/bin/zksync_tee_prover/src/api_client.rs b/core/bin/zksync_tee_prover/src/api_client.rs index 13fbc1ba886..ffc2839b8d3 100644 --- a/core/bin/zksync_tee_prover/src/api_client.rs +++ b/core/bin/zksync_tee_prover/src/api_client.rs @@ -1,13 +1,10 @@ -use reqwest::Client; +use reqwest::{Client, Response, StatusCode}; use secp256k1::{ecdsa::Signature, PublicKey}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::Serialize; use url::Url; use zksync_basic_types::H256; use zksync_prover_interface::{ - api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitTeeProofRequest, - SubmitTeeProofResponse, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, - }, + api::{RegisterTeeAttestationRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest}, inputs::TeeVerifierInput, outputs::L1BatchTeeProofForL1, }; @@ -31,10 +28,9 @@ impl TeeApiClient { } } - async fn post(&self, endpoint: S, request: Req) -> Result + async fn post(&self, endpoint: S, request: Req) -> Result where Req: Serialize + std::fmt::Debug, - Resp: DeserializeOwned, S: AsRef, { let url = self.api_base_url.join(endpoint.as_ref()).unwrap(); @@ -46,9 +42,7 @@ impl TeeApiClient { .json(&request) .send() .await? - .error_for_status()? 
- .json::() - .await + .error_for_status() } /// Registers the attestation quote with the TEE prover interface API, effectively proving that @@ -63,8 +57,7 @@ impl TeeApiClient { attestation: attestation_quote_bytes, pubkey: public_key.serialize().to_vec(), }; - self.post::<_, RegisterTeeAttestationResponse, _>("/tee/register_attestation", request) - .await?; + self.post("/tee/register_attestation", request).await?; tracing::info!( "Attestation quote was successfully registered for the public key {}", public_key @@ -77,12 +70,17 @@ impl TeeApiClient { pub async fn get_job( &self, tee_type: TeeType, - ) -> Result>, TeeProverError> { + ) -> Result, TeeProverError> { let request = TeeProofGenerationDataRequest { tee_type }; - let response = self - .post::<_, TeeProofGenerationDataResponse, _>("/tee/proof_inputs", request) - .await?; - Ok(response.0) + let response = self.post("/tee/proof_inputs", request).await?; + match response.status() { + StatusCode::OK => Ok(Some(response.json::().await?)), + StatusCode::NO_CONTENT => Ok(None), + _ => response + .json::>() + .await + .map_err(TeeProverError::Request), + } } /// Submits the successfully verified proof to the TEE prover interface API. @@ -101,7 +99,7 @@ impl TeeApiClient { tee_type, })); let observer = METRICS.proof_submitting_time.start(); - self.post::<_, SubmitTeeProofResponse, _>( + self.post( format!("/tee/submit_proofs/{batch_number}").as_str(), request, ) diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 1511f0c88e3..bb7176644e6 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -90,9 +90,9 @@ impl TeeProver { } async fn step(&self, public_key: &PublicKey) -> Result, TeeProverError> { - match self.api_client.get_job(self.config.tee_type).await? 
{ - Some(job) => { - let (signature, batch_number, root_hash) = self.verify(*job)?; + match self.api_client.get_job(self.config.tee_type).await { + Ok(Some(job)) => { + let (signature, batch_number, root_hash) = self.verify(job)?; self.api_client .submit_proof( batch_number, @@ -104,10 +104,11 @@ impl TeeProver { .await?; Ok(Some(batch_number)) } - None => { + Ok(None) => { tracing::trace!("There are currently no pending batches to be proven"); Ok(None) } + Err(err) => Err(err), } } } diff --git a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json b/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json deleted file mode 100644 index 05b94ad249a..00000000000 --- a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n input_blob_url = $4\n WHERE\n l1_batch_number = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text" - ] - }, - "nullable": [] - }, - "hash": "0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04" -} diff --git a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json b/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json deleted file mode 100644 index 9d8cc36189f..00000000000 --- a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = $2\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text" - ] - }, - "nullable": [ - false - ] - }, - "hash": "2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d" -} diff --git a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json b/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json deleted file mode 100644 index a273eb249a4..00000000000 --- a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM tee_verifier_input_producer_jobs\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [] - }, - "nullable": [] - }, - "hash": "2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d" -} diff --git a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json b/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json deleted file mode 
100644 index 6012c632651..00000000000 --- a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n error = $4\n WHERE\n l1_batch_number = $2\n AND status != $5\n RETURNING\n tee_verifier_input_producer_jobs.attempts\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [ - false - ] - }, - "hash": "3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79" -} diff --git a/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json b/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json new file mode 100644 index 00000000000..4d006b6d1d5 --- /dev/null +++ b/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH upsert AS (\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n l1_batches l1\n ON p.l1_batch_number = l1.number\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n AND l1.hash IS NOT NULL\n AND l1.aux_data_hash IS NOT NULL\n AND l1.meta_parameters_hash IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n tee.status = $3\n OR (\n tee.status = $2\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n )\n FETCH FIRST ROW ONLY\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Interval", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac" +} diff --git a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json b/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json deleted file mode 100644 index f34c4a548cb..00000000000 --- a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - 
"Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78" -} diff --git a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json b/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json deleted file mode 100644 index 01ede1d8643..00000000000 --- a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_verifier_input_producer_jobs (\n l1_batch_number, status, created_at, updated_at\n )\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [] - }, - "hash": "6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199" -} diff --git a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json b/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json deleted file mode 100644 index b17b5828211..00000000000 --- a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $2\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $3\n AND (\n proofs.status = $4\n OR (\n proofs.status = $1\n AND proofs.prover_taken_at < NOW() - $5::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $6\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text", - "Interval", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0" -} diff --git a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json b/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json deleted file mode 100644 index fa1a5d6741a..00000000000 --- a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n status = $2\n OR (\n status = $1\n AND processing_started_at < NOW() - $4::INTERVAL\n )\n OR (\n status = $3\n AND attempts < 
$5\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_verifier_input_producer_jobs.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Interval", - "Int2" - ] - }, - "nullable": [ - false - ] - }, - "hash": "d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860" -} diff --git a/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json new file mode 100644 index 00000000000..12e28266fbc --- /dev/null +++ b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n WHERE\n proofs.status = $1\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662" +} diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql new file mode 100644 index 00000000000..707ce306365 --- /dev/null +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql @@ -0,0 +1,20 @@ +CREATE TABLE tee_verifier_input_producer_jobs ( + l1_batch_number BIGINT NOT NULL, + status TEXT NOT NULL, + signature BYTEA, + pubkey BYTEA, + proof BYTEA, + tee_type TEXT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + prover_taken_at TIMESTAMP, + PRIMARY KEY (l1_batch_number, tee_type), + CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey FOREIGN KEY (l1_batch_number) REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) ON DELETE CASCADE, + CONSTRAINT tee_proof_generation_details_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES tee_attestations(pubkey) ON DELETE SET NULL +); + +ALTER TABLE tee_proof_generation_details + ADD CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey + FOREIGN KEY (l1_batch_number) + REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) + ON DELETE CASCADE; diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql new file mode 100644 index 00000000000..c2417ba86b3 --- /dev/null +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE tee_proof_generation_details DROP CONSTRAINT 
tee_proof_generation_details_l1_batch_number_fkey; + +DROP TABLE IF EXISTS tee_verifier_input_producer_jobs; diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index f0d2f0c1671..fbe225beb90 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -23,8 +23,7 @@ use crate::{ snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal, sync_dal::SyncDal, system_dal::SystemDal, tee_proof_generation_dal::TeeProofGenerationDal, - tee_verifier_input_producer_dal::TeeVerifierInputProducerDal, tokens_dal::TokensDal, - tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, + tokens_dal::TokensDal, tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, transactions_web3_dal::TransactionsWeb3Dal, vm_runner_dal::VmRunnerDal, }; @@ -56,7 +55,6 @@ pub mod storage_web3_dal; pub mod sync_dal; pub mod system_dal; pub mod tee_proof_generation_dal; -pub mod tee_verifier_input_producer_dal; pub mod tokens_dal; pub mod tokens_web3_dal; pub mod transactions_dal; @@ -81,8 +79,6 @@ where fn transactions_web3_dal(&mut self) -> TransactionsWeb3Dal<'_, 'a>; - fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a>; - fn blocks_dal(&mut self) -> BlocksDal<'_, 'a>; fn blocks_web3_dal(&mut self) -> BlocksWeb3Dal<'_, 'a>; @@ -155,10 +151,6 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { TransactionsWeb3Dal { storage: self } } - fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a> { - TeeVerifierInputProducerDal { storage: self } - } - fn blocks_dal(&mut self) -> BlocksDal<'_, 'a> { BlocksDal { storage: self } } diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index db56b9d0e3e..d865212f190 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -10,10 +10,7 @@ use zksync_db_connection::{ }; use zksync_types::{tee_types::TeeType, L1BatchNumber}; -use crate::{ - models::storage_tee_proof::StorageTeeProof, - tee_verifier_input_producer_dal::TeeVerifierInputProducerJobStatus, Core, -}; +use crate::{models::storage_tee_proof::StorageTeeProof, Core}; #[derive(Debug)] pub struct TeeProofGenerationDal<'a, 'c> { @@ -39,61 +36,78 @@ impl TeeProofGenerationDal<'_, '_> { ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); - let query = sqlx::query!( + sqlx::query!( r#" - UPDATE tee_proof_generation_details - SET - status = $1, - updated_at = NOW(), - prover_taken_at = NOW() - WHERE - tee_type = $2 - AND l1_batch_number = ( - SELECT - proofs.l1_batch_number - FROM - tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number - WHERE - inputs.status = $3 - AND ( - proofs.status = $4 + WITH upsert AS ( + SELECT + p.l1_batch_number + FROM + proof_generation_details p + LEFT JOIN + l1_batches l1 + ON p.l1_batch_number = l1.number + LEFT JOIN + tee_proof_generation_details tee + ON + p.l1_batch_number = tee.l1_batch_number + AND tee.tee_type = $1 + WHERE + ( + p.l1_batch_number >= $5 + AND p.vm_run_data_blob_url IS NOT NULL + AND p.proof_gen_data_blob_url IS NOT NULL + AND l1.hash IS NOT NULL + AND l1.aux_data_hash IS NOT NULL + AND l1.meta_parameters_hash IS NOT NULL + ) + AND ( + tee.l1_batch_number IS NULL + OR ( + tee.status = $3 OR ( - 
proofs.status = $1 - AND proofs.prover_taken_at < NOW() - $5::INTERVAL + tee.status = $2 + AND tee.prover_taken_at < NOW() - $4::INTERVAL ) ) - AND proofs.l1_batch_number >= $6 - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) + ) + FETCH FIRST ROW ONLY + ) + + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at + ) + SELECT + l1_batch_number, + $1, + $2, + NOW(), + NOW(), + NOW() + FROM + upsert + ON CONFLICT (l1_batch_number, tee_type) DO + UPDATE + SET + status = $2, + updated_at = NOW(), + prover_taken_at = NOW() RETURNING - tee_proof_generation_details.l1_batch_number + l1_batch_number "#, - TeeProofGenerationJobStatus::PickedByProver.to_string(), tee_type.to_string(), - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, + TeeProofGenerationJobStatus::PickedByProver.to_string(), TeeProofGenerationJobStatus::Unpicked.to_string(), processing_timeout, min_batch_number - ); - - let batch_number = Instrumented::new("lock_batch_for_proving") - .with_arg("tee_type", &tee_type) - .with_arg("processing_timeout", &processing_timeout) - .with_arg("l1_batch_number", &min_batch_number) - .with(query) - .fetch_optional(self.storage) - .await? - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - - Ok(batch_number) + ) + .instrument("lock_batch_for_proving") + .with_arg("tee_type", &tee_type) + .with_arg("processing_timeout", &processing_timeout) + .with_arg("l1_batch_number", &min_batch_number) + .fetch_optional(self.storage) + .await + .map(|record| record.map(|record| L1BatchNumber(record.l1_batch_number as u32))) } pub async fn unlock_batch( @@ -176,38 +190,6 @@ impl TeeProofGenerationDal<'_, '_> { Ok(()) } - pub async fn insert_tee_proof_generation_job( - &mut self, - batch_number: L1BatchNumber, - tee_type: TeeType, - ) -> DalResult<()> { - let batch_number = i64::from(batch_number.0); - let query = sqlx::query!( - r#" - INSERT INTO - tee_proof_generation_details ( - l1_batch_number, tee_type, status, created_at, updated_at - ) - VALUES - ($1, $2, $3, NOW(), NOW()) - ON CONFLICT (l1_batch_number, tee_type) DO NOTHING - "#, - batch_number, - tee_type.to_string(), - TeeProofGenerationJobStatus::Unpicked.to_string(), - ); - let instrumentation = Instrumented::new("insert_tee_proof_generation_job") - .with_arg("l1_batch_number", &batch_number) - .with_arg("tee_type", &tee_type); - instrumentation - .clone() - .with(query) - .execute(self.storage) - .await?; - - Ok(()) - } - pub async fn save_attestation(&mut self, pubkey: &[u8], attestation: &[u8]) -> DalResult<()> { let query = sqlx::query!( r#" @@ -271,6 +253,40 @@ impl TeeProofGenerationDal<'_, '_> { Ok(proofs) } + /// For testing purposes only. 
+ pub async fn insert_tee_proof_generation_job( + &mut self, + batch_number: L1BatchNumber, + tee_type: TeeType, + ) -> DalResult<()> { + let batch_number = i64::from(batch_number.0); + let query = sqlx::query!( + r#" + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at + ) + VALUES + ($1, $2, $3, NOW(), NOW()) + ON CONFLICT (l1_batch_number, tee_type) DO NOTHING + "#, + batch_number, + tee_type.to_string(), + TeeProofGenerationJobStatus::Unpicked.to_string(), + ); + let instrumentation = Instrumented::new("insert_tee_proof_generation_job") + .with_arg("l1_batch_number", &batch_number) + .with_arg("tee_type", &tee_type); + instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + + Ok(()) + } + + /// For testing purposes only. pub async fn get_oldest_unpicked_batch(&mut self) -> DalResult> { let query = sqlx::query!( r#" @@ -278,18 +294,13 @@ impl TeeProofGenerationDal<'_, '_> { proofs.l1_batch_number FROM tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number WHERE - inputs.status = $1 - AND proofs.status = $2 + proofs.status = $1 ORDER BY proofs.l1_batch_number ASC LIMIT 1 "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, TeeProofGenerationJobStatus::Unpicked.to_string(), ); let batch_number = Instrumented::new("get_oldest_unpicked_batch") diff --git a/core/lib/dal/src/tee_verifier_input_producer_dal.rs b/core/lib/dal/src/tee_verifier_input_producer_dal.rs deleted file mode 100644 index dddb451a2d7..00000000000 --- a/core/lib/dal/src/tee_verifier_input_producer_dal.rs +++ /dev/null @@ -1,234 +0,0 @@ -use std::time::{Duration, Instant}; - -use sqlx::postgres::types::PgInterval; -use zksync_db_connection::{ - connection::Connection, - error::DalResult, - instrument::InstrumentExt, - utils::{duration_to_naive_time, pg_interval_from_duration}, -}; -use zksync_types::L1BatchNumber; - -use crate::Core; - -#[derive(Debug)] -pub struct TeeVerifierInputProducerDal<'a, 'c> { - pub(crate) storage: &'a mut Connection<'c, Core>, -} - -/// The amount of attempts to process a job before giving up. -pub const JOB_MAX_ATTEMPT: i16 = 5; - -/// Time to wait for job to be processed -const JOB_PROCESSING_TIMEOUT: PgInterval = pg_interval_from_duration(Duration::from_secs(10 * 60)); - -/// Status of a job that the producer will work on. - -#[derive(Debug, sqlx::Type)] -#[sqlx(type_name = "tee_verifier_input_producer_job_status")] -pub enum TeeVerifierInputProducerJobStatus { - /// When the job is queued. Metadata calculator creates the job and marks it as queued. - Queued, - /// The job is not going to be processed. This state is designed for manual operations on DB. - /// It is expected to be used if some jobs should be skipped like: - /// - testing purposes (want to check a specific L1 Batch, I can mark everything before it skipped) - /// - trim down costs on some environments (if I've done breaking changes, - /// makes no sense to wait for everything to be processed, I can just skip them and save resources) - ManuallySkipped, - /// Currently being processed by one of the jobs. Transitory state, will transition to either - /// [`TeeVerifierInputProducerStatus::Successful`] or [`TeeVerifierInputProducerStatus::Failed`]. - InProgress, - /// The final (happy case) state we expect all jobs to end up. After the run is complete, - /// the job uploaded it's inputs, it lands in successful. 
- Successful, - /// The job failed for reasons. It will be marked as such and the error persisted in DB. - /// If it failed less than MAX_ATTEMPTs, the job will be retried, - /// otherwise it will stay in this state as final state. - Failed, -} - -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn create_tee_verifier_input_producer_job( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult<()> { - sqlx::query!( - r#" - INSERT INTO - tee_verifier_input_producer_jobs ( - l1_batch_number, status, created_at, updated_at - ) - VALUES - ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING - "#, - i64::from(l1_batch_number.0), - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - ) - .instrument("create_tee_verifier_input_producer_job") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn get_next_tee_verifier_input_producer_job( - &mut self, - ) -> DalResult> { - let l1_batch_number = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - attempts = attempts + 1, - updated_at = NOW(), - processing_started_at = NOW() - WHERE - l1_batch_number = ( - SELECT - l1_batch_number - FROM - tee_verifier_input_producer_jobs - WHERE - status = $2 - OR ( - status = $1 - AND processing_started_at < NOW() - $4::INTERVAL - ) - OR ( - status = $3 - AND attempts < $5 - ) - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) - RETURNING - tee_verifier_input_producer_jobs.l1_batch_number - "#, - TeeVerifierInputProducerJobStatus::InProgress as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - &JOB_PROCESSING_TIMEOUT, - JOB_MAX_ATTEMPT, - ) - .instrument("get_next_tee_verifier_input_producer_job") - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| L1BatchNumber(job.l1_batch_number as u32)); - - Ok(l1_batch_number) - } - - pub async fn get_tee_verifier_input_producer_job_attempts( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - tee_verifier_input_producer_jobs - WHERE - l1_batch_number = $1 - "#, - i64::from(l1_batch_number.0), - ) - .instrument("get_tee_verifier_input_producer_job_attempts") - .with_arg("l1_batch_number", &l1_batch_number) - .fetch_optional(self.storage) - .await? 
-            .map(|job| job.attempts as u32);
-
-        Ok(attempts)
-    }
-
-    pub async fn mark_job_as_successful(
-        &mut self,
-        l1_batch_number: L1BatchNumber,
-        started_at: Instant,
-        object_path: &str,
-    ) -> DalResult<()> {
-        sqlx::query!(
-            r#"
-            UPDATE tee_verifier_input_producer_jobs
-            SET
-                status = $1,
-                updated_at = NOW(),
-                time_taken = $3,
-                input_blob_url = $4
-            WHERE
-                l1_batch_number = $2
-            "#,
-            TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus,
-            i64::from(l1_batch_number.0),
-            duration_to_naive_time(started_at.elapsed()),
-            object_path,
-        )
-        .instrument("mark_job_as_successful")
-        .with_arg("l1_batch_number", &l1_batch_number)
-        .report_latency()
-        .execute(self.storage)
-        .await?;
-
-        Ok(())
-    }
-
-    pub async fn mark_job_as_failed(
-        &mut self,
-        l1_batch_number: L1BatchNumber,
-        started_at: Instant,
-        error: String,
-    ) -> DalResult<Option<u32>> {
-        let attempts = sqlx::query!(
-            r#"
-            UPDATE tee_verifier_input_producer_jobs
-            SET
-                status = $1,
-                updated_at = NOW(),
-                time_taken = $3,
-                error = $4
-            WHERE
-                l1_batch_number = $2
-                AND status != $5
-            RETURNING
-                tee_verifier_input_producer_jobs.attempts
-            "#,
-            TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus,
-            i64::from(l1_batch_number.0),
-            duration_to_naive_time(started_at.elapsed()),
-            error,
-            TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus,
-        )
-        .instrument("mark_job_as_failed")
-        .with_arg("l1_batch_number", &l1_batch_number)
-        .report_latency()
-        .fetch_optional(self.storage)
-        .await?
-        .map(|job| job.attempts as u32);
-
-        Ok(attempts)
-    }
-}
-
-/// These functions should only be used for tests.
-impl TeeVerifierInputProducerDal<'_, '_> {
-    pub async fn delete_all_jobs(&mut self) -> DalResult<()> {
-        sqlx::query!(
-            r#"
-            DELETE FROM tee_verifier_input_producer_jobs
-            "#
-        )
-        .instrument("delete_all_tee_verifier_jobs")
-        .execute(self.storage)
-        .await?;
-        Ok(())
-    }
-}
diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs
index 308cd65427f..3484f2dad34 100644
--- a/core/lib/object_store/src/file.rs
+++ b/core/lib/object_store/src/file.rs
@@ -42,7 +42,6 @@ impl FileBackedObjectStore {
             Bucket::SchedulerWitnessJobsFri,
             Bucket::ProofsFri,
             Bucket::StorageSnapshot,
-            Bucket::TeeVerifierInput,
             Bucket::VmDumps,
         ] {
             let bucket_path = format!("{base_dir}/{bucket}");
diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs
index 740e8d76e24..0859d58d04b 100644
--- a/core/lib/object_store/src/raw.rs
+++ b/core/lib/object_store/src/raw.rs
@@ -19,7 +19,6 @@ pub enum Bucket {
     ProofsTee,
     StorageSnapshot,
     DataAvailability,
-    TeeVerifierInput,
     VmDumps,
 }
 
@@ -39,7 +38,6 @@ impl Bucket {
             Self::ProofsTee => "proofs_tee",
             Self::StorageSnapshot => "storage_logs_snapshots",
             Self::DataAvailability => "data_availability",
-            Self::TeeVerifierInput => "tee_verifier_inputs",
             Self::VmDumps => "vm_dumps",
         }
     }
diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs
index 776cd3141cb..acf104cc4c6 100644
--- a/core/lib/prover_interface/src/api.rs
+++ b/core/lib/prover_interface/src/api.rs
@@ -31,7 +31,7 @@ pub enum ProofGenerationDataResponse {
 }
 
 #[derive(Debug, Serialize, Deserialize)]
-pub struct TeeProofGenerationDataResponse(pub Option<Box<TeeVerifierInput>>);
+pub struct TeeProofGenerationDataResponse(pub Box<TeeVerifierInput>);
 
 #[derive(Debug, Serialize, Deserialize)]
 pub enum SubmitProofResponse {
diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs
index 28bc1998312..97de24f42da
100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -136,7 +136,7 @@ impl WitnessInputMerklePaths { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct VMRunWitnessInputData { pub l1_batch_number: L1BatchNumber, pub used_bytecodes: HashMap>, @@ -205,7 +205,7 @@ impl StoredObject for VMRunWitnessInputData { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, pub merkle_paths: WitnessInputMerklePaths, @@ -254,7 +254,7 @@ impl StoredObject for WitnessInputData { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct L1BatchMetadataHashes { pub root_hash: H256, pub meta_hash: H256, @@ -264,27 +264,27 @@ pub struct L1BatchMetadataHashes { /// Version 1 of the data used as input for the TEE verifier. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct V1TeeVerifierInput { - pub witness_input_merkle_paths: WitnessInputMerklePaths, + pub vm_run_data: VMRunWitnessInputData, + pub merkle_paths: WitnessInputMerklePaths, pub l2_blocks_execution_data: Vec, pub l1_batch_env: L1BatchEnv, pub system_env: SystemEnv, - pub used_contracts: Vec<(H256, Vec)>, } impl V1TeeVerifierInput { pub fn new( - witness_input_merkle_paths: WitnessInputMerklePaths, + vm_run_data: VMRunWitnessInputData, + merkle_paths: WitnessInputMerklePaths, l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, - used_contracts: Vec<(H256, Vec)>, ) -> Self { V1TeeVerifierInput { - witness_input_merkle_paths, + vm_run_data, + merkle_paths, l2_blocks_execution_data, l1_batch_env, system_env, - used_contracts, } } } @@ -305,17 +305,6 @@ impl TeeVerifierInput { } } -impl StoredObject for TeeVerifierInput { - const BUCKET: Bucket = Bucket::TeeVerifierInput; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("tee_verifier_input_for_l1_batch_{key}.bin") - } - - serialize_using_bincode!(); -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/tee_verifier/Cargo.toml b/core/lib/tee_verifier/Cargo.toml index 6828eeef8b1..331c47e365e 100644 --- a/core/lib/tee_verifier/Cargo.toml +++ b/core/lib/tee_verifier/Cargo.toml @@ -11,18 +11,21 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_multivm.workspace = true zksync_config.workspace = true zksync_crypto_primitives.workspace = true zksync_merkle_tree.workspace = true -zksync_object_store.workspace = true +zksync_multivm.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true zksync_utils.workspace = true anyhow.workspace = true +once_cell.workspace = true serde.workspace = true tracing.workspace = true [dev-dependencies] zksync_contracts.workspace = true +zksync_prover_interface.workspace = true + +bincode.workspace = true diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 86b563f823e..ffe3a548a02 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -4,16 +4,14 @@ //! executing the VM and verifying all the accessed memory slots by their //! merkle path. 
-use std::{cell::RefCell, rc::Rc}; - -use anyhow::Context; +use anyhow::{bail, Context, Result}; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }; use zksync_multivm::{ interface::{ - storage::{InMemoryStorage, ReadStorage, StorageView}, + storage::{ReadStorage, StorageSnapshot, StorageView}, FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, @@ -23,8 +21,10 @@ use zksync_multivm::{ use zksync_prover_interface::inputs::{ StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, StorageLog, Transaction, H256}; -use zksync_utils::bytecode::hash_bytecode; +use zksync_types::{ + block::L2BlockExecutionData, L1BatchNumber, StorageLog, StorageValue, Transaction, H256, +}; +use zksync_utils::u256_to_h256; /// A structure to hold the result of verification. pub struct VerificationResult { @@ -50,30 +50,48 @@ impl Verify for V1TeeVerifierInput { /// not actionable. fn verify(self) -> anyhow::Result { let old_root_hash = self.l1_batch_env.previous_batch_hash.unwrap(); - let l2_chain_id = self.system_env.chain_id; - let enumeration_index = self.witness_input_merkle_paths.next_enumeration_index(); + let enumeration_index = self.merkle_paths.next_enumeration_index(); + let batch_number = self.l1_batch_env.number; - let mut raw_storage = InMemoryStorage::with_custom_system_contracts_and_chain_id( - l2_chain_id, - hash_bytecode, - Vec::with_capacity(0), - ); + let read_storage_ops = self + .vm_run_data + .witness_block_state + .read_storage_key + .into_iter(); - for (hash, bytes) in self.used_contracts.into_iter() { - tracing::trace!("raw_storage.store_factory_dep({hash}, bytes)"); - raw_storage.store_factory_dep(hash, bytes) - } + let initial_writes_ops = self + .vm_run_data + .witness_block_state + .is_write_initial + .into_iter(); - let block_output_with_proofs = - get_bowp_and_set_initial_values(self.witness_input_merkle_paths, &mut raw_storage); + // We need to define storage slots read during batch execution, and their initial state; + // hence, the use of both read_storage_ops and initial_writes_ops. + // StorageSnapshot also requires providing enumeration indices, + // but they only matter at the end of execution when creating pubdata for the batch, + // which is irrelevant in this case. Thus, enumeration indices are set to dummy values. 
+        let storage = read_storage_ops
+            .enumerate()
+            .map(|(i, (hash, bytes))| (hash.hashed_key(), Some((bytes, i as u64 + 1u64))))
+            .chain(initial_writes_ops.filter_map(|(key, initial_write)| {
+                initial_write.then_some((key.hashed_key(), None))
+            }))
+            .collect();
 
-        let storage_view = Rc::new(RefCell::new(StorageView::new(&raw_storage)));
+        let factory_deps = self
+            .vm_run_data
+            .used_bytecodes
+            .into_iter()
+            .map(|(hash, bytes)| (u256_to_h256(hash), bytes.into_flattened()))
+            .collect();
 
-        let batch_number = self.l1_batch_env.number;
+        let storage_snapshot = StorageSnapshot::new(storage, factory_deps);
+        let storage_view = StorageView::new(storage_snapshot).to_rc_ptr();
         let vm = LegacyVmInstance::new(self.l1_batch_env, self.system_env, storage_view);
-
         let vm_out = execute_vm(self.l2_blocks_execution_data, vm)?;
 
+        let block_output_with_proofs = get_bowp(self.merkle_paths)?;
+
         let instructions: Vec<TreeInstruction> =
             generate_tree_instructions(enumeration_index, &block_output_with_proofs, vm_out)?;
 
@@ -89,11 +107,8 @@ impl Verify for V1TeeVerifierInput {
 }
 
 /// Sets the initial storage values and returns `BlockOutputWithProofs`
-fn get_bowp_and_set_initial_values(
-    witness_input_merkle_paths: WitnessInputMerklePaths,
-    raw_storage: &mut InMemoryStorage,
-) -> BlockOutputWithProofs {
-    let logs = witness_input_merkle_paths
+fn get_bowp(witness_input_merkle_paths: WitnessInputMerklePaths) -> Result<BlockOutputWithProofs> {
+    let logs_result: Result<_, _> = witness_input_merkle_paths
         .into_merkle_paths()
         .map(
             |StorageLogMetadata {
@@ -110,29 +125,31 @@ fn get_bowp_and_set_initial_values(
                 let merkle_path = merkle_paths.into_iter().map(|x| x.into()).collect();
                 let base: TreeLogEntry = match (is_write, first_write, leaf_enumeration_index) {
                     (false, _, 0) => TreeLogEntry::ReadMissingKey,
-                    (false, _, _) => {
+                    (false, false, _) => {
                         // This is a special U256 here, which needs `to_little_endian`
                         let mut hashed_key = [0_u8; 32];
                         leaf_storage_key.to_little_endian(&mut hashed_key);
-                        raw_storage.set_value_hashed_enum(
-                            hashed_key.into(),
-                            leaf_enumeration_index,
-                            value_read.into(),
+                        tracing::trace!(
+                            "TreeLogEntry::Read {leaf_storage_key:x} = {:x}",
+                            StorageValue::from(value_read)
                         );
                         TreeLogEntry::Read {
                             leaf_index: leaf_enumeration_index,
                             value: value_read.into(),
                         }
                     }
+                    (false, true, _) => {
+                        tracing::error!("get_bowp is_write = false, first_write = true");
+                        bail!("get_bowp is_write = false, first_write = true");
+                    }
                     (true, true, _) => TreeLogEntry::Inserted,
                     (true, false, _) => {
                         // This is a special U256 here, which needs `to_little_endian`
                         let mut hashed_key = [0_u8; 32];
                         leaf_storage_key.to_little_endian(&mut hashed_key);
-                        raw_storage.set_value_hashed_enum(
-                            hashed_key.into(),
-                            leaf_enumeration_index,
-                            value_read.into(),
+                        tracing::trace!(
+                            "TreeLogEntry::Updated {leaf_storage_key:x} = {:x}",
+                            StorageValue::from(value_read)
                         );
                         TreeLogEntry::Updated {
                             leaf_index: leaf_enumeration_index,
@@ -140,19 +157,21 @@ fn get_bowp_and_set_initial_values(
                         }
                     }
                 };
-                TreeLogEntryWithProof {
+                Ok(TreeLogEntryWithProof {
                     base,
                     merkle_path,
                     root_hash,
-                }
+                })
             },
         )
         .collect();
 
-    BlockOutputWithProofs {
+    let logs: Vec<TreeLogEntryWithProof> = logs_result?;
+
+    Ok(BlockOutputWithProofs {
         logs,
         leaf_count: 0,
-    }
+    })
 }
 
 /// Executes the VM and returns `FinishedL1Batch` on success.
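An aside on the snapshot construction in the hunk above (an editor's sketch, not part of the patch): the map handed to `StorageSnapshot::new` associates hashed storage keys with `Option<(value, enumeration index)>`, where slots read during the batch carry a dummy index and slots first written in the batch are present as `None`. A minimal illustration, assuming `StorageSnapshot::new(storage, factory_deps)` accepts exactly the two maps built above; every key and value here is invented:

```rust
use std::collections::HashMap;

use zksync_multivm::interface::storage::StorageSnapshot;
use zksync_types::H256;

fn illustrative_snapshot() -> StorageSnapshot {
    let read_key = H256::from_low_u64_be(1);
    let fresh_write_key = H256::from_low_u64_be(2);
    let read_value = H256::from_low_u64_be(42);

    let storage: HashMap<H256, Option<(H256, u64)>> = HashMap::from([
        // A slot read during the batch: its value plus a dummy enumeration index.
        (read_key, Some((read_value, 1))),
        // A slot first written in this batch: known to the snapshot, but with no prior value.
        (fresh_write_key, None),
    ]);
    // No factory deps are needed for this toy example.
    StorageSnapshot::new(storage, HashMap::new())
}
```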
@@ -176,11 +195,17 @@ fn execute_vm( .context("failed to execute transaction in TeeVerifierInputProducer")?; tracing::trace!("Finished execution of tx: {tx:?}"); } + + tracing::trace!("finished l2_block {l2_block_data:?}"); + tracing::trace!("about to vm.start_new_l2_block {next_l2_block_data:?}"); + vm.start_new_l2_block(L2BlockEnv::from_l2_block_data(next_l2_block_data)); tracing::trace!("Finished execution of l2_block: {:?}", l2_block_data.number); } + tracing::trace!("about to vm.finish_batch()"); + Ok(vm.finish_batch()) } @@ -191,7 +216,7 @@ fn map_log_tree( idx: &mut u64, ) -> anyhow::Result { let key = storage_log.key.hashed_key_u256(); - Ok(match (storage_log.is_write(), *tree_log_entry) { + let tree_instruction = match (storage_log.is_write(), *tree_log_entry) { (true, TreeLogEntry::Updated { leaf_index, .. }) => { TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) } @@ -203,24 +228,31 @@ fn map_log_tree( (false, TreeLogEntry::Read { value, .. }) => { if storage_log.value != value { tracing::error!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", - storage_log.value, - value - ); - anyhow::bail!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction: read value {:#?} != {:#?}", storage_log.value, value ); + anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } TreeInstruction::Read(key) } (false, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), - _ => { - tracing::error!("Failed to map LogQuery to TreeInstruction"); + (true, TreeLogEntry::Read { .. }) + | (true, TreeLogEntry::ReadMissingKey) + | (false, TreeLogEntry::Inserted) + | (false, TreeLogEntry::Updated { .. }) => { + tracing::error!( + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction" + ); anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } - }) + }; + + Ok(tree_instruction) } /// Generates the `TreeInstruction`s from the VM executions. 
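For orientation (an editor's sketch, not part of the patch): with the `Verify` implementation above, an in-enclave consumer unpacks the versioned input and calls `verify()`. The sketch assumes the `TeeVerifierInput::V1` variant used in the tests below and the `anyhow::Result<VerificationResult>` return type implied by this file:

```rust
use anyhow::Context;
use zksync_prover_interface::inputs::TeeVerifierInput;
use zksync_tee_verifier::Verify;

fn verify_batch(input: TeeVerifierInput) -> anyhow::Result<()> {
    match input {
        TeeVerifierInput::V1(input) => {
            // Re-executes the batch and checks every accessed slot against its merkle path.
            let result = input.verify().context("TEE batch verification failed")?;
            // A real enclave would sign `result` and report it to the proof data handler.
            let _ = result;
            Ok(())
        }
        _ => anyhow::bail!("unsupported TeeVerifierInput version"),
    }
}
```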
@@ -269,8 +301,7 @@ fn execute_tx( mod tests { use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv, TxExecutionMode}; - use zksync_object_store::StoredObject; - use zksync_prover_interface::inputs::TeeVerifierInput; + use zksync_prover_interface::inputs::{TeeVerifierInput, VMRunWitnessInputData}; use zksync_types::U256; use super::*; @@ -278,6 +309,18 @@ mod tests { #[test] fn test_v1_serialization() { let tvi = V1TeeVerifierInput::new( + VMRunWitnessInputData { + l1_batch_number: Default::default(), + used_bytecodes: Default::default(), + initial_heap_content: vec![], + protocol_version: Default::default(), + bootloader_code: vec![], + default_account_code_hash: Default::default(), + evm_emulator_code_hash: Some(Default::default()), + storage_refunds: vec![], + pubdata_costs: vec![], + witness_block_state: Default::default(), + }, WitnessInputMerklePaths::new(0), vec![], L1BatchEnv { @@ -313,14 +356,11 @@ mod tests { default_validation_computational_gas_limit: 0, chain_id: Default::default(), }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], ); let tvi = TeeVerifierInput::new(tvi); - let serialized = ::serialize(&tvi) - .expect("Failed to serialize TeeVerifierInput."); + let serialized = bincode::serialize(&tvi).expect("Failed to serialize TeeVerifierInput."); let deserialized: TeeVerifierInput = - ::deserialize(serialized) - .expect("Failed to deserialize TeeVerifierInput."); + bincode::deserialize(&serialized).expect("Failed to deserialize TeeVerifierInput."); assert_eq!(tvi, deserialized); } diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index bce9cc9034d..7f3195af873 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{StorageKey, StorageValue}; /// Storage data used during Witness Generation. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct WitnessStorageState { pub read_storage_key: HashMap, pub is_write_initial: HashMap, diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 9d399bdd0af..87fb7ea28f7 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -43,9 +43,6 @@ pub enum Component { EthTxManager, /// State keeper. StateKeeper, - /// Produces input for the TEE verifier. - /// The blob is later used as input for TEE verifier. - TeeVerifierInputProducer, /// Component for housekeeping task such as cleaning blobs from GCS, reporting metrics etc. Housekeeper, /// Component for exposing APIs to prover for providing proof generation data and accepting proofs. 
@@ -88,9 +85,6 @@ impl FromStr for Components { "tree_api" => Ok(Components(vec![Component::TreeApi])), "state_keeper" => Ok(Components(vec![Component::StateKeeper])), "housekeeper" => Ok(Components(vec![Component::Housekeeper])), - "tee_verifier_input_producer" => { - Ok(Components(vec![Component::TeeVerifierInputProducer])) - } "eth" => Ok(Components(vec![ Component::EthWatcher, Component::EthTxAggregator, diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs index 1d425f8b951..e814081afa0 100644 --- a/core/node/api_server/src/web3/tests/unstable.rs +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -27,14 +27,9 @@ impl HttpTest for GetTeeProofsTest { assert!(proof.is_empty()); - let mut storage = pool.connection().await.unwrap(); - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(batch_no) - .await?; - let pubkey = vec![0xDE, 0xAD, 0xBE, 0xEF]; let attestation = vec![0xC0, 0xFF, 0xEE]; + let mut storage = pool.connection().await.unwrap(); let mut tee_proof_generation_dal = storage.tee_proof_generation_dal(); tee_proof_generation_dal .save_attestation(&pubkey, &attestation) diff --git a/core/node/metadata_calculator/src/updater.rs b/core/node/metadata_calculator/src/updater.rs index e2acf62dea8..17fd5d900ea 100644 --- a/core/node/metadata_calculator/src/updater.rs +++ b/core/node/metadata_calculator/src/updater.rs @@ -152,10 +152,6 @@ impl TreeUpdater { // right away without having to implement dedicated code. if let Some(object_key) = &object_key { - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(l1_batch_number) - .await?; // Save the proof generation details to Postgres storage .proof_generation_dal() diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 1df47e77553..d85f3dc7c8e 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -45,7 +45,6 @@ zksync_node_sync.workspace = true zksync_node_api_server.workspace = true zksync_node_consensus.workspace = true zksync_contract_verification_server.workspace = true -zksync_tee_verifier_input_producer.workspace = true zksync_queued_job_processor.workspace = true zksync_reorg_detector.workspace = true zksync_vm_runner.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 75828da1902..11a62c9333b 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -33,7 +33,6 @@ pub mod reorg_detector; pub mod sigint; pub mod state_keeper; pub mod sync_state_updater; -pub mod tee_verifier_input_producer; pub mod tree_data_fetcher; pub mod validate_chain_ids; pub mod vm_runner; diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index b53ff73c1a0..3e1269caa4e 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; use crate::{ implementations::resources::{ @@ 
-21,6 +21,7 @@ use crate::{ pub struct ProofDataHandlerLayer { proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[derive(Debug, FromContext)] @@ -41,10 +42,12 @@ impl ProofDataHandlerLayer { pub fn new( proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Self { Self { proof_data_handler_config, commitment_mode, + l2_chain_id, } } } @@ -67,6 +70,7 @@ impl WiringLayer for ProofDataHandlerLayer { blob_store, main_pool, commitment_mode: self.commitment_mode, + l2_chain_id: self.l2_chain_id, }; Ok(Output { task }) @@ -79,6 +83,7 @@ pub struct ProofDataHandlerTask { blob_store: Arc, main_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[async_trait::async_trait] @@ -93,6 +98,7 @@ impl Task for ProofDataHandlerTask { self.blob_store, self.main_pool, self.commitment_mode, + self.l2_chain_id, stop_receiver.0, ) .await diff --git a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs deleted file mode 100644 index 68789082a22..00000000000 --- a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier_input_producer::TeeVerifierInputProducer; -use zksync_types::L2ChainId; - -use crate::{ - implementations::resources::{ - object_store::ObjectStoreResource, - pools::{MasterPool, PoolResource}, - }, - service::StopReceiver, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, - FromContext, IntoContext, -}; - -/// Wiring layer for [`TeeVerifierInputProducer`]. 
-#[derive(Debug)] -pub struct TeeVerifierInputProducerLayer { - l2_chain_id: L2ChainId, -} - -impl TeeVerifierInputProducerLayer { - pub fn new(l2_chain_id: L2ChainId) -> Self { - Self { l2_chain_id } - } -} - -#[derive(Debug, FromContext)] -#[context(crate = crate)] -pub struct Input { - pub master_pool: PoolResource, - pub object_store: ObjectStoreResource, -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - #[context(task)] - pub task: TeeVerifierInputProducer, -} - -#[async_trait::async_trait] -impl WiringLayer for TeeVerifierInputProducerLayer { - type Input = Input; - type Output = Output; - - fn layer_name(&self) -> &'static str { - "tee_verifier_input_producer_layer" - } - - async fn wire(self, input: Self::Input) -> Result { - let pool = input.master_pool.get().await?; - let ObjectStoreResource(object_store) = input.object_store; - let task = TeeVerifierInputProducer::new(pool, object_store, self.l2_chain_id).await?; - - Ok(Output { task }) - } -} - -#[async_trait::async_trait] -impl Task for TeeVerifierInputProducer { - fn id(&self) -> TaskId { - "tee_verifier_input_producer".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0, None).await - } -} diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 82063b23fdb..76dc89eda04 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -17,6 +17,8 @@ zksync_dal.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true +zksync_vm_executor.workspace = true +zksync_utils.workspace = true anyhow.workspace = true axum.workspace = true tokio.workspace = true diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs index 15ef393294a..7d0e33ea0a3 100644 --- a/core/node/proof_data_handler/src/errors.rs +++ b/core/node/proof_data_handler/src/errors.rs @@ -6,6 +6,7 @@ use zksync_dal::DalError; use zksync_object_store::ObjectStoreError; pub(crate) enum RequestProcessorError { + GeneralError(String), ObjectStore(ObjectStoreError), Dal(DalError), } @@ -19,24 +20,26 @@ impl From for RequestProcessorError { impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { let (status_code, message) = match self { - RequestProcessorError::ObjectStore(err) => { + Self::GeneralError(err) => { + tracing::error!("Error: {:?}", err); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "An internal error occurred".to_owned(), + ) + } + Self::ObjectStore(err) => { tracing::error!("GCS error: {:?}", err); ( StatusCode::BAD_GATEWAY, "Failed fetching/saving from GCS".to_owned(), ) } - RequestProcessorError::Dal(err) => { + Self::Dal(err) => { tracing::error!("Sqlx error: {:?}", err); - match err.inner() { - zksync_dal::SqlxError::RowNotFound => { - (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) - } - _ => ( - StatusCode::BAD_GATEWAY, - "Failed fetching/saving from db".to_owned(), - ), - } + ( + StatusCode::BAD_GATEWAY, + "Failed fetching/saving from db".to_owned(), + ) } }; (status_code, message).into_response() diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 51780f03230..a482a7bc07b 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -1,7 +1,7 @@ use std::{net::SocketAddr, sync::Arc}; use anyhow::Context as _; -use axum::{extract::Path, 
routing::post, Json, Router}; +use axum::{extract::Path, http::StatusCode, response::IntoResponse, routing::post, Json, Router}; use request_processor::RequestProcessor; use tee_request_processor::TeeRequestProcessor; use tokio::sync::watch; @@ -12,7 +12,7 @@ use zksync_prover_interface::api::{ ProofGenerationDataRequest, RegisterTeeAttestationRequest, SubmitProofRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest, }; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; #[cfg(test)] mod tests; @@ -27,11 +27,18 @@ pub async fn run_server( blob_store: Arc, connection_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); tracing::info!("Starting proof data handler server on {bind_address}"); - let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode); + let app = create_proof_processing_router( + blob_store, + connection_pool, + config, + commitment_mode, + l2_chain_id, + ); let listener = tokio::net::TcpListener::bind(bind_address) .await @@ -54,6 +61,7 @@ fn create_proof_processing_router( connection_pool: ConnectionPool, config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Router { let get_proof_gen_processor = RequestProcessor::new( blob_store.clone(), @@ -88,7 +96,7 @@ fn create_proof_processing_router( if config.tee_support { let get_tee_proof_gen_processor = - TeeRequestProcessor::new(blob_store, connection_pool, config.clone()); + TeeRequestProcessor::new(blob_store, connection_pool, config.clone(), l2_chain_id); let submit_tee_proof_processor = get_tee_proof_gen_processor.clone(); let register_tee_attestation_processor = get_tee_proof_gen_processor.clone(); @@ -96,9 +104,15 @@ fn create_proof_processing_router( "/tee/proof_inputs", post( move |payload: Json| async move { - get_tee_proof_gen_processor + let result = get_tee_proof_gen_processor .get_proof_generation_data(payload) - .await + .await; + + match result { + Ok(Some(data)) => (StatusCode::OK, data).into_response(), + Ok(None) => { StatusCode::NO_CONTENT.into_response()}, + Err(e) => e.into_response(), + } }, ), ) diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 4ae1a5026f1..2c2a5630009 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -4,11 +4,17 @@ use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_object_store::{ObjectStore, ObjectStoreError}; -use zksync_prover_interface::api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, - SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, +use zksync_prover_interface::{ + api::{ + RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, + SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, + }, + inputs::{ + TeeVerifierInput, V1TeeVerifierInput, VMRunWitnessInputData, WitnessInputMerklePaths, + }, }; -use zksync_types::{tee_types::TeeType, L1BatchNumber}; +use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; +use 
zksync_vm_executor::storage::L1BatchParamsProvider;
 
 use crate::errors::RequestProcessorError;
 
@@ -17,6 +23,7 @@ pub(crate) struct TeeRequestProcessor {
     blob_store: Arc<dyn ObjectStore>,
     pool: ConnectionPool<Core>,
     config: ProofDataHandlerConfig,
+    l2_chain_id: L2ChainId,
 }
 
 impl TeeRequestProcessor {
@@ -24,35 +31,42 @@ impl TeeRequestProcessor {
         blob_store: Arc<dyn ObjectStore>,
         pool: ConnectionPool<Core>,
         config: ProofDataHandlerConfig,
+        l2_chain_id: L2ChainId,
     ) -> Self {
         Self {
             blob_store,
             pool,
             config,
+            l2_chain_id,
         }
     }
 
     pub(crate) async fn get_proof_generation_data(
         &self,
         request: Json<TeeProofGenerationDataRequest>,
-    ) -> Result<Json<TeeProofGenerationDataResponse>, RequestProcessorError> {
+    ) -> Result<Option<Json<TeeProofGenerationDataResponse>>, RequestProcessorError> {
         tracing::info!("Received request for proof generation data: {:?}", request);
 
         let mut min_batch_number: Option<L1BatchNumber> = None;
         let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None;
 
         let result = loop {
-            let l1_batch_number = match self
+            let Some(l1_batch_number) = self
                 .lock_batch_for_proving(request.tee_type, min_batch_number)
                 .await?
-            {
-                Some(number) => number,
-                None => break Ok(Json(TeeProofGenerationDataResponse(None))),
+            else {
+                // No job available
+                return Ok(None);
             };
 
-            match self.blob_store.get(l1_batch_number).await {
-                Ok(input) => break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))),
-                Err(ObjectStoreError::KeyNotFound(_)) => {
+            match self
+                .tee_verifier_input_for_existing_batch(l1_batch_number)
+                .await
+            {
+                Ok(input) => {
+                    break Ok(Some(Json(TeeProofGenerationDataResponse(Box::new(input)))));
+                }
+                Err(RequestProcessorError::ObjectStore(ObjectStoreError::KeyNotFound(_))) => {
                     missing_range = match missing_range {
                         Some((start, _)) => Some((start, l1_batch_number)),
                         None => Some((l1_batch_number, l1_batch_number)),
@@ -62,7 +76,7 @@ impl TeeRequestProcessor {
                 }
                 Err(err) => {
                     self.unlock_batch(l1_batch_number, request.tee_type).await?;
-                    break Err(RequestProcessorError::ObjectStore(err));
+                    break Err(err);
                 }
             }
         };
@@ -78,14 +92,73 @@ impl TeeRequestProcessor {
         result
     }
 
+    #[tracing::instrument(skip(self))]
+    async fn tee_verifier_input_for_existing_batch(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> Result<TeeVerifierInput, RequestProcessorError> {
+        let vm_run_data: VMRunWitnessInputData = self
+            .blob_store
+            .get(l1_batch_number)
+            .await
+            .map_err(RequestProcessorError::ObjectStore)?;
+
+        let merkle_paths: WitnessInputMerklePaths = self
+            .blob_store
+            .get(l1_batch_number)
+            .await
+            .map_err(RequestProcessorError::ObjectStore)?;
+
+        let mut connection = self
+            .pool
+            .connection_tagged("tee_request_processor")
+            .await
+            .map_err(RequestProcessorError::Dal)?;
+
+        let l2_blocks_execution_data = connection
+            .transactions_dal()
+            .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number)
+            .await
+            .map_err(RequestProcessorError::Dal)?;
+
+        let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection)
+            .await
+            .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))?;
+
+        // In the state keeper, this value is used to reject execution.
+        // All batches have already been executed by State Keeper.
+        // This means we don't want to reject any execution, therefore we're using MAX as an allow all.
+        let validation_computational_gas_limit = u32::MAX;
+
+        let (system_env, l1_batch_env) = l1_batch_params_provider
+            .load_l1_batch_env(
+                &mut connection,
+                l1_batch_number,
+                validation_computational_gas_limit,
+                self.l2_chain_id,
+            )
+            .await
+            .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))?
+ .ok_or(RequestProcessorError::GeneralError( + "system_env, l1_batch_env missing".into(), + ))?; + + Ok(TeeVerifierInput::new(V1TeeVerifierInput { + vm_run_data, + merkle_paths, + l2_blocks_execution_data, + l1_batch_env, + system_env, + })) + } + async fn lock_batch_for_proving( &self, tee_type: TeeType, min_batch_number: Option, ) -> Result, RequestProcessorError> { - let result = self - .pool - .connection() + self.pool + .connection_tagged("tee_request_processor") .await? .tee_proof_generation_dal() .lock_batch_for_proving( @@ -93,8 +166,8 @@ impl TeeRequestProcessor { self.config.proof_generation_timeout(), min_batch_number, ) - .await?; - Ok(result) + .await + .map_err(RequestProcessorError::Dal) } async fn unlock_batch( @@ -103,7 +176,7 @@ impl TeeRequestProcessor { tee_type: TeeType, ) -> Result<(), RequestProcessorError> { self.pool - .connection() + .connection_tagged("tee_request_processor") .await? .tee_proof_generation_dal() .unlock_batch(l1_batch_number, tee_type) @@ -117,7 +190,7 @@ impl TeeRequestProcessor { Json(proof): Json, ) -> Result, RequestProcessorError> { let l1_batch_number = L1BatchNumber(l1_batch_number); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); tracing::info!( @@ -143,7 +216,7 @@ impl TeeRequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received attestation: {:?}", payload); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); dal.save_attestation(&payload.pubkey, &payload.attestation) diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 8220aef5da0..a10044cacd9 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -1,5 +1,3 @@ -use std::time::Instant; - use axum::{ body::Body, http::{self, Method, Request, StatusCode}, @@ -8,128 +6,64 @@ use axum::{ }; use serde_json::json; use tower::ServiceExt; -use zksync_basic_types::U256; +use zksync_basic_types::L2ChainId; use zksync_config::configs::ProofDataHandlerConfig; -use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::{ConnectionPool, CoreDal}; -use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_object_store::MockObjectStore; -use zksync_prover_interface::{ - api::SubmitTeeProofRequest, - inputs::{TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths}, -}; -use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber, H256}; +use zksync_prover_interface::api::SubmitTeeProofRequest; +use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber}; use crate::create_proof_processing_router; -// Test the /tee/proof_inputs endpoint by: -// 1. Mocking an object store with a single batch blob containing TEE verifier input -// 2. Populating the SQL db with relevant information about the status of the TEE verifier input and -// TEE proof generation -// 3. 
Sending a request to the /tee/proof_inputs endpoint and asserting that the response -// matches the file from the object store #[tokio::test] async fn request_tee_proof_inputs() { - // prepare a sample mocked TEE verifier input - - let batch_number = L1BatchNumber::from(1); - let tvi = V1TeeVerifierInput::new( - WitnessInputMerklePaths::new(0), - vec![], - L1BatchEnv { - previous_batch_hash: Some(H256([1; 32])), - number: batch_number, - timestamp: 0, - fee_input: Default::default(), - fee_account: Default::default(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 0, - timestamp: 0, - prev_block_hash: H256([1; 32]), - max_virtual_blocks_to_create: 0, - }, - }, - SystemEnv { - zk_porter_available: false, - version: Default::default(), - base_system_smart_contracts: BaseSystemContracts { - bootloader: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - default_aa: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - evm_emulator: None, - }, - bootloader_gas_limit: 0, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: 0, - chain_id: Default::default(), - }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], - ); - let tvi = TeeVerifierInput::V1(tvi); - - // populate mocked object store with a single batch blob - - let blob_store = MockObjectStore::arc(); - let object_path = blob_store.put(batch_number, &tvi).await.unwrap(); - - // get connection to the SQL db and mock the status of the TEE proof generation - let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, &object_path).await; - - // test the /tee/proof_inputs endpoint; it should return the batch from the object store let app = create_proof_processing_router( - blob_store, - db_conn_pool, + MockObjectStore::arc(), + db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, tee_support: true, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); - let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "sgx" })).unwrap()); - let response = app - .oneshot( - Request::builder() - .method(Method::POST) - .uri("/tee/proof_inputs") - .header(http::header::CONTENT_TYPE, "application/json") - .body(req_body) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - let deserialized: TeeVerifierInput = serde_json::from_value(json).unwrap(); - - assert_eq!(tvi, deserialized); + let test_cases = vec![ + (json!({ "tee_type": "sgx" }), StatusCode::NO_CONTENT), + ( + json!({ "tee_type": "Sgx" }), + StatusCode::UNPROCESSABLE_ENTITY, + ), + ]; + + for (body, expected_status) in test_cases { + let req_body = Body::from(serde_json::to_vec(&body).unwrap()); + let response = app + .clone() + .oneshot( + Request::builder() + .method(Method::POST) + .uri("/tee/proof_inputs") + .header(http::header::CONTENT_TYPE, "application/json") + .body(req_body) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), expected_status); + } } // Test /tee/submit_proofs endpoint using a mocked TEE proof and verify response and db state #[tokio::test] async fn submit_tee_proof() { - let blob_store = MockObjectStore::arc(); - let db_conn_pool = ConnectionPool::test_pool().await; - let object_path = "mocked_object_path"; let 
batch_number = L1BatchNumber::from(1); + let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, object_path).await; - - // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof + mock_tee_batch_status(db_conn_pool.clone(), batch_number).await; let tee_proof_request_str = r#"{ "signature": "0001020304", @@ -141,7 +75,7 @@ async fn submit_tee_proof() { serde_json::from_str::(tee_proof_request_str).unwrap(); let uri = format!("/tee/submit_proofs/{}", batch_number.0); let app = create_proof_processing_router( - blob_store, + MockObjectStore::arc(), db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, @@ -149,6 +83,7 @@ async fn submit_tee_proof() { tee_support: true, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); // this should fail because we haven't saved the attestation for the pubkey yet @@ -207,32 +142,15 @@ async fn submit_tee_proof() { async fn mock_tee_batch_status( db_conn_pool: ConnectionPool, batch_number: L1BatchNumber, - object_path: &str, ) { let mut proof_db_conn = db_conn_pool.connection().await.unwrap(); let mut proof_dal = proof_db_conn.tee_proof_generation_dal(); - let mut input_db_conn = db_conn_pool.connection().await.unwrap(); - let mut input_producer_dal = input_db_conn.tee_verifier_input_producer_dal(); // there should not be any batches awaiting proof in the db yet let oldest_batch_number = proof_dal.get_oldest_unpicked_batch().await.unwrap(); assert!(oldest_batch_number.is_none()); - // mock SQL table with relevant information about the status of the TEE verifier input - - input_producer_dal - .create_tee_verifier_input_producer_job(batch_number) - .await - .expect("Failed to create tee_verifier_input_producer_job"); - - // pretend that the TEE verifier input blob file was fetched successfully - - input_producer_dal - .mark_job_as_successful(batch_number, Instant::now(), object_path) - .await - .expect("Failed to mark tee_verifier_input_producer_job job as successful"); - // mock SQL table with relevant information about the status of TEE proof generation proof_dal diff --git a/core/node/shared_metrics/src/lib.rs b/core/node/shared_metrics/src/lib.rs index e0a7fa74ef4..2c41ec9293a 100644 --- a/core/node/shared_metrics/src/lib.rs +++ b/core/node/shared_metrics/src/lib.rs @@ -29,7 +29,6 @@ pub enum InitStage { EthTxAggregator, EthTxManager, Tree, - TeeVerifierInputProducer, Consensus, DADispatcher, } @@ -45,7 +44,6 @@ impl fmt::Display for InitStage { Self::EthTxAggregator => formatter.write_str("eth_tx_aggregator"), Self::EthTxManager => formatter.write_str("eth_tx_manager"), Self::Tree => formatter.write_str("tree"), - Self::TeeVerifierInputProducer => formatter.write_str("tee_verifier_input_producer"), Self::Consensus => formatter.write_str("consensus"), Self::DADispatcher => formatter.write_str("da_dispatcher"), } diff --git a/core/node/tee_verifier_input_producer/Cargo.toml b/core/node/tee_verifier_input_producer/Cargo.toml deleted file mode 100644 index 7a5a4de5d0c..00000000000 --- a/core/node/tee_verifier_input_producer/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "zksync_tee_verifier_input_producer" -description = "ZKsync TEE verifier input producer" -version.workspace = true -edition.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -license.workspace = true -keywords.workspace = true -categories.workspace = true - -[dependencies] -zksync_dal.workspace = true -zksync_object_store.workspace = 
true -zksync_prover_interface.workspace = true -zksync_queued_job_processor.workspace = true -zksync_tee_verifier.workspace = true -zksync_types.workspace = true -zksync_utils.workspace = true -zksync_vm_executor.workspace = true -vise.workspace = true - -anyhow.workspace = true -async-trait.workspace = true -tracing.workspace = true -tokio = { workspace = true, features = ["time"] } diff --git a/core/node/tee_verifier_input_producer/README.md b/core/node/tee_verifier_input_producer/README.md deleted file mode 100644 index 75a2029985c..00000000000 --- a/core/node/tee_verifier_input_producer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `zksync_tee_verifier_input_producer` - -Component responsible for producing inputs for verification of execution in TEE. diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs deleted file mode 100644 index 8a99aa07ae5..00000000000 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ /dev/null @@ -1,261 +0,0 @@ -//! Produces input for a TEE Verifier -//! -//! Extract all data needed to re-execute and verify an L1Batch without accessing -//! the DB and/or the object store. -//! -//! For testing purposes, the L1 batch is re-executed immediately for now. -//! Eventually, this component will only extract the inputs and send them to another -//! machine over a "to be defined" channel, e.g., save them to an object store. - -use std::{sync::Arc, time::Instant}; - -use anyhow::Context; -use async_trait::async_trait; -use tokio::task::JoinHandle; -use zksync_dal::{tee_verifier_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Core, CoreDal}; -use zksync_object_store::ObjectStore; -use zksync_prover_interface::inputs::{ - TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths, -}; -use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier::Verify; -use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; -use zksync_utils::u256_to_h256; -use zksync_vm_executor::storage::L1BatchParamsProvider; - -use self::metrics::METRICS; - -mod metrics; - -/// Component that extracts all data (from DB) necessary to run a TEE Verifier. -#[derive(Debug)] -pub struct TeeVerifierInputProducer { - connection_pool: ConnectionPool, - l2_chain_id: L2ChainId, - object_store: Arc, -} - -impl TeeVerifierInputProducer { - pub async fn new( - connection_pool: ConnectionPool, - object_store: Arc, - l2_chain_id: L2ChainId, - ) -> anyhow::Result { - Ok(TeeVerifierInputProducer { - connection_pool, - object_store, - l2_chain_id, - }) - } - - async fn process_job_impl( - l1_batch_number: L1BatchNumber, - started_at: Instant, - connection_pool: ConnectionPool, - object_store: Arc, - l2_chain_id: L2ChainId, - ) -> anyhow::Result { - let prepare_basic_circuits_job: WitnessInputMerklePaths = object_store - .get(l1_batch_number) - .await - .context("failed to get PrepareBasicCircuitsJob from object store")?; - - let mut connection = connection_pool - .connection() - .await - .context("failed to get connection for TeeVerifierInputProducer")?; - - let l2_blocks_execution_data = connection - .transactions_dal() - .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) - .await?; - - let l1_batch_header = connection - .blocks_dal() - .get_l1_batch_header(l1_batch_number) - .await - .with_context(|| format!("header is missing for L1 batch #{l1_batch_number}"))? 
- .unwrap(); - - let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) - .await - .context("failed initializing L1 batch params provider")?; - - // In the state keeper, this value is used to reject execution. - // All batches have already been executed by State Keeper. - // This means we don't want to reject any execution, therefore we're using MAX as an allow all. - let validation_computational_gas_limit = u32::MAX; - - let (system_env, l1_batch_env) = l1_batch_params_provider - .load_l1_batch_env( - &mut connection, - l1_batch_number, - validation_computational_gas_limit, - l2_chain_id, - ) - .await? - .with_context(|| format!("expected L1 batch #{l1_batch_number} to be sealed"))?; - - let used_contract_hashes = l1_batch_header - .used_contract_hashes - .into_iter() - .map(u256_to_h256) - .collect(); - - // `get_factory_deps()` returns the bytecode in chunks of `Vec<[u8; 32]>`, - // but `fn store_factory_dep(&mut self, hash: H256, bytecode: Vec)` in `InMemoryStorage` wants flat byte vecs. - pub fn into_flattened(data: Vec<[T; N]>) -> Vec { - let mut new = Vec::new(); - for slice in data.iter() { - new.extend_from_slice(slice); - } - new - } - - let used_contracts = connection - .factory_deps_dal() - .get_factory_deps(&used_contract_hashes) - .await - .into_iter() - .map(|(hash, bytes)| (u256_to_h256(hash), into_flattened(bytes))) - .collect(); - - tracing::info!("Started execution of l1_batch: {l1_batch_number:?}"); - - let tee_verifier_input = V1TeeVerifierInput::new( - prepare_basic_circuits_job, - l2_blocks_execution_data, - l1_batch_env, - system_env, - used_contracts, - ); - - // TODO (SEC-263): remove these 2 lines after successful testnet runs - tee_verifier_input.clone().verify()?; - tracing::info!("Looks like we verified {l1_batch_number} correctly"); - - tracing::info!("Finished execution of l1_batch: {l1_batch_number:?}"); - - METRICS.process_batch_time.observe(started_at.elapsed()); - tracing::debug!( - "TeeVerifierInputProducer took {:?} for L1BatchNumber {}", - started_at.elapsed(), - l1_batch_number.0 - ); - - Ok(TeeVerifierInput::new(tee_verifier_input)) - } -} - -#[async_trait] -impl JobProcessor for TeeVerifierInputProducer { - type Job = L1BatchNumber; - type JobId = L1BatchNumber; - type JobArtifacts = TeeVerifierInput; - const SERVICE_NAME: &'static str = "tee_verifier_input_producer"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut connection = self.connection_pool.connection().await?; - let l1_batch_to_process = connection - .tee_verifier_input_producer_dal() - .get_next_tee_verifier_input_producer_job() - .await - .context("failed to get next basic witness input producer job")?; - Ok(l1_batch_to_process.map(|number| (number, number))) - } - - async fn save_failure(&self, job_id: Self::JobId, started_at: Instant, error: String) { - let attempts = self - .connection_pool - .connection() - .await - .unwrap() - .tee_verifier_input_producer_dal() - .mark_job_as_failed(job_id, started_at, error) - .await - .expect("errored whilst marking job as failed"); - if let Some(tries) = attempts { - tracing::warn!("Failed to process job: {job_id:?}, after {tries} tries."); - } else { - tracing::warn!("L1 Batch {job_id:?} was processed successfully by another worker."); - } - } - - async fn process_job( - &self, - _job_id: &Self::JobId, - job: Self::Job, - started_at: Instant, - ) -> JoinHandle> { - let l2_chain_id = self.l2_chain_id; - let connection_pool = self.connection_pool.clone(); - let object_store = self.object_store.clone(); - 
tokio::task::spawn(async move { - Self::process_job_impl( - job, - started_at, - connection_pool.clone(), - object_store, - l2_chain_id, - ) - .await - }) - } - - async fn save_result( - &self, - job_id: Self::JobId, - started_at: Instant, - artifacts: Self::JobArtifacts, - ) -> anyhow::Result<()> { - let observer: vise::LatencyObserver = METRICS.upload_input_time.start(); - let object_path = self - .object_store - .put(job_id, &artifacts) - .await - .context("failed to upload artifacts for TeeVerifierInputProducer")?; - observer.observe(); - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for TeeVerifierInputProducer")?; - let mut transaction = connection - .start_transaction() - .await - .context("failed to acquire DB transaction for TeeVerifierInputProducer")?; - transaction - .tee_verifier_input_producer_dal() - .mark_job_as_successful(job_id, started_at, &object_path) - .await - .context("failed to mark job as successful for TeeVerifierInputProducer")?; - transaction - .tee_proof_generation_dal() - .insert_tee_proof_generation_job(job_id, TeeType::Sgx) - .await?; - transaction - .commit() - .await - .context("failed to commit DB transaction for TeeVerifierInputProducer")?; - METRICS.block_number_processed.set(job_id.0 as u64); - Ok(()) - } - - fn max_attempts(&self) -> u32 { - JOB_MAX_ATTEMPT as u32 - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for TeeVerifierInputProducer")?; - connection - .tee_verifier_input_producer_dal() - .get_tee_verifier_input_producer_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for TeeVerifierInputProducer") - } -} diff --git a/core/node/tee_verifier_input_producer/src/metrics.rs b/core/node/tee_verifier_input_producer/src/metrics.rs deleted file mode 100644 index 362804d338e..00000000000 --- a/core/node/tee_verifier_input_producer/src/metrics.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! Metrics - -use std::time::Duration; - -use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; - -#[derive(Debug, Metrics)] -#[metrics(prefix = "tee_verifier_input_producer")] -pub(crate) struct TeeVerifierInputProducerMetrics { - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub process_batch_time: Histogram, - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub upload_input_time: Histogram, - pub block_number_processed: Gauge, -} - -#[vise::register] -pub(super) static METRICS: vise::Global = vise::Global::new(); diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index d8bef020c64..18107f0d4f9 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -1,6 +1,6 @@ # Environment configuration for the Rust code # We don't provide the group name like `[rust]` here, because we don't want -# these variables to be prefixed during the compiling. +# these variables to be prefixed during the compiling. # `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. 
@@ -26,7 +26,6 @@ zksync_node_sync=info,\ zksync_node_consensus=info,\ zksync_contract_verification_server=info,\ zksync_node_api_server=info,\ -zksync_tee_verifier_input_producer=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ zksync_consensus_storage=info,\ diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 017d79dbe73..587ba4614a5 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -312,7 +312,7 @@ prometheus: observability: log_format: plain - log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" + log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset diff --git a/prover/docs/03_launch.md 
b/prover/docs/03_launch.md index 203fb6e8cec..0465d888f61 100644 --- a/prover/docs/03_launch.md +++ b/prover/docs/03_launch.md @@ -47,7 +47,7 @@ We will be running a bunch of binaries, it's recommended to run each in a separa ### Server ``` -zk server --components=api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip +zk server --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip ``` ### Proof data handler From dd4b7cc94e324dfa5a86df09f0cf15642ea2f5c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Fri, 18 Oct 2024 06:31:36 -0300 Subject: [PATCH 085/140] feat(zkstack_cli): Add status page (#3036) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Sample: ![image](https://github.com/user-attachments/assets/712ee40f-c650-43bf-a69c-ba03df37e07a) --------- Co-authored-by: Danil --- .../zkstack/src/commands/dev/commands/mod.rs | 1 + .../src/commands/dev/commands/status/args.rs | 45 ++++++ .../src/commands/dev/commands/status/draw.rs | 88 ++++++++++++ .../src/commands/dev/commands/status/mod.rs | 135 ++++++++++++++++++ .../src/commands/dev/commands/status/utils.rs | 26 ++++ .../zkstack/src/commands/dev/messages.rs | 23 +++ .../crates/zkstack/src/commands/dev/mod.rs | 5 + zkstack_cli/crates/zkstack/src/main.rs | 2 +- zkstack_cli/crates/zkstack/src/utils/ports.rs | 117 +++++++++++---- 9 files changed, 415 insertions(+), 27 deletions(-) create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs index ab98e44533f..a292168dc6e 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs @@ -10,4 +10,5 @@ pub mod prover; pub mod send_transactions; pub mod snapshot; pub(crate) mod sql_fmt; +pub mod status; pub mod test; diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs new file mode 100644 index 00000000000..5ac52bf854a --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs @@ -0,0 +1,45 @@ +use anyhow::Context; +use clap::Parser; +use config::EcosystemConfig; +use xshell::Shell; + +use crate::{ + commands::dev::messages::{ + MSG_API_CONFIG_NOT_FOUND_ERR, MSG_STATUS_PORTS_HELP, MSG_STATUS_URL_HELP, + }, + messages::MSG_CHAIN_NOT_FOUND_ERR, +}; + +#[derive(Debug, Parser)] +pub enum StatusSubcommands { + #[clap(about = MSG_STATUS_PORTS_HELP)] + Ports, +} + +#[derive(Debug, Parser)] +pub struct StatusArgs { + #[clap(long, short = 'u', help = MSG_STATUS_URL_HELP)] + pub url: Option, + #[clap(subcommand)] + pub subcommand: Option, +} + +impl StatusArgs { + pub fn get_url(&self, shell: &Shell) -> anyhow::Result { + if let Some(url) = &self.url { + Ok(url.clone()) + } else { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_current_chain() + 
.context(MSG_CHAIN_NOT_FOUND_ERR)?; + let general_config = chain.get_general_config()?; + let health_check_port = general_config + .api_config + .context(MSG_API_CONFIG_NOT_FOUND_ERR)? + .healthcheck + .port; + Ok(format!("http://localhost:{}/health", health_check_port)) + } + } +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs new file mode 100644 index 00000000000..d38d5b6d29f --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs @@ -0,0 +1,88 @@ +use crate::{commands::dev::commands::status::utils::is_port_open, utils::ports::PortInfo}; + +const DEFAULT_LINE_WIDTH: usize = 32; + +pub struct BoxProperties { + longest_line: usize, + border: String, + boxed_msg: Vec, +} + +impl BoxProperties { + fn new(msg: &str) -> Self { + let longest_line = msg + .lines() + .map(|line| line.len()) + .max() + .unwrap_or(0) + .max(DEFAULT_LINE_WIDTH); + let width = longest_line + 2; + let border = "─".repeat(width); + let boxed_msg = msg + .lines() + .map(|line| format!("│ {:longest_line$} │", line)) + .collect(); + Self { + longest_line, + border, + boxed_msg, + } + } +} + +fn single_bordered_box(msg: &str) -> String { + let properties = BoxProperties::new(msg); + format!( + "┌{}┐\n{}\n└{}┘\n", + properties.border, + properties.boxed_msg.join("\n"), + properties.border + ) +} + +pub fn bordered_boxes(msg1: &str, msg2: Option<&String>) -> String { + if msg2.is_none() { + return single_bordered_box(msg1); + } + + let properties1 = BoxProperties::new(msg1); + let properties2 = BoxProperties::new(msg2.unwrap()); + + let max_lines = properties1.boxed_msg.len().max(properties2.boxed_msg.len()); + let header = format!("┌{}┐ ┌{}┐\n", properties1.border, properties2.border); + let footer = format!("└{}┘ └{}┘\n", properties1.border, properties2.border); + + let empty_line1 = format!( + "│ {:longest_line$} │", + "", + longest_line = properties1.longest_line + ); + let empty_line2 = format!( + "│ {:longest_line$} │", + "", + longest_line = properties2.longest_line + ); + + let boxed_info: Vec = (0..max_lines) + .map(|i| { + let line1 = properties1.boxed_msg.get(i).unwrap_or(&empty_line1); + let line2 = properties2.boxed_msg.get(i).unwrap_or(&empty_line2); + format!("{} {}", line1, line2) + }) + .collect(); + + format!("{}{}\n{}", header, boxed_info.join("\n"), footer) +} + +pub fn format_port_info(port_info: &PortInfo) -> String { + let in_use_tag = if is_port_open(port_info.port) { + " [OPEN]" + } else { + "" + }; + + format!( + " - {}{} > {}\n", + port_info.port, in_use_tag, port_info.description + ) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs new file mode 100644 index 00000000000..8687fcb0476 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs @@ -0,0 +1,135 @@ +use std::collections::HashMap; + +use anyhow::Context; +use args::{StatusArgs, StatusSubcommands}; +use common::logger; +use draw::{bordered_boxes, format_port_info}; +use serde::Deserialize; +use serde_json::Value; +use utils::deslugify; +use xshell::Shell; + +use crate::{ + commands::dev::messages::{ + msg_failed_parse_response, msg_not_ready_components, msg_system_status, + MSG_ALL_COMPONENTS_READY, MSG_COMPONENTS, MSG_SOME_COMPONENTS_NOT_READY, + }, + utils::ports::EcosystemPortsScanner, +}; + +pub mod args; +mod draw; +mod utils; + +const STATUS_READY: &str = "ready"; + 
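// For reference: the status command is invoked as `zkstack dev status` (health overview)
// or `zkstack dev status ports` (port usage), optionally with `--url`/`-u` pointing at the
// health check endpoint (by default `http://localhost:<healthcheck port>/health`).
// That endpoint is expected to return JSON roughly of the following shape; the component
// names here are illustrative examples only:
//
// {
//   "status": "ready",
//   "components": {
//     "api": { "status": "ready", "details": { "some_detail": "value" } },
//     "state_keeper": { "status": "not_ready" }
//   }
// }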
+#[derive(Deserialize, Debug)] +struct StatusResponse { + status: String, + components: HashMap, +} + +#[derive(Deserialize, Debug)] +struct Component { + status: String, + details: Option, +} + +fn print_status(health_check_url: String) -> anyhow::Result<()> { + let client = reqwest::blocking::Client::new(); + let response = client.get(&health_check_url).send()?.text()?; + + let status_response: StatusResponse = + serde_json::from_str(&response).context(msg_failed_parse_response(&response))?; + + if status_response.status.to_lowercase() == STATUS_READY { + logger::success(msg_system_status(&status_response.status)); + } else { + logger::warn(msg_system_status(&status_response.status)); + } + + let mut components_info = String::from(MSG_COMPONENTS); + let mut components = Vec::new(); + let mut not_ready_components = Vec::new(); + + for (component_name, component) in status_response.components { + let readable_name = deslugify(&component_name); + let mut component_info = format!("{}:\n - Status: {}", readable_name, component.status); + + if let Some(details) = &component.details { + for (key, value) in details.as_object().unwrap() { + component_info.push_str(&format!("\n - {}: {}", deslugify(key), value)); + } + } + + if component.status.to_lowercase() != STATUS_READY { + not_ready_components.push(readable_name); + } + + components.push(component_info); + } + + components.sort_by(|a, b| { + a.lines() + .count() + .cmp(&b.lines().count()) + .then_with(|| a.cmp(b)) + }); + + for chunk in components.chunks(2) { + components_info.push_str(&bordered_boxes(&chunk[0], chunk.get(1))); + } + + logger::info(components_info); + + if not_ready_components.is_empty() { + logger::outro(MSG_ALL_COMPONENTS_READY); + } else { + logger::warn(MSG_SOME_COMPONENTS_NOT_READY); + logger::outro(msg_not_ready_components(¬_ready_components.join(", "))); + } + + Ok(()) +} + +fn print_ports(shell: &Shell) -> anyhow::Result<()> { + let ports = EcosystemPortsScanner::scan(shell)?; + let grouped_ports = ports.group_by_file_path(); + + let mut all_port_lines: Vec = Vec::new(); + + for (file_path, port_infos) in grouped_ports { + let mut port_info_lines = String::new(); + + for port_info in port_infos { + port_info_lines.push_str(&format_port_info(&port_info)); + } + + all_port_lines.push(format!("{}:\n{}", file_path, port_info_lines)); + } + + all_port_lines.sort_by(|a, b| { + b.lines() + .count() + .cmp(&a.lines().count()) + .then_with(|| a.cmp(b)) + }); + + let mut components_info = String::from("Ports:\n"); + for chunk in all_port_lines.chunks(2) { + components_info.push_str(&bordered_boxes(&chunk[0], chunk.get(1))); + } + + logger::info(components_info); + Ok(()) +} + +pub async fn run(shell: &Shell, args: StatusArgs) -> anyhow::Result<()> { + if let Some(StatusSubcommands::Ports) = args.subcommand { + return print_ports(shell); + } + + let health_check_url = args.get_url(shell)?; + + print_status(health_check_url) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs new file mode 100644 index 00000000000..399a0fb0fec --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs @@ -0,0 +1,26 @@ +use std::net::TcpListener; + +pub fn is_port_open(port: u16) -> bool { + TcpListener::bind(("0.0.0.0", port)).is_err() || TcpListener::bind(("127.0.0.1", port)).is_err() +} + +pub fn deslugify(name: &str) -> String { + name.split('_') + .map(|word| { + let mut chars = word.chars(); + match 
chars.next() { + Some(first) => { + let capitalized = first.to_uppercase().collect::() + chars.as_str(); + match capitalized.as_str() { + "Http" => "HTTP".to_string(), + "Api" => "API".to_string(), + "Ws" => "WS".to_string(), + _ => capitalized, + } + } + None => String::new(), + } + }) + .collect::>() + .join(" ") +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs index c7e639f8e87..a38fff5a178 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs @@ -232,5 +232,28 @@ pub(super) const MSG_UNABLE_TO_READ_PARSE_JSON_ERR: &str = "Unable to parse JSON pub(super) const MSG_FAILED_TO_SEND_TXN_ERR: &str = "Failed to send transaction"; pub(super) const MSG_INVALID_L1_RPC_URL_ERR: &str = "Invalid L1 RPC URL"; +// Status related messages +pub(super) const MSG_STATUS_ABOUT: &str = "Get status of the server"; +pub(super) const MSG_API_CONFIG_NOT_FOUND_ERR: &str = "API config not found"; +pub(super) const MSG_STATUS_URL_HELP: &str = "URL of the health check endpoint"; +pub(super) const MSG_STATUS_PORTS_HELP: &str = "Show used ports"; +pub(super) const MSG_COMPONENTS: &str = "Components:\n"; +pub(super) const MSG_ALL_COMPONENTS_READY: &str = + "Overall System Status: All components operational and ready."; +pub(super) const MSG_SOME_COMPONENTS_NOT_READY: &str = + "Overall System Status: Some components are not ready."; + +pub(super) fn msg_system_status(status: &str) -> String { + format!("System Status: {}\n", status) +} + +pub(super) fn msg_failed_parse_response(response: &str) -> String { + format!("Failed to parse response: {}", response) +} + +pub(super) fn msg_not_ready_components(components: &str) -> String { + format!("Not Ready Components: {}", components) +} + // Genesis pub(super) const MSG_GENESIS_FILE_GENERATION_STARTED: &str = "Regenerate genesis file"; diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs index 9272436a9b9..409c3a764eb 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs @@ -1,4 +1,6 @@ use clap::Subcommand; +use commands::status::args::StatusArgs; +use messages::MSG_STATUS_ABOUT; use xshell::Shell; use self::commands::{ @@ -41,6 +43,8 @@ pub enum DevCommands { ConfigWriter(ConfigWriterArgs), #[command(about = MSG_SEND_TXNS_ABOUT)] SendTransactions(SendTransactionsArgs), + #[command(about = MSG_STATUS_ABOUT)] + Status(StatusArgs), #[command(about = MSG_GENERATE_GENESIS_ABOUT, alias = "genesis")] GenerateGenesis, } @@ -59,6 +63,7 @@ pub async fn run(shell: &Shell, args: DevCommands) -> anyhow::Result<()> { DevCommands::SendTransactions(args) => { commands::send_transactions::run(shell, args).await? 
} + DevCommands::Status(args) => commands::status::run(shell, args).await?, DevCommands::GenerateGenesis => commands::genesis::run(shell).await?, } Ok(()) diff --git a/zkstack_cli/crates/zkstack/src/main.rs b/zkstack_cli/crates/zkstack/src/main.rs index 987de555ecf..b2e4af21cef 100644 --- a/zkstack_cli/crates/zkstack/src/main.rs +++ b/zkstack_cli/crates/zkstack/src/main.rs @@ -69,9 +69,9 @@ pub enum InceptionSubcommands { /// Run block-explorer #[command(subcommand)] Explorer(ExplorerCommands), - /// Update ZKsync #[command(subcommand)] Consensus(consensus::Command), + /// Update ZKsync #[command(alias = "u")] Update(UpdateArgs), #[command(hide = true)] diff --git a/zkstack_cli/crates/zkstack/src/utils/ports.rs b/zkstack_cli/crates/zkstack/src/utils/ports.rs index 04c8cef5ff5..6c299b99913 100644 --- a/zkstack_cli/crates/zkstack/src/utils/ports.rs +++ b/zkstack_cli/crates/zkstack/src/utils/ports.rs @@ -12,7 +12,24 @@ use xshell::Shell; use crate::defaults::{DEFAULT_OBSERVABILITY_PORT, PORT_RANGE_END, PORT_RANGE_START}; pub struct EcosystemPorts { - pub ports: HashMap>, + pub ports: HashMap>, +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct PortInfo { + pub port: u16, + pub file_path: String, + pub description: String, +} + +impl fmt::Display for PortInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "[{}] {} >{}", + self.file_path, self.description, self.port + ) + } } impl EcosystemPorts { @@ -20,14 +37,19 @@ impl EcosystemPorts { self.ports.contains_key(&port) } - pub fn add_port_info(&mut self, port: u16, info: String) { + pub fn add_port_info(&mut self, port: u16, info: PortInfo) { + let info = PortInfo { + port, + file_path: info.file_path, + description: info.description, + }; self.ports.entry(port).or_default().push(info); } - pub fn allocate_port(&mut self, range: Range, info: String) -> anyhow::Result { + pub fn allocate_port(&mut self, range: Range, info: PortInfo) -> anyhow::Result { for port in range { if !self.is_port_assigned(port) { - self.add_port_info(port, info.to_string()); + self.add_port_info(port, info); return Ok(port); } } @@ -48,10 +70,15 @@ impl EcosystemPorts { let mut new_ports = HashMap::new(); for (desc, port) in config.get_default_ports()? 
{ let mut new_port = port + offset; + let port_info = PortInfo { + port: new_port, + description: desc.clone(), + ..Default::default() + }; if self.is_port_assigned(new_port) { - new_port = self.allocate_port(port_range.clone(), desc.clone())?; + new_port = self.allocate_port(port_range.clone(), port_info)?; } else { - self.add_port_info(new_port, desc.to_string()); + self.add_port_info(new_port, port_info); } new_ports.insert(desc, new_port); } @@ -89,7 +116,7 @@ impl EcosystemPorts { if let Some(port) = val.as_u64().and_then(|p| u16::try_from(p).ok()) { let new_port = self.allocate_port( (port + offset as u16)..PORT_RANGE_END, - "".to_string(), + PortInfo::default(), )?; *val = Value::Number(serde_yaml::Number::from(new_port)); updated_ports.insert(port, new_port); @@ -132,6 +159,19 @@ impl EcosystemPorts { Ok(()) } + + pub fn group_by_file_path(&self) -> HashMap> { + let mut grouped_ports: HashMap> = HashMap::new(); + for port_infos in self.ports.values() { + for port_info in port_infos { + grouped_ports + .entry(port_info.file_path.clone()) + .or_default() + .push(port_info.clone()); + } + } + grouped_ports + } } impl fmt::Display for EcosystemPorts { @@ -278,8 +318,12 @@ impl EcosystemPortsScanner { ecosystem_ports: &mut EcosystemPorts, ) { if let Some(port) = value.as_u64().and_then(|p| u16::try_from(p).ok()) { - let description = format!("[{}] {}", file_path.display(), path); - ecosystem_ports.add_port_info(port, description); + let info = PortInfo { + port, + file_path: file_path.display().to_string(), + description: path.to_string(), + }; + ecosystem_ports.add_port_info(port, info); } } @@ -318,8 +362,12 @@ impl EcosystemPortsScanner { file_path: &Path, ecosystem_ports: &mut EcosystemPorts, ) { - let description = format!("[{}] {}", file_path.display(), path); - ecosystem_ports.add_port_info(port, description); + let info = PortInfo { + port, + file_path: file_path.display().to_string(), + description: path.to_string(), + }; + ecosystem_ports.add_port_info(port, info); } } @@ -360,7 +408,7 @@ impl ConfigWithChainPorts for ExplorerBackendPorts { mod tests { use std::path::PathBuf; - use crate::utils::ports::{EcosystemPorts, EcosystemPortsScanner}; + use crate::utils::ports::{EcosystemPorts, EcosystemPortsScanner, PortInfo}; #[test] fn test_traverse_yaml() { @@ -414,21 +462,28 @@ mod tests { // Check description: let port_3050_info = ecosystem_ports.ports.get(&3050).unwrap(); assert_eq!(port_3050_info.len(), 1); - assert_eq!( - port_3050_info[0], - "[test_config.yaml] api:web3_json_rpc:http_port" - ); + let expected_port_3050_info = PortInfo { + port: 3050, + file_path: "test_config.yaml".to_string(), + description: "api:web3_json_rpc:http_port".to_string(), + }; + assert_eq!(port_3050_info[0], expected_port_3050_info); let port_3412_info = ecosystem_ports.ports.get(&3412).unwrap(); assert_eq!(port_3412_info.len(), 2); - assert_eq!( - port_3412_info[0], - "[test_config.yaml] api:prometheus:listener_port" - ); - assert_eq!( - port_3412_info[1], - "[test_config.yaml] prometheus:listener_port" - ); + let expected_port_3412_info_0 = PortInfo { + port: 3412, + file_path: "test_config.yaml".to_string(), + description: "api:prometheus:listener_port".to_string(), + }; + let expected_port_3412_info_1 = PortInfo { + port: 3412, + file_path: "test_config.yaml".to_string(), + description: "prometheus:listener_port".to_string(), + }; + + assert_eq!(port_3412_info[0], expected_port_3412_info_0); + assert_eq!(port_3412_info[1], expected_port_3412_info_1); } #[test] @@ -451,7 +506,12 @@ mod tests { 
assert!(ecosystem_ports.is_port_assigned(3050)); let port_info = ecosystem_ports.ports.get(&3050).unwrap(); - assert_eq!(port_info[0], "[test_config.yaml] web3_json_rpc:http_port"); + let expected_port_info = PortInfo { + port: 3050, + file_path: "test_config.yaml".to_string(), + description: "web3_json_rpc:http_port".to_string(), + }; + assert_eq!(port_info[0], expected_port_info); } #[test] @@ -482,7 +542,12 @@ mod tests { assert!(ecosystem_ports.is_port_assigned(8546)); let port_info = ecosystem_ports.ports.get(&8546).unwrap(); - assert_eq!(port_info[0], "[test_config.yaml] reth:ports"); + let expected_port_info = PortInfo { + port: 8546, + file_path: "test_config.yaml".to_string(), + description: "reth:ports".to_string(), + }; + assert_eq!(port_info[0], expected_port_info); } #[test] From 4d283cad42e259655e8b5d197beb8a216fe3770b Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Fri, 18 Oct 2024 11:32:12 +0200 Subject: [PATCH 086/140] chore(zkstack_cli): Rename inception to zkstack (#3123) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Rename `Inception` to `ZkStack`, and `InceptionSubcommands` to `ZkStackSubcommands`. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- zkstack_cli/crates/zkstack/src/main.rs | 46 ++++++++++++-------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/zkstack_cli/crates/zkstack/src/main.rs b/zkstack_cli/crates/zkstack/src/main.rs index b2e4af21cef..404ac893810 100644 --- a/zkstack_cli/crates/zkstack/src/main.rs +++ b/zkstack_cli/crates/zkstack/src/main.rs @@ -32,15 +32,15 @@ mod utils; version = version_message(env!("CARGO_PKG_VERSION")), about )] -struct Inception { +struct ZkStack { #[command(subcommand)] - command: InceptionSubcommands, + command: ZkStackSubcommands, #[clap(flatten)] - global: InceptionGlobalArgs, + global: ZkStackGlobalArgs, } #[derive(Subcommand, Debug)] -pub enum InceptionSubcommands { +pub enum ZkStackSubcommands { /// Ecosystem related commands #[command(subcommand, alias = "e")] Ecosystem(Box), @@ -80,7 +80,7 @@ pub enum InceptionSubcommands { #[derive(Parser, Debug)] #[clap(next_help_heading = "Global options")] -struct InceptionGlobalArgs { +struct ZkStackGlobalArgs { /// Verbose mode #[clap(short, long, global = true)] verbose: bool, @@ -98,7 +98,7 @@ async fn main() -> anyhow::Result<()> { // We must parse arguments before printing the intro, because some autogenerated // Clap commands (like `--version` would look odd otherwise). 
- let inception_args = Inception::parse(); + let inception_args = ZkStack::parse(); init_prompt_theme(); @@ -123,26 +123,24 @@ async fn main() -> anyhow::Result<()> { Ok(()) } -async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Result<()> { +async fn run_subcommand(inception_args: ZkStack, shell: &Shell) -> anyhow::Result<()> { match inception_args.command { - InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, *args).await?, - InceptionSubcommands::Chain(args) => commands::chain::run(shell, *args).await?, - InceptionSubcommands::Dev(args) => commands::dev::run(shell, args).await?, - InceptionSubcommands::Prover(args) => commands::prover::run(shell, args).await?, - InceptionSubcommands::Server(args) => commands::server::run(shell, args)?, - InceptionSubcommands::Containers(args) => commands::containers::run(shell, args)?, - InceptionSubcommands::ExternalNode(args) => { - commands::external_node::run(shell, args).await? - } - InceptionSubcommands::ContractVerifier(args) => { + ZkStackSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, *args).await?, + ZkStackSubcommands::Chain(args) => commands::chain::run(shell, *args).await?, + ZkStackSubcommands::Dev(args) => commands::dev::run(shell, args).await?, + ZkStackSubcommands::Prover(args) => commands::prover::run(shell, args).await?, + ZkStackSubcommands::Server(args) => commands::server::run(shell, args)?, + ZkStackSubcommands::Containers(args) => commands::containers::run(shell, args)?, + ZkStackSubcommands::ExternalNode(args) => commands::external_node::run(shell, args).await?, + ZkStackSubcommands::ContractVerifier(args) => { commands::contract_verifier::run(shell, args).await? } - InceptionSubcommands::Explorer(args) => commands::explorer::run(shell, args).await?, - InceptionSubcommands::Consensus(cmd) => cmd.run(shell).await?, - InceptionSubcommands::Portal => commands::portal::run(shell).await?, - InceptionSubcommands::Update(args) => commands::update::run(shell, args).await?, - InceptionSubcommands::Markdown => { - clap_markdown::print_help_markdown::(); + ZkStackSubcommands::Explorer(args) => commands::explorer::run(shell, args).await?, + ZkStackSubcommands::Consensus(cmd) => cmd.run(shell).await?, + ZkStackSubcommands::Portal => commands::portal::run(shell).await?, + ZkStackSubcommands::Update(args) => commands::update::run(shell, args).await?, + ZkStackSubcommands::Markdown => { + clap_markdown::print_help_markdown::(); } } Ok(()) @@ -150,7 +148,7 @@ async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Res fn init_global_config_inner( shell: &Shell, - inception_args: &InceptionGlobalArgs, + inception_args: &ZkStackGlobalArgs, ) -> anyhow::Result<()> { if let Some(name) = &inception_args.chain { if let Ok(config) = EcosystemConfig::from_file(shell) { From c488c55d7bd9a0c90037efb482d3b52acde0c781 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 18 Oct 2024 14:28:24 +0300 Subject: [PATCH 087/140] test(vm): Deduplicate `multivm` tests (#3109) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Deduplicates unit tests in the `multivm` crate by retaining a single generic copy parameterized by a VM (with test-specific functionality encapsulated in a new `TestedVm` trait). ## Why ❔ 2 copies of almost every test is difficult to maintain. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. 
- [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/lib/multivm/README.md | 12 + core/lib/multivm/src/versions/README.md | 17 - core/lib/multivm/src/versions/mod.rs | 2 - core/lib/multivm/src/versions/testonly.rs | 93 ---- .../src/versions/testonly/block_tip.rs | 390 ++++++++++++++++ .../src/versions/testonly/bootloader.rs | 44 ++ .../versions/testonly/bytecode_publishing.rs | 36 ++ .../multivm/src/versions/testonly/circuits.rs | 73 +++ .../src/versions/testonly/code_oracle.rs | 242 ++++++++++ .../src/versions/testonly/default_aa.rs | 64 +++ .../src/versions/testonly/gas_limit.rs | 34 ++ .../versions/testonly/get_used_contracts.rs | 233 ++++++++++ .../src/versions/testonly/is_write_initial.rs | 38 ++ .../src/versions/testonly/l1_tx_execution.rs | 182 ++++++++ .../src/versions/testonly/l2_blocks.rs | 416 +++++++++++++++++ core/lib/multivm/src/versions/testonly/mod.rs | 228 ++++++++++ .../src/versions/testonly/nonce_holder.rs | 200 ++++++++ .../src/versions/testonly/precompiles.rs | 110 +++++ .../multivm/src/versions/testonly/refunds.rs | 215 +++++++++ .../src/versions/testonly/require_eip712.rs | 146 ++++++ .../src/versions/testonly/rollbacks.rs | 212 +++++++++ .../sekp256r1.rs => testonly/secp256r1.rs} | 13 +- .../versions/{tests.rs => testonly/shadow.rs} | 14 +- .../src/versions/testonly/simple_execution.rs | 75 +++ .../multivm/src/versions/testonly/storage.rs | 125 +++++ .../src/versions/testonly/tester/mod.rs | 229 ++++++++++ .../tester/transaction_test_info.rs | 51 ++- .../testonly/tracing_execution_error.rs | 63 +++ .../multivm/src/versions/testonly/transfer.rs | 200 ++++++++ .../multivm/src/versions/testonly/upgrade.rs | 322 +++++++++++++ .../src/versions/vm_fast/tests/block_tip.rs | 392 +--------------- .../src/versions/vm_fast/tests/bootloader.rs | 50 +- .../vm_fast/tests/bytecode_publishing.rs | 38 +- .../src/versions/vm_fast/tests/call_tracer.rs | 92 ---- .../src/versions/vm_fast/tests/circuits.rs | 74 +-- .../src/versions/vm_fast/tests/code_oracle.rs | 247 +--------- .../src/versions/vm_fast/tests/default_aa.rs | 81 +--- .../src/versions/vm_fast/tests/gas_limit.rs | 39 +- .../vm_fast/tests/get_used_contracts.rs | 235 +--------- .../vm_fast/tests/invalid_bytecode.rs | 120 ----- .../vm_fast/tests/is_write_initial.rs | 46 +- .../versions/vm_fast/tests/l1_tx_execution.rs | 196 +------- .../src/versions/vm_fast/tests/l2_blocks.rs | 421 +---------------- .../multivm/src/versions/vm_fast/tests/mod.rs | 145 +++++- .../versions/vm_fast/tests/nonce_holder.rs | 180 +------- .../src/versions/vm_fast/tests/precompiles.rs | 113 +---- .../versions/vm_fast/tests/prestate_tracer.rs | 143 ------ .../src/versions/vm_fast/tests/refunds.rs | 217 +-------- .../versions/vm_fast/tests/require_eip712.rs | 175 +------ .../src/versions/vm_fast/tests/rollbacks.rs | 200 +------- .../src/versions/vm_fast/tests/secp256r1.rs | 6 + .../vm_fast/tests/simple_execution.rs | 74 +-- .../src/versions/vm_fast/tests/storage.rs | 131 +----- .../src/versions/vm_fast/tests/tester/mod.rs | 6 - .../tests/tester/transaction_test_info.rs | 240 ---------- .../vm_fast/tests/tester/vm_tester.rs | 231 ---------- .../vm_fast/tests/tracing_execution_error.rs | 53 +-- .../src/versions/vm_fast/tests/transfer.rs | 213 +-------- .../src/versions/vm_fast/tests/upgrade.rs | 340 +------------- .../src/versions/vm_fast/tests/utils.rs | 134 ------ .../src/versions/vm_latest/tests/block_tip.rs | 427 +---------------- 
.../versions/vm_latest/tests/bootloader.rs | 55 +-- .../vm_latest/tests/bytecode_publishing.rs | 40 +- .../versions/vm_latest/tests/call_tracer.rs | 31 +- .../src/versions/vm_latest/tests/circuits.rs | 75 +-- .../versions/vm_latest/tests/code_oracle.rs | 277 +---------- .../versions/vm_latest/tests/default_aa.rs | 78 +--- .../versions/vm_latest/tests/evm_emulator.rs | 25 +- .../src/versions/vm_latest/tests/gas_limit.rs | 45 +- .../vm_latest/tests/get_used_contracts.rs | 246 +--------- .../vm_latest/tests/is_write_initial.rs | 48 +- .../vm_latest/tests/l1_tx_execution.rs | 193 +------- .../src/versions/vm_latest/tests/l2_blocks.rs | 430 +----------------- .../src/versions/vm_latest/tests/mod.rs | 260 ++++++++++- .../versions/vm_latest/tests/nonce_holder.rs | 186 +------- .../versions/vm_latest/tests/precompiles.rs | 139 +----- .../vm_latest/tests/prestate_tracer.rs | 41 +- .../src/versions/vm_latest/tests/refunds.rs | 224 +-------- .../vm_latest/tests/require_eip712.rs | 165 +------ .../src/versions/vm_latest/tests/rollbacks.rs | 217 +-------- .../src/versions/vm_latest/tests/secp256r1.rs | 9 + .../src/versions/vm_latest/tests/sekp256r1.rs | 74 --- .../vm_latest/tests/simple_execution.rs | 77 +--- .../src/versions/vm_latest/tests/storage.rs | 186 +------- .../vm_latest/tests/tester/inner_state.rs | 131 ------ .../versions/vm_latest/tests/tester/mod.rs | 9 - .../vm_latest/tests/tester/vm_tester.rs | 299 ------------ .../tests/tracing_execution_error.rs | 53 +-- .../src/versions/vm_latest/tests/transfer.rs | 218 +-------- .../src/versions/vm_latest/tests/upgrade.rs | 351 +------------- .../src/versions/vm_latest/tests/utils.rs | 142 ------ core/lib/vm_interface/src/storage/view.rs | 10 + core/tests/test_account/src/lib.rs | 6 + 93 files changed, 4652 insertions(+), 8826 deletions(-) delete mode 100644 core/lib/multivm/src/versions/README.md delete mode 100644 core/lib/multivm/src/versions/testonly.rs create mode 100644 core/lib/multivm/src/versions/testonly/block_tip.rs create mode 100644 core/lib/multivm/src/versions/testonly/bootloader.rs create mode 100644 core/lib/multivm/src/versions/testonly/bytecode_publishing.rs create mode 100644 core/lib/multivm/src/versions/testonly/circuits.rs create mode 100644 core/lib/multivm/src/versions/testonly/code_oracle.rs create mode 100644 core/lib/multivm/src/versions/testonly/default_aa.rs create mode 100644 core/lib/multivm/src/versions/testonly/gas_limit.rs create mode 100644 core/lib/multivm/src/versions/testonly/get_used_contracts.rs create mode 100644 core/lib/multivm/src/versions/testonly/is_write_initial.rs create mode 100644 core/lib/multivm/src/versions/testonly/l1_tx_execution.rs create mode 100644 core/lib/multivm/src/versions/testonly/l2_blocks.rs create mode 100644 core/lib/multivm/src/versions/testonly/mod.rs create mode 100644 core/lib/multivm/src/versions/testonly/nonce_holder.rs create mode 100644 core/lib/multivm/src/versions/testonly/precompiles.rs create mode 100644 core/lib/multivm/src/versions/testonly/refunds.rs create mode 100644 core/lib/multivm/src/versions/testonly/require_eip712.rs create mode 100644 core/lib/multivm/src/versions/testonly/rollbacks.rs rename core/lib/multivm/src/versions/{vm_fast/tests/sekp256r1.rs => testonly/secp256r1.rs} (91%) rename core/lib/multivm/src/versions/{tests.rs => testonly/shadow.rs} (96%) create mode 100644 core/lib/multivm/src/versions/testonly/simple_execution.rs create mode 100644 core/lib/multivm/src/versions/testonly/storage.rs create mode 100644 
core/lib/multivm/src/versions/testonly/tester/mod.rs rename core/lib/multivm/src/versions/{vm_latest/tests => testonly}/tester/transaction_test_info.rs (87%) create mode 100644 core/lib/multivm/src/versions/testonly/tracing_execution_error.rs create mode 100644 core/lib/multivm/src/versions/testonly/transfer.rs create mode 100644 core/lib/multivm/src/versions/testonly/upgrade.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs create mode 100644 core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/utils.rs create mode 100644 core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/utils.rs diff --git a/core/lib/multivm/README.md b/core/lib/multivm/README.md index 5e2af426ae5..f5e8a552242 100644 --- a/core/lib/multivm/README.md +++ b/core/lib/multivm/README.md @@ -4,3 +4,15 @@ This crate represents a wrapper over several versions of VM that have been used glue code that allows switching the VM version based on the externally provided marker while preserving the public interface. This crate exists to enable the external node to process breaking upgrades and re-execute all the transactions from the genesis block. + +## Developer guidelines + +### Adding tests + +If you want to add unit tests for the VM wrapper, consider the following: + +- Whenever possible, make tests reusable; declare test logic in the [`testonly`](src/versions/testonly/mod.rs) module, + and then instantiate tests using this logic for the supported VM versions. If necessary, extend the tested VM trait so + that test logic can be defined in a generic way. See the `testonly` module docs for more detailed guidelines. +- Do not use an RNG where it can be avoided (e.g., for test contract addresses). +- Avoid using zero / default values in cases they can be treated specially by the tested code. diff --git a/core/lib/multivm/src/versions/README.md b/core/lib/multivm/src/versions/README.md deleted file mode 100644 index 01c57509197..00000000000 --- a/core/lib/multivm/src/versions/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# MultiVM dependencies - -This folder contains the old versions of the VM we have used in the past. The `multivm` crate uses them to dynamically -switch the version we use to be able to sync from the genesis. This is a temporary measure until a "native" solution is -implemented (i.e., the `vm` crate would itself know the changes between versions, and thus we will have only the -functional diff between versions, not several fully-fledged VMs). 
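As a rough sketch of the guideline above (the exact type and function names below are illustrative and not guaranteed to match the crate): the shared test body is declared once in `testonly`, generic over the `TestedVm` implementation, and each VM version adds a thin test that instantiates it.

```
// In testonly/: generic, VM-agnostic test logic.
pub(crate) fn test_bytecode_publishing<VM: TestedVm>() {
    // builds a VmTester via VmTesterBuilder::new()...build::<VM>() and runs the assertions
}

// In a version-specific module (e.g. vm_latest/tests/): a thin instantiation.
#[test]
fn bytecode_publishing() {
    test_bytecode_publishing::<TestedVmForThisVersion>();
}
```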
- -## Versions - -| Name | Protocol versions | Description | -| ---------------------- | ----------------- | --------------------------------------------------------------------- | -| vm_m5 | 0 - 3 | Release for the testnet launch | -| vm_m6 | 4 - 6 | Release for the mainnet launch | -| vm_1_3_2 | 7 - 12 | Release 1.3.2 of the crypto circuits | -| vm_virtual_blocks | 13 - 15 | Adding virtual blocks to help with block number / timestamp migration | -| vm_refunds_enhancement | 16 - 17 | Fixing issue related to refunds in VM | -| vm_boojum_integration | 18 - | New Proving system (boojum), vm version 1.4.0 | diff --git a/core/lib/multivm/src/versions/mod.rs b/core/lib/multivm/src/versions/mod.rs index bcb246cece4..1df706a6cce 100644 --- a/core/lib/multivm/src/versions/mod.rs +++ b/core/lib/multivm/src/versions/mod.rs @@ -1,8 +1,6 @@ mod shared; #[cfg(test)] mod testonly; -#[cfg(test)] -mod tests; pub mod vm_1_3_2; pub mod vm_1_4_1; pub mod vm_1_4_2; diff --git a/core/lib/multivm/src/versions/testonly.rs b/core/lib/multivm/src/versions/testonly.rs deleted file mode 100644 index 51a4d0842d9..00000000000 --- a/core/lib/multivm/src/versions/testonly.rs +++ /dev/null @@ -1,93 +0,0 @@ -use zksync_contracts::BaseSystemContracts; -use zksync_test_account::Account; -use zksync_types::{ - block::L2BlockHasher, fee_model::BatchFeeInput, get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, utils::storage_key_for_eth_balance, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{storage::InMemoryStorage, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, -}; - -pub(super) fn default_system_env() -> SystemEnv { - SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - } -} - -pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(super) fn make_account_rich(storage: &mut InMemoryStorage, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); -} - -#[derive(Debug, Clone)] -pub(super) struct ContractToDeploy { - bytecode: Vec, - address: Address, - is_account: bool, -} - -impl ContractToDeploy { - pub fn new(bytecode: Vec, address: Address) -> Self { - Self { - bytecode, - address, - is_account: false, - } - } - - pub fn account(bytecode: Vec, address: Address) -> Self { - Self { - bytecode, - address, - is_account: true, - } - } - - pub fn insert(&self, storage: &mut InMemoryStorage) { - let deployer_code_key = get_code_key(&self.address); - storage.set_value(deployer_code_key, hash_bytecode(&self.bytecode)); - if self.is_account { - let is_account_key = 
get_is_account_key(&self.address); - storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - storage.store_factory_dep(hash_bytecode(&self.bytecode), self.bytecode.clone()); - } - - /// Inserts the contracts into the test environment, bypassing the deployer system contract. - pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) { - for contract in contracts { - contract.insert(storage); - } - } -} diff --git a/core/lib/multivm/src/versions/testonly/block_tip.rs b/core/lib/multivm/src/versions/testonly/block_tip.rs new file mode 100644 index 00000000000..7700f347ca6 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/block_tip.rs @@ -0,0 +1,390 @@ +use ethabi::Token; +use itertools::Itertools; +use zksync_contracts::load_sys_contract; +use zksync_system_constants::{ + CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, +}; +use zksync_types::{ + commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, + l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; + +use super::{ + get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, + tester::{TestedVm, VmTesterBuilder}, +}; +use crate::{ + interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + versions::testonly::default_l1_batch, + vm_latest::constants::{ + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, + MAX_VM_PUBDATA_PER_BATCH, + }, +}; + +#[derive(Debug, Clone, Default)] +struct L1MessengerTestData { + l2_to_l1_logs: usize, + messages: Vec>, + bytecodes: Vec>, + state_diffs: Vec, +} + +struct MimicCallInfo { + to: Address, + who_to_mimic: Address, + data: Vec, +} + +const CALLS_PER_TX: usize = 1_000; + +fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { + let complex_upgrade = get_complex_upgrade_abi(); + let l1_messenger = load_sys_contract("L1Messenger"); + + let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|i| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendL2ToL1Log") + .unwrap() + .encode_input(&[ + Token::Bool(false), + Token::FixedBytes(H256::from_low_u64_be(2 * i as u64).0.to_vec()), + Token::FixedBytes(H256::from_low_u64_be(2 * i as u64 + 1).0.to_vec()), + ]) + .unwrap(), + }); + let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendToL1") + .unwrap() + .encode_input(&[Token::Bytes(message.clone())]) + .unwrap(), + }); + let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("requestBytecodeL1Publication") + .unwrap() + .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) + .unwrap(), + }); + + let encoded_calls = logs_mimic_calls + .chain(messages_mimic_calls) + .chain(bytecodes_mimic_calls) + .map(|call| { + Token::Tuple(vec![ + Token::Address(call.to), + Token::Address(call.who_to_mimic), + Token::Bytes(call.data), + ]) + }) + .chunks(CALLS_PER_TX) + .into_iter() + .map(|chunk| { + complex_upgrade + .function("mimicCalls") + .unwrap() + .encode_input(&[Token::Array(chunk.collect_vec())]) + .unwrap() + }) + .collect_vec(); + + encoded_calls +} + +struct TestStatistics { + pub 
max_used_gas: u32, + pub circuit_statistics: u64, + pub execution_metrics_size: u64, +} + +struct StatisticsTagged { + pub statistics: TestStatistics, + pub tag: String, +} + +fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { + let mut storage = get_empty_storage(); + let complex_upgrade_code = read_complex_upgrade(); + + // For this test we'll just put the bytecode onto the force deployer address + storage.set_value( + get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), + hash_bytecode(&complex_upgrade_code), + ); + storage.store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); + + // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute + // the gas limit + + let batch_env = L1BatchEnv { + fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), + ..default_l1_batch(zksync_types::L1BatchNumber(1)) + }; + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_l1_batch_env(batch_env) + .build::(); + + let bytecodes: Vec<_> = test_data.bytecodes.iter().map(Vec::as_slice).collect(); + vm.vm.insert_bytecodes(&bytecodes); + + let txs_data = populate_mimic_calls(test_data.clone()); + let account = &mut vm.rich_accounts[0]; + + for (i, data) in txs_data.into_iter().enumerate() { + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), + calldata: data, + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction {i} wasn't successful for input: {:#?}", + test_data + ); + } + + // Now we count how much ergs were spent at the end of the batch + // It is assumed that the top level frame is the bootloader + let gas_before = vm.vm.gas_remaining(); + let result = vm + .vm + .execute_with_state_diffs(test_data.state_diffs.clone(), VmExecutionMode::Batch); + assert!( + !result.result.is_failed(), + "Batch wasn't successful for input: {test_data:?}" + ); + let gas_after = vm.vm.gas_remaining(); + assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used); + + TestStatistics { + max_used_gas: gas_before - gas_after, + circuit_statistics: result.statistics.circuit_statistic.total() as u64, + execution_metrics_size: result.get_execution_metrics(None).size() as u64, + } +} + +fn generate_state_diffs( + repeated_writes: bool, + small_diff: bool, + number_of_state_diffs: usize, +) -> Vec { + (0..number_of_state_diffs) + .map(|i| { + let address = Address::from_low_u64_be(i as u64); + let key = U256::from(i); + let enumeration_index = if repeated_writes { i + 1 } else { 0 }; + + let (initial_value, final_value) = if small_diff { + // As small as it gets, one byte to denote zeroing out the value + (U256::from(1), U256::from(0)) + } else { + // As large as it gets + (U256::from(0), U256::from(2).pow(255.into())) + }; + + StateDiffRecord { + address, + key, + derived_key: u256_to_h256(i.into()).0, + enumeration_index: enumeration_index as u64, + initial_value, + final_value, + } + }) + .collect() +} + +// A valid zkEVM bytecode has odd number of 32 byte words +fn get_valid_bytecode_length(length: usize) -> usize { + // Firstly ensure that the length is divisible by 32 + let length_padded_to_32 = if length % 32 == 0 { + length + } else { + length + 32 - (length % 32) + }; + + // Then we ensure that the 
number returned by division by 32 is odd + if length_padded_to_32 % 64 == 0 { + length_padded_to_32 + 32 + } else { + length_padded_to_32 + } +} + +pub(crate) fn test_dry_run_upper_bound() { + // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). + // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` + // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. + const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = + (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; + + // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. + // To get the upper bound, we'll try to do the following: + // 1. Max number of logs. + // 2. Lots of small L2->L1 messages / one large L2->L1 message. + // 3. Lots of small bytecodes / one large bytecode. + // 4. Lots of storage slot updates. + + let statistics = vec![ + // max logs + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, + ..Default::default() + }), + tag: "max_logs".to_string(), + }, + // max messages + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, + // so the max number of pubdata is bound by it + messages: vec![ + vec![0; 0]; + MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) + ], + ..Default::default() + }), + tag: "max_messages".to_string(), + }, + // long message + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it + messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], + ..Default::default() + }), + tag: "long_message".to_string(), + }, + // max bytecodes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each bytecode must be at least 32 bytes long. 
+ // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number + bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], + ..Default::default() + }), + tag: "max_bytecodes".to_string(), + }, + // long bytecode + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + bytecodes: vec![ + vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; + 1 + ], + ..Default::default() + }), + tag: "long_bytecode".to_string(), + }, + // lots of small repeated writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) + state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), + ..Default::default() + }), + tag: "small_repeated_writes".to_string(), + }, + // lots of big repeated writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value + state_diffs: generate_state_diffs( + true, + false, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, + ), + ..Default::default() + }), + tag: "big_repeated_writes".to_string(), + }, + // lots of small initial writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out + state_diffs: generate_state_diffs( + false, + true, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, + ), + ..Default::default() + }), + tag: "small_initial_writes".to_string(), + }, + // lots of large initial writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value + state_diffs: generate_state_diffs( + false, + false, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, + ), + ..Default::default() + }), + tag: "big_initial_writes".to_string(), + }, + ]; + + // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
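    // (As written, the assertions below enforce a 1.5x margin, i.e. `max_used * 3 / 2 <= OVERHEAD`,
    // for the gas, circuit-statistics and metrics-size overhead constants respectively.)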
+ let max_used_gas = statistics + .iter() + .map(|s| (s.statistics.max_used_gas, s.tag.clone())) + .max() + .unwrap(); + assert!( + max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, + "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", + max_used_gas.1, + max_used_gas.0, + BOOTLOADER_BATCH_TIP_OVERHEAD + ); + + let circuit_statistics = statistics + .iter() + .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) + .max() + .unwrap(); + assert!( + circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, + "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", + circuit_statistics.1, + circuit_statistics.0, + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD + ); + + let execution_metrics_size = statistics + .iter() + .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) + .max() + .unwrap(); + assert!( + execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, + "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", + execution_metrics_size.1, + execution_metrics_size.0, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD + ); +} diff --git a/core/lib/multivm/src/versions/testonly/bootloader.rs b/core/lib/multivm/src/versions/testonly/bootloader.rs new file mode 100644 index 00000000000..e3177e07851 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/bootloader.rs @@ -0,0 +1,44 @@ +use assert_matches::assert_matches; +use zksync_types::U256; + +use super::{get_bootloader, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; +use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; + +pub(crate) fn test_dummy_bootloader() { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = get_bootloader("dummy"); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(base_system_contracts) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build::(); + + let result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!result.result.is_failed()); + + let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); + vm.vm + .verify_required_bootloader_heap(&[(0, correct_first_cell)]); +} + +pub(crate) fn test_bootloader_out_of_gas() { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = get_bootloader("dummy"); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(base_system_contracts) + .with_bootloader_gas_limit(10) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build::(); + + let res = vm.vm.execute(VmExecutionMode::Batch); + + assert_matches!( + res.result, + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas + } + ); +} diff --git a/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs new file mode 100644 index 00000000000..33af7be8cc6 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs @@ -0,0 +1,36 @@ +use zksync_test_account::{DeployContractsTx, TxType}; + +use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterfaceExt}, + utils::bytecode, 
+}; + +pub(crate) fn test_bytecode_publishing() { + // In this test, we aim to ensure that the contents of the compressed bytecodes + // are included as part of the L2->L1 long messages + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; + + let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.execute(VmExecutionMode::Batch); + + let state = vm.vm.get_current_execution_state(); + let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); + assert!( + long_messages.contains(&compressed_bytecode), + "Bytecode not published" + ); +} diff --git a/core/lib/multivm/src/versions/testonly/circuits.rs b/core/lib/multivm/src/versions/testonly/circuits.rs new file mode 100644 index 00000000000..9503efe9208 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/circuits.rs @@ -0,0 +1,73 @@ +use zksync_types::{Address, Execute, U256}; + +use super::tester::VmTesterBuilder; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + versions::testonly::TestedVm, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +/// Checks that estimated number of circuits for simple transfer doesn't differ much +/// from hardcoded expected value. +pub(crate) fn test_circuits() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(Address::repeat_byte(1)), + calldata: Vec::new(), + value: U256::from(1u8), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + let res = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!res.result.is_failed(), "{res:#?}"); + + let s = res.statistics.circuit_statistic; + // Check `circuit_statistic`. 
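    // The hardcoded values below are the expected per-circuit statistics for the simple transfer
    // above; the loop that follows requires exact equality for zero entries and tolerates up to a
    // 10% relative deviation for the rest.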
+ const EXPECTED: [f32; 13] = [ + 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, + 0.0, 0.0, 0.0, + ]; + let actual = [ + (s.main_vm, "main_vm"), + (s.ram_permutation, "ram_permutation"), + (s.storage_application, "storage_application"), + (s.storage_sorter, "storage_sorter"), + (s.code_decommitter, "code_decommitter"), + (s.code_decommitter_sorter, "code_decommitter_sorter"), + (s.log_demuxer, "log_demuxer"), + (s.events_sorter, "events_sorter"), + (s.keccak256, "keccak256"), + (s.ecrecover, "ecrecover"), + (s.sha256, "sha256"), + (s.secp256k1_verify, "secp256k1_verify"), + (s.transient_storage_checker, "transient_storage_checker"), + ]; + for ((actual, name), expected) in actual.iter().zip(EXPECTED) { + if expected == 0.0 { + assert_eq!( + *actual, expected, + "Check failed for {}, expected {}, actual {}", + name, expected, actual + ); + } else { + let diff = (actual - expected) / expected; + assert!( + diff.abs() < 0.1, + "Check failed for {}, expected {}, actual {}", + name, + expected, + actual + ); + } + } +} diff --git a/core/lib/multivm/src/versions/testonly/code_oracle.rs b/core/lib/multivm/src/versions/testonly/code_oracle.rs new file mode 100644 index 00000000000..b786539329b --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/code_oracle.rs @@ -0,0 +1,242 @@ +use ethabi::Token; +use zksync_types::{ + get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; + +use super::{ + get_empty_storage, load_precompiles_contract, read_precompiles_contract, read_test_contract, + tester::VmTesterBuilder, TestedVm, +}; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + versions::testonly::ContractToDeploy, +}; + +fn generate_large_bytecode() -> Vec { + // This is the maximal possible size of a zkEVM bytecode + vec![2u8; ((1 << 16) - 1) * 32] +} + +pub(crate) fn test_code_oracle() { + let precompiles_contract_address = Address::repeat_byte(1); + let precompile_contract_bytecode = read_precompiles_contract(); + + // Filling the zkevm bytecode + let normal_zkevm_bytecode = read_test_contract(); + let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); + let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + precompile_contract_bytecode, + precompiles_contract_address, + )]) + .with_storage(storage) + .build::(); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + vm.vm.insert_bytecodes(&[normal_zkevm_bytecode.as_slice()]); + let account = &mut vm.rich_accounts[0]; + + // Firstly, let's ensure that the contract works. 
+ let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // Now, we ask for the same bytecode. We use to partially check whether the memory page with + // the decommitted bytecode gets erased (it shouldn't). + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx2); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +fn find_code_oracle_cost_log( + precompiles_contract_address: Address, + logs: &[StorageLogWithPreviousValue], +) -> &StorageLogWithPreviousValue { + logs.iter() + .find(|log| { + *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() + }) + .expect("no code oracle cost log") +} + +pub(crate) fn test_code_oracle_big_bytecode() { + let precompiles_contract_address = Address::repeat_byte(1); + let precompile_contract_bytecode = read_precompiles_contract(); + + let big_zkevm_bytecode = generate_large_bytecode(); + let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); + let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); + + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&big_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + precompile_contract_bytecode, + precompiles_contract_address, + )]) + .with_storage(storage) + .build::(); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + vm.vm.insert_bytecodes(&[big_zkevm_bytecode.as_slice()]); + + let account = &mut vm.rich_accounts[0]; + + // Firstly, let's ensure that the contract works. 
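+ // Same call shape as in `test_code_oracle`, but the bytecode being decommitted is the
+ // maximal-size one generated above ((2^16 - 1) 32-byte words, roughly 2 MiB), exercising the
+ // most expensive decommitment path.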
+ let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +pub(crate) fn test_refunds_in_code_oracle() { + let precompiles_contract_address = Address::repeat_byte(1); + let precompile_contract_bytecode = read_precompiles_contract(); + + let normal_zkevm_bytecode = read_test_contract(); + let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); + let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + // Execute code oracle twice with identical VM state that only differs in that the queried bytecode + // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas + // for already decommitted codes). + let mut oracle_costs = vec![]; + for decommit in [false, true] { + let mut vm = VmTesterBuilder::new() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + precompile_contract_bytecode.clone(), + precompiles_contract_address, + )]) + .with_storage(storage.clone()) + .build::(); + + vm.vm.insert_bytecodes(&[normal_zkevm_bytecode.as_slice()]); + + let account = &mut vm.rich_accounts[0]; + if decommit { + let is_fresh = vm.vm.manually_decommit(normal_zkevm_bytecode_hash); + assert!(is_fresh); + } + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + let log = + find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); + oracle_costs.push(log.log.value); + } + + // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` + // in `CodeOracle.yul`. 
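+ // Illustrative numbers only: a 6_400-byte bytecode is 200 words, so the second, already
+ // decommitted call should cost 4 * 200 = 800 gas less than the first one.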
+ let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); + assert_eq!( + code_oracle_refund, + (4 * (normal_zkevm_bytecode.len() / 32)).into() + ); +} diff --git a/core/lib/multivm/src/versions/testonly/default_aa.rs b/core/lib/multivm/src/versions/testonly/default_aa.rs new file mode 100644 index 00000000000..3f121dcf7e6 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/default_aa.rs @@ -0,0 +1,64 @@ +use zksync_test_account::{DeployContractsTx, TxType}; +use zksync_types::{ + get_code_key, get_known_code_key, get_nonce_key, + system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, + utils::storage_key_for_eth_balance, + U256, +}; +use zksync_utils::h256_to_u256; + +use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + vm_latest::utils::fee::get_batch_base_fee, +}; + +pub(crate) fn test_default_aa_interaction() { + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + let DeployContractsTx { + tx, + bytecode_hash, + address, + } = account.get_deploy_tx(&counter, None, TxType::L2); + let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.l1_batch_env); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.execute(VmExecutionMode::Batch); + + vm.vm.get_current_execution_state(); + + // Both deployment and ordinary nonce should be incremented by one. + let account_nonce_key = get_nonce_key(&account.address); + let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; + + // The code hash of the deployed contract should be marked as republished. + let known_codes_key = get_known_code_key(&bytecode_hash); + + // The contract should be deployed successfully. + let account_code_key = get_code_key(&address); + + let operator_balance_key = storage_key_for_eth_balance(&vm.l1_batch_env.fee_account); + let expected_fee = maximal_fee + - U256::from(result.refunds.gas_refunded) + * U256::from(get_batch_base_fee(&vm.l1_batch_env)); + + let expected_slots = [ + (account_nonce_key, expected_nonce), + (known_codes_key, 1.into()), + (account_code_key, h256_to_u256(bytecode_hash)), + (operator_balance_key, expected_fee), + ]; + vm.vm.verify_required_storage(&expected_slots); +} diff --git a/core/lib/multivm/src/versions/testonly/gas_limit.rs b/core/lib/multivm/src/versions/testonly/gas_limit.rs new file mode 100644 index 00000000000..5e31eb2b159 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/gas_limit.rs @@ -0,0 +1,34 @@ +use zksync_test_account::Account; +use zksync_types::{fee::Fee, Execute}; + +use super::{tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::TxExecutionMode, + vm_latest::constants::{TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, +}; + +/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
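+/// The test pushes a transaction with a recognizable gas limit (9999) and then checks that the
+/// bootloader heap word at `TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET` holds exactly that value,
+/// i.e. that the offset really points at the gas limit field of the encoded transaction.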
+pub(crate) fn test_tx_gas_limit_offset() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let gas_limit = 9999.into(); + let tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: Some(Default::default()), + ..Default::default() + }, + Some(Fee { + gas_limit, + ..Account::default_fee() + }), + ); + + vm.vm.push_transaction(tx); + + let slot = (TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET) as u32; + vm.vm.verify_required_bootloader_heap(&[(slot, gas_limit)]); +} diff --git a/core/lib/multivm/src/versions/testonly/get_used_contracts.rs b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs new file mode 100644 index 00000000000..fbad94a0eee --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs @@ -0,0 +1,233 @@ +use std::{collections::HashSet, iter}; + +use assert_matches::assert_matches; +use ethabi::Token; +use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; +use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_test_account::{Account, TxType}; +use zksync_types::{AccountTreeId, Address, Execute, StorageKey, H256, U256}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; + +use super::{ + read_proxy_counter_contract, read_test_contract, + tester::{VmTester, VmTesterBuilder}, + TestedVm, BASE_SYSTEM_CONTRACTS, +}; +use crate::{ + interface::{ + ExecutionResult, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterfaceExt, + }, + versions::testonly::ContractToDeploy, +}; + +pub(crate) fn test_get_used_contracts() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); + + // create and push and execute some not-empty factory deps transaction with success status + // to check that `get_decommitted_hashes()` updates + let contract_code = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); + vm.vm.push_transaction(tx.tx.clone()); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + assert!(vm + .vm + .decommitted_hashes() + .contains(&h256_to_u256(tx.bytecode_hash))); + + // Note: `Default_AA` will be in the list of used contracts if L2 tx is used + assert_eq!( + vm.vm.decommitted_hashes(), + known_bytecodes_without_base_system_contracts(&vm.vm) + ); + + // create push and execute some non-empty factory deps transaction that fails + // (`known_bytecodes` will be updated but we expect `get_decommitted_hashes()` to not be updated) + + let calldata = [1, 2, 3]; + let big_calldata: Vec = calldata + .iter() + .cycle() + .take(calldata.len() * 1024) + .cloned() + .collect(); + let account2 = Account::from_seed(u32::MAX); + assert_ne!(account2.address, account.address); + let tx2 = account2.get_l1_tx( + Execute { + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), + calldata: big_calldata, + value: Default::default(), + factory_deps: vec![vec![1; 32]], + }, + 1, + ); + + vm.vm.push_transaction(tx2.clone()); + + let res2 = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(res2.result.is_failed()); + + for factory_dep in tx2.execute.factory_deps { + let hash = hash_bytecode(&factory_dep); + let hash_to_u256 = 
h256_to_u256(hash); + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).contains(&hash_to_u256)); + assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); + } +} + +fn known_bytecodes_without_base_system_contracts(vm: &impl TestedVm) -> HashSet { + let mut known_bytecodes_without_base_system_contracts = vm.known_bytecode_hashes(); + known_bytecodes_without_base_system_contracts + .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); + if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { + let was_removed = + known_bytecodes_without_base_system_contracts.remove(&h256_to_u256(evm_emulator.hash)); + assert!(was_removed); + } + known_bytecodes_without_base_system_contracts +} + +/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial +/// decommitment cost (>10,000 gas). +fn inflated_counter_bytecode() -> Vec { + let mut counter_bytecode = read_test_contract(); + counter_bytecode.extend( + iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) + .take(10_000) + .flatten(), + ); + counter_bytecode +} + +#[derive(Debug)] +struct ProxyCounterData { + proxy_counter_address: Address, + counter_bytecode_hash: U256, +} + +fn execute_proxy_counter( + gas: u32, +) -> (VmTester, ProxyCounterData, VmExecutionResultAndLogs) { + let counter_bytecode = inflated_counter_bytecode(); + let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); + let counter_address = Address::repeat_byte(0x23); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx( + &proxy_counter_bytecode, + Some(&[Token::Address(counter_address)]), + TxType::L2, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let decommitted_hashes = vm.vm.decommitted_hashes(); + assert!( + !decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(deploy_tx.address), + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + let data = ProxyCounterData { + proxy_counter_address: deploy_tx.address, + counter_bytecode_hash, + }; + (vm, data, exec_result) +} + +pub(crate) fn test_get_used_contracts_with_far_call() { + let (vm, data, exec_result) = execute_proxy_counter::(100_000); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + let decommitted_hashes = vm.vm.decommitted_hashes(); + assert!( + decommitted_hashes.contains(&data.counter_bytecode_hash), + "{decommitted_hashes:?}" + ); +} + +pub(crate) fn test_get_used_contracts_with_out_of_gas_far_call() { + let (mut vm, data, exec_result) = execute_proxy_counter::(10_000); + 
assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); + let decommitted_hashes = vm.vm.decommitted_hashes(); + assert!( + decommitted_hashes.contains(&data.counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + // Execute another transaction with a successful far call and check that it's still charged for decommitment. + let account = &mut vm.rich_accounts[0]; + let (_, proxy_counter_abi) = read_proxy_counter_contract(); + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(data.proxy_counter_address), + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let proxy_counter_cost_key = StorageKey::new( + AccountTreeId::new(data.proxy_counter_address), + H256::from_low_u64_be(1), + ); + let far_call_cost_log = exec_result + .logs + .storage_logs + .iter() + .find(|log| log.log.key == proxy_counter_cost_key) + .expect("no cost log"); + assert!( + far_call_cost_log.previous_value.is_zero(), + "{far_call_cost_log:?}" + ); + let far_call_cost = h256_to_u256(far_call_cost_log.log.value); + assert!(far_call_cost > 10_000.into(), "{far_call_cost}"); +} diff --git a/core/lib/multivm/src/versions/testonly/is_write_initial.rs b/core/lib/multivm/src/versions/testonly/is_write_initial.rs new file mode 100644 index 00000000000..ef1fe2088c1 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/is_write_initial.rs @@ -0,0 +1,38 @@ +use zksync_test_account::TxType; +use zksync_types::get_nonce_key; + +use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; +use crate::interface::{storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; + +pub(crate) fn test_is_write_initial_behaviour() { + // In this test, we check result of `is_write_initial` at different stages. + // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't + // messed up it with the repeated writes during the one batch execution. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + let account = &mut vm.rich_accounts[0]; + + let nonce_key = get_nonce_key(&account.address); + // Check that the next write to the nonce key will be initial. + assert!(vm + .storage + .as_ref() + .borrow_mut() + .is_write_initial(&nonce_key)); + + let contract_code = read_test_contract(); + let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; + + vm.vm.push_transaction(tx); + vm.vm.execute(VmExecutionMode::OneTx); + + // Check that `is_write_initial` still returns true for the nonce key. 
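+ // The deployment above bumped the account nonce, but that write only lives in the in-batch
+ // cache; it has not been committed to the underlying storage, so the slot should still be
+ // reported as an initial write.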
+ assert!(vm + .storage + .as_ref() + .borrow_mut() + .is_write_initial(&nonce_key)); +} diff --git a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs new file mode 100644 index 00000000000..212b1f16f20 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs @@ -0,0 +1,182 @@ +use ethabi::Token; +use zksync_contracts::l1_messenger_contract; +use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; +use zksync_test_account::TxType; +use zksync_types::{ + get_code_key, get_known_code_key, + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + Execute, ExecuteTransactionCommon, U256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use super::{read_test_contract, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + utils::StorageWritesDeduplicator, +}; + +pub(crate) fn test_l1_tx_execution() { + // In this test, we try to execute a contract deployment from L1 + // Here instead of marking code hash via the bootloader means, we will be + // using L1->L2 communication, the same it would likely be done during the priority mode. + + // There are always at least 9 initial writes here, because we pay fees from l1: + // - `totalSupply` of ETH token + // - balance of the refund recipient + // - balance of the bootloader + // - `tx_rolling` hash + // - `gasPerPubdataByte` + // - `basePubdataSpent` + // - rolling hash of L2->L1 logs + // - transaction number in block counter + // - L2->L1 log counter in `L1Messenger` + + // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. + let basic_initial_writes = 5; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let contract_code = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); + let tx_hash = deploy_tx.tx.hash(); + + let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { + shard_id: 0, + is_service: true, + tx_number_in_block: 0, + sender: BOOTLOADER_ADDRESS, + key: tx_hash, + value: u256_to_h256(U256::from(1u32)), + }] + .into_iter() + .map(UserL2ToL1Log) + .collect(); + + vm.vm.push_transaction(deploy_tx.tx.clone()); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + // The code hash of the deployed contract should be marked as republished. + let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); + + // The contract should be deployed successfully. 
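+ // Below, `verify_required_storage` checks two slots: the known-codes flag for the deployed
+ // bytecode hash must be set to 1, and the code slot of the deployed address must hold that
+ // bytecode hash.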
+ let account_code_key = get_code_key(&deploy_tx.address); + + assert!(!res.result.is_failed()); + + vm.vm.verify_required_storage(&[ + (known_codes_key, U256::from(1)), + (account_code_key, h256_to_u256(deploy_tx.bytecode_hash)), + ]); + assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + true, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + vm.vm.push_transaction(tx); + let res = vm.vm.execute(VmExecutionMode::OneTx); + let storage_logs = res.logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + + // Tx panicked + assert_eq!(res.initial_storage_writes, basic_initial_writes); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + false, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + vm.vm.push_transaction(tx.clone()); + let res = vm.vm.execute(VmExecutionMode::OneTx); + let storage_logs = res.logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + // We changed one slot inside contract. + assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); + + // No repeated writes + let repeated_writes = res.repeated_storage_writes; + assert_eq!(res.repeated_storage_writes, 0); + + vm.vm.push_transaction(tx); + let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. + // But now the base pubdata spent has changed too. + assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); + assert_eq!(res.repeated_storage_writes, repeated_writes); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + false, + Some(10.into()), + false, + TxType::L1 { serial_id: 1 }, + ); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + // Method is not payable tx should fail + assert!(result.result.is_failed(), "The transaction should fail"); + + let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); + assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); + assert_eq!(res.repeated_storage_writes, 1); +} + +pub(crate) fn test_l1_tx_execution_high_gas_limit() { + // In this test, we try to execute an L1->L2 transaction with a high gas limit. + // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, + // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
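+ // Concretely, the transaction below carries a ~100 kB L2->L1 message and an L1 gas limit of
+ // 300M; the bootloader must process it without panicking, while the execution itself is
+ // expected to fail (see the assertion at the end).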
+ + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let account = &mut vm.rich_accounts[0]; + + let l1_messenger = l1_messenger_contract(); + + let contract_function = l1_messenger.function("sendToL1").unwrap(); + let params = [ + // Even a message of size 100k should not be able to be sent by a priority transaction + Token::Bytes(vec![0u8; 100_000]), + ]; + let calldata = contract_function.encode_input(¶ms).unwrap(); + + let mut tx = account.get_l1_tx( + Execute { + contract_address: Some(L1_MESSENGER_ADDRESS), + value: 0.into(), + factory_deps: vec![], + calldata, + }, + 0, + ); + + if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { + // Using some large gas limit + data.gas_limit = 300_000_000.into(); + } else { + unreachable!() + }; + + vm.vm.push_transaction(tx); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(res.result.is_failed(), "The transaction should've failed"); +} diff --git a/core/lib/multivm/src/versions/testonly/l2_blocks.rs b/core/lib/multivm/src/versions/testonly/l2_blocks.rs new file mode 100644 index 00000000000..634a9b34bf6 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/l2_blocks.rs @@ -0,0 +1,416 @@ +//! +//! Tests for the bootloader +//! The description for each of the tests can be found in the corresponding `.yul` file. +//! + +use assert_matches::assert_matches; +use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; +use zksync_types::{ + block::{pack_block_info, L2BlockHasher}, + AccountTreeId, Address, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, + L2BlockNumber, ProtocolVersionId, StorageKey, Transaction, H256, SYSTEM_CONTEXT_ADDRESS, + SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use super::{default_l1_batch, get_empty_storage, tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{ + storage::StorageView, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, + VmInterfaceExt, + }, + vm_latest::{ + constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, + utils::l2_blocks::get_l2_block_hash_key, + }, +}; + +fn get_l1_noop() -> Transaction { + Transaction { + common_data: ExecuteTransactionCommon::L1(L1TxCommonData { + sender: Address::repeat_byte(1), + gas_limit: U256::from(2000000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute: Execute { + contract_address: Some(Address::repeat_byte(0xc0)), + calldata: vec![], + value: U256::zero(), + factory_deps: vec![], + }, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +pub(crate) fn test_l2_block_initialization_timestamp() { + // This test checks that the L2 block initialization works correctly. + // Here we check that the first block must have timestamp that is greater or equal to the timestamp + // of the current batch. + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + // Override the timestamp of the current L2 block to be 0. 
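+ // Since the batch timestamp is positive, an L2 block timestamp of 0 precedes it, and the
+ // bootloader is expected to halt with `FailedToSetL2Block` (asserted below).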
+ vm.vm.push_l2_block_unchecked(L2BlockEnv { + number: 1, + timestamp: 0, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }); + let l1_tx = get_l1_noop(); + + vm.vm.push_transaction(l1_tx); + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert_matches!( + res.result, + ExecutionResult::Halt { reason: Halt::FailedToSetL2Block(msg) } + if msg.contains("timestamp") + ); +} + +pub(crate) fn test_l2_block_initialization_number_non_zero() { + // This test checks that the L2 block initialization works correctly. + // Here we check that the first L2 block number can not be zero. + + let l1_batch = default_l1_batch(L1BatchNumber(1)); + let first_l2_block = L2BlockEnv { + number: 0, + timestamp: l1_batch.timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch) + .with_rich_accounts(1) + .build::(); + + let l1_tx = get_l1_noop(); + + vm.vm.push_transaction(l1_tx); + + set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + res.result, + ExecutionResult::Halt { + reason: Halt::FailedToSetL2Block( + "L2 block number is never expected to be zero".to_string() + ) + } + ); +} + +fn test_same_l2_block( + expected_error: Option, + override_timestamp: Option, + override_prev_block_hash: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch) + .with_rich_accounts(1) + .build::(); + + let l1_tx = get_l1_noop(); + vm.vm.push_transaction(l1_tx.clone()); + let res = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!res.result.is_failed()); + + let mut current_l2_block = vm.l1_batch_env.first_l2_block; + + if let Some(timestamp) = override_timestamp { + current_l2_block.timestamp = timestamp; + } + if let Some(prev_block_hash) = override_prev_block_hash { + current_l2_block.prev_block_hash = prev_block_hash; + } + + if (None, None) == (override_timestamp, override_prev_block_hash) { + current_l2_block.max_virtual_blocks_to_create = 0; + } + + vm.vm.push_transaction(l1_tx); + set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +pub(crate) fn test_l2_block_same_l2_block() { + // This test aims to test the case when there are multiple transactions inside the same L2 block. 
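+ // `test_same_l2_block` executes one no-op L1 transaction, then pushes a second one with
+ // manually set info for the *same* L2 block, optionally corrupting the timestamp or the
+ // previous block hash. `expected_error: None` means the block info must be accepted as a
+ // valid continuation of the block.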
+ + // Case 1: Incorrect timestamp + test_same_l2_block::( + Some(Halt::FailedToSetL2Block( + "The timestamp of the same L2 block must be same".to_string(), + )), + Some(0), + None, + ); + + // Case 2: Incorrect previous block hash + test_same_l2_block::( + Some(Halt::FailedToSetL2Block( + "The previous hash of the same L2 block must be same".to_string(), + )), + None, + Some(H256::zero()), + ); + + // Case 3: Correct continuation of the same L2 block + test_same_l2_block::(None, None, None); +} + +fn test_new_l2_block( + first_l2_block: L2BlockEnv, + overriden_second_block_number: Option, + overriden_second_block_timestamp: Option, + overriden_second_block_prev_block_hash: Option, + expected_error: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + l1_batch.first_l2_block = first_l2_block; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let l1_tx = get_l1_noop(); + + // Firstly we execute the first transaction + vm.vm.push_transaction(l1_tx.clone()); + vm.vm.execute(VmExecutionMode::OneTx); + + let mut second_l2_block = vm.l1_batch_env.first_l2_block; + second_l2_block.number += 1; + second_l2_block.timestamp += 1; + second_l2_block.prev_block_hash = vm.vm.last_l2_block_hash(); + + if let Some(block_number) = overriden_second_block_number { + second_l2_block.number = block_number; + } + if let Some(timestamp) = overriden_second_block_timestamp { + second_l2_block.timestamp = timestamp; + } + if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { + second_l2_block.prev_block_hash = prev_block_hash; + } + + vm.vm.push_l2_block_unchecked(second_l2_block); + vm.vm.push_transaction(l1_tx); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +pub(crate) fn test_l2_block_new_l2_block() { + // This test is aimed to cover potential issue + + let correct_first_block = L2BlockEnv { + number: 1, + timestamp: 1, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }; + + // Case 1: Block number increasing by more than 1 + test_new_l2_block::( + correct_first_block, + Some(3), + None, + None, + Some(Halt::FailedToSetL2Block( + "Invalid new L2 block number".to_string(), + )), + ); + + // Case 2: Timestamp not increasing + test_new_l2_block::( + correct_first_block, + None, + Some(1), + None, + Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), + ); + + // Case 3: Incorrect previous block hash + test_new_l2_block::( + correct_first_block, + None, + None, + Some(H256::zero()), + Some(Halt::FailedToSetL2Block( + "The current L2 block hash is incorrect".to_string(), + )), + ); + + // Case 4: Correct new block + test_new_l2_block::(correct_first_block, None, None, None, None); +} + +#[allow(clippy::too_many_arguments)] +fn test_first_in_batch( + miniblock_timestamp: u64, + miniblock_number: u32, + pending_txs_hash: H256, + batch_timestamp: u64, + new_batch_timestamp: u64, + batch_number: u32, + proposed_block: L2BlockEnv, + expected_error: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.number += 1; + l1_batch.timestamp = 
new_batch_timestamp; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + let l1_tx = get_l1_noop(); + + // Setting the values provided. + let miniblock_info_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let pending_txs_hash_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + let batch_info_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_BLOCK_INFO_POSITION, + ); + let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); + + let mut storage = get_empty_storage(); + storage.set_value( + miniblock_info_slot, + u256_to_h256(pack_block_info( + miniblock_number as u64, + miniblock_timestamp, + )), + ); + storage.set_value(pending_txs_hash_slot, pending_txs_hash); + storage.set_value( + batch_info_slot, + u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), + ); + storage.set_value( + prev_block_hash_position, + L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), + ); + // Replace the storage entirely. It's not enough to write to the underlying storage (since read values are already cached + // in the storage view). + *vm.storage.borrow_mut() = StorageView::new(storage); + + // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. + // And then override it with the user-provided value + + let last_l2_block = vm.l1_batch_env.first_l2_block; + let new_l2_block = L2BlockEnv { + number: last_l2_block.number + 1, + timestamp: last_l2_block.timestamp + 1, + prev_block_hash: vm.vm.last_l2_block_hash(), + max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, + }; + + vm.vm.push_l2_block_unchecked(new_l2_block); + vm.vm.push_transaction(l1_tx); + set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +pub(crate) fn test_l2_block_first_in_batch() { + let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); + let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) + .finalize(ProtocolVersionId::latest()); + test_first_in_batch::( + 1, + 1, + H256::zero(), + 1, + 2, + 1, + L2BlockEnv { + number: 2, + timestamp: 2, + prev_block_hash, + max_virtual_blocks_to_create: 1, + }, + None, + ); + + let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); + let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) + .finalize(ProtocolVersionId::latest()); + test_first_in_batch::( + 8, + 1, + H256::zero(), + 5, + 12, + 1, + L2BlockEnv { + number: 2, + timestamp: 9, + prev_block_hash, + max_virtual_blocks_to_create: 1, + }, + Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), + ); +} + +fn set_manual_l2_block_info(vm: &mut impl TestedVm, tx_number: usize, block_info: L2BlockEnv) { + let fictive_miniblock_position = + TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; + vm.write_to_bootloader_heap(&[ + 
(fictive_miniblock_position, block_info.number.into()), + (fictive_miniblock_position + 1, block_info.timestamp.into()), + ( + fictive_miniblock_position + 2, + h256_to_u256(block_info.prev_block_hash), + ), + ( + fictive_miniblock_position + 3, + block_info.max_virtual_blocks_to_create.into(), + ), + ]) +} diff --git a/core/lib/multivm/src/versions/testonly/mod.rs b/core/lib/multivm/src/versions/testonly/mod.rs new file mode 100644 index 00000000000..838ba98a9aa --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/mod.rs @@ -0,0 +1,228 @@ +//! Reusable tests and tooling for low-level VM testing. +//! +//! # How it works +//! +//! - [`TestedVm`] defines test-specific VM extensions. It's currently implemented for the latest legacy VM +//! (`vm_latest`) and the fast VM (`vm_fast`). +//! - Submodules of this module define test functions generic by `TestedVm`. Specific VM versions implement `TestedVm` +//! and can create tests based on these test functions with minimum amount of boilerplate code. +//! - Tests use [`VmTester`] built using [`VmTesterBuilder`] to create a VM instance. This allows to set up storage for the VM, +//! custom [`SystemEnv`] / [`L1BatchEnv`], deployed contracts, pre-funded accounts etc. + +use ethabi::Contract; +use once_cell::sync::Lazy; +use zksync_contracts::{ + load_contract, read_bootloader_code, read_bytecode, read_zbin_bytecode, BaseSystemContracts, + SystemContractCode, +}; +use zksync_types::{ + block::L2BlockHasher, fee_model::BatchFeeInput, get_code_key, get_is_account_key, + utils::storage_key_for_eth_balance, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, u256_to_h256}; +use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; + +pub(super) use self::tester::{TestedVm, VmTester, VmTesterBuilder}; +use crate::{ + interface::storage::InMemoryStorage, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +pub(super) mod block_tip; +pub(super) mod bootloader; +pub(super) mod bytecode_publishing; +pub(super) mod circuits; +pub(super) mod code_oracle; +pub(super) mod default_aa; +pub(super) mod gas_limit; +pub(super) mod get_used_contracts; +pub(super) mod is_write_initial; +pub(super) mod l1_tx_execution; +pub(super) mod l2_blocks; +pub(super) mod nonce_holder; +pub(super) mod precompiles; +pub(super) mod refunds; +pub(super) mod require_eip712; +pub(super) mod rollbacks; +pub(super) mod secp256r1; +mod shadow; +pub(super) mod simple_execution; +pub(super) mod storage; +mod tester; +pub(super) mod tracing_execution_error; +pub(super) mod transfer; +pub(super) mod upgrade; + +static BASE_SYSTEM_CONTRACTS: Lazy = + Lazy::new(BaseSystemContracts::load_from_disk); + +fn get_empty_storage() -> InMemoryStorage { + InMemoryStorage::with_system_contracts(hash_bytecode) +} + +pub(crate) fn read_test_contract() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") +} + +fn get_complex_upgrade_abi() -> Contract { + load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" + ) +} + +fn read_complex_upgrade() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") +} + +fn read_precompiles_contract() -> Vec { + read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", + ) +} + +fn 
load_precompiles_contract() -> Contract { + load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", + ) +} + +fn read_proxy_counter_contract() -> (Vec, Contract) { + const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; + (read_bytecode(PATH), load_contract(PATH)) +} + +fn read_nonce_holder_tester() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") +} + +fn read_expensive_contract() -> (Vec, Contract) { + const PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; + (read_bytecode(PATH), load_contract(PATH)) +} + +fn read_many_owners_custom_account_contract() -> (Vec, Contract) { + let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; + (read_bytecode(path), load_contract(path)) +} + +fn read_error_contract() -> Vec { + read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", + ) +} + +pub(crate) fn read_max_depth_contract() -> Vec { + read_zbin_bytecode( + "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", + ) +} + +pub(crate) fn read_simple_transfer_contract() -> Vec { + read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/simple-transfer/simple-transfer.sol/SimpleTransfer.json", + ) +} + +pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { + let bootloader_code = read_bootloader_code(test); + let bootloader_hash = hash_bytecode(&bootloader_code); + SystemContractCode { + code: bytes_to_be_words(bootloader_code), + hash: bootloader_hash, + } +} + +pub(super) fn default_system_env() -> SystemEnv { + SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BaseSystemContracts::playground(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + } +} + +pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { + // Add a bias to the timestamp to make it more realistic / "random". 
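+ // e.g. batch #1 gets timestamp 1_700_000_001, batch #2 gets 1_700_000_002, and so on.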
+ let timestamp = 1_700_000_000 + u64::from(number.0); + L1BatchEnv { + previous_batch_hash: None, + number, + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::repeat_byte(1), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + } +} + +pub(super) fn make_address_rich(storage: &mut InMemoryStorage, address: Address) { + let key = storage_key_for_eth_balance(&address); + storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); +} + +#[derive(Debug, Clone)] +pub(super) struct ContractToDeploy { + bytecode: Vec, + address: Address, + is_account: bool, + is_funded: bool, +} + +impl ContractToDeploy { + pub fn new(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: false, + is_funded: false, + } + } + + pub fn account(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: true, + is_funded: false, + } + } + + #[must_use] + pub fn funded(mut self) -> Self { + self.is_funded = true; + self + } + + pub fn insert(&self, storage: &mut InMemoryStorage) { + let deployer_code_key = get_code_key(&self.address); + storage.set_value(deployer_code_key, hash_bytecode(&self.bytecode)); + if self.is_account { + let is_account_key = get_is_account_key(&self.address); + storage.set_value(is_account_key, u256_to_h256(1_u32.into())); + } + storage.store_factory_dep(hash_bytecode(&self.bytecode), self.bytecode.clone()); + + if self.is_funded { + make_address_rich(storage, self.address); + } + } + + /// Inserts the contracts into the test environment, bypassing the deployer system contract. 
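+ /// Equivalent to calling [`Self::insert`] for every contract in `contracts`.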
+ pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) { + for contract in contracts { + contract.insert(storage); + } + } +} diff --git a/core/lib/multivm/src/versions/testonly/nonce_holder.rs b/core/lib/multivm/src/versions/testonly/nonce_holder.rs new file mode 100644 index 00000000000..8ef120c693c --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/nonce_holder.rs @@ -0,0 +1,200 @@ +use zksync_test_account::Account; +use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; + +use super::{read_nonce_holder_tester, tester::VmTesterBuilder, ContractToDeploy, TestedVm}; +use crate::interface::{ + ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, + VmRevertReason, +}; + +pub enum NonceHolderTestMode { + SetValueUnderNonce, + IncreaseMinNonceBy5, + IncreaseMinNonceTooMuch, + LeaveNonceUnused, + IncreaseMinNonceBy1, + SwitchToArbitraryOrdering, +} + +impl From for u8 { + fn from(mode: NonceHolderTestMode) -> u8 { + match mode { + NonceHolderTestMode::SetValueUnderNonce => 0, + NonceHolderTestMode::IncreaseMinNonceBy5 => 1, + NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, + NonceHolderTestMode::LeaveNonceUnused => 3, + NonceHolderTestMode::IncreaseMinNonceBy1 => 4, + NonceHolderTestMode::SwitchToArbitraryOrdering => 5, + } + } +} + +fn run_nonce_test( + vm: &mut impl TestedVm, + account: &mut Account, + nonce: u32, + test_mode: NonceHolderTestMode, + error_message: Option, + comment: &'static str, +) { + vm.make_snapshot(); + let mut transaction = account.get_l2_tx_for_execute_with_nonce( + Execute { + contract_address: Some(account.address), + calldata: vec![12], + value: Default::default(), + factory_deps: vec![], + }, + None, + Nonce(nonce), + ); + let ExecuteTransactionCommon::L2(tx_data) = &mut transaction.common_data else { + unreachable!(); + }; + tx_data.signature = vec![test_mode.into()]; + vm.push_transaction(transaction); + let result = vm.execute(VmExecutionMode::OneTx); + + if let Some(msg) = error_message { + let expected_error = + TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { + msg, + data: vec![], + })); + let ExecutionResult::Halt { reason } = &result.result else { + panic!("Expected revert, got {:?}", result.result); + }; + assert_eq!(reason.to_string(), expected_error.to_string(), "{comment}"); + vm.rollback_to_the_latest_snapshot(); + } else { + assert!(!result.result.is_failed(), "{}", comment); + vm.pop_snapshot_no_rollback(); + } +} + +pub(crate) fn test_nonce_holder() { + let builder = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1); + let account_address = builder.rich_account(0).address; + let mut vm = builder + .with_custom_contracts(vec![ContractToDeploy::account( + read_nonce_holder_tester(), + account_address, + )]) + .build::(); + let account = &mut vm.rich_accounts[0]; + let hex_addr = hex::encode(account.address.to_fixed_bytes()); + + // Test 1: trying to set value under non sequential nonce value. 
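+ // The account starts with a minimal nonce of 0, so under the default sequential ordering a
+ // transaction using nonce 1 is out of order and validation is expected to fail with the error
+ // below.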
+ run_nonce_test( + &mut vm.vm, + account, + 1u32, + NonceHolderTestMode::SetValueUnderNonce, + Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), + "Allowed to set value under non sequential value", + ); + + // Test 2: increase min nonce by 1 with sequential nonce ordering: + run_nonce_test( + &mut vm.vm, + account, + 0u32, + NonceHolderTestMode::IncreaseMinNonceBy1, + None, + "Failed to increment nonce by 1 for sequential account", + ); + + // Test 3: correctly set value under nonce with sequential nonce ordering: + run_nonce_test( + &mut vm.vm, + account, + 1u32, + NonceHolderTestMode::SetValueUnderNonce, + None, + "Failed to set value under nonce sequential value", + ); + + // Test 5: migrate to the arbitrary nonce ordering: + run_nonce_test( + &mut vm.vm, + account, + 2u32, + NonceHolderTestMode::SwitchToArbitraryOrdering, + None, + "Failed to switch to arbitrary ordering", + ); + + // Test 6: increase min nonce by 5 + run_nonce_test( + &mut vm.vm, + account, + 6u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + None, + "Failed to increase min nonce by 5", + ); + + // Test 7: since the nonces in range [6,10] are no longer allowed, the + // tx with nonce 10 should not be allowed + run_nonce_test( + &mut vm.vm, + account, + 10u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), + "Allowed to reuse nonce below the minimal one", + ); + + // Test 8: we should be able to use nonce 13 + run_nonce_test( + &mut vm.vm, + account, + 13u32, + NonceHolderTestMode::SetValueUnderNonce, + None, + "Did not allow to use unused nonce 10", + ); + + // Test 9: we should not be able to reuse nonce 13 + run_nonce_test( + &mut vm.vm, + account, + 13u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), + "Allowed to reuse the same nonce twice", + ); + + // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 + run_nonce_test( + &mut vm.vm, + account, + 14u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + None, + "Did not allow to use a bumped nonce", + ); + + // Test 11: Do not allow bumping nonce by too much + run_nonce_test( + &mut vm.vm, + account, + 16u32, + NonceHolderTestMode::IncreaseMinNonceTooMuch, + Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), + "Allowed for incrementing min nonce too much", + ); + + // Test 12: Do not allow not setting a nonce as used + run_nonce_test( + &mut vm.vm, + account, + 16u32, + NonceHolderTestMode::LeaveNonceUnused, + Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), + "Allowed to leave nonce as unused", + ); +} diff --git a/core/lib/multivm/src/versions/testonly/precompiles.rs b/core/lib/multivm/src/versions/testonly/precompiles.rs new file mode 100644 index 00000000000..270afab0731 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/precompiles.rs @@ -0,0 +1,110 @@ +use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config; +use zksync_types::{Address, Execute}; + +use 
super::{read_precompiles_contract, tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + versions::testonly::ContractToDeploy, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +pub(crate) fn test_keccak() { + // Execute special transaction and check that at least 1000 keccak calls were made. + let contract = read_precompiles_contract(); + let address = Address::repeat_byte(1); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) + .build::(); + + // calldata for `doKeccak(1000)`. + let keccak1000_calldata = + "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: hex::decode(keccak1000_calldata).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + + let exec_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let keccak_count = exec_result.statistics.circuit_statistic.keccak256 + * get_geometry_config().cycles_per_keccak256_circuit as f32; + assert!(keccak_count >= 1000.0, "{keccak_count}"); +} + +pub(crate) fn test_sha256() { + // Execute special transaction and check that at least 1000 `sha256` calls were made. + let contract = read_precompiles_contract(); + let address = Address::repeat_byte(1); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) + .build::(); + + // calldata for `doSha256(1000)`. + let sha1000_calldata = + "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: hex::decode(sha1000_calldata).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + + let exec_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let sha_count = exec_result.statistics.circuit_statistic.sha256 + * get_geometry_config().cycles_per_sha256_circuit as f32; + assert!(sha_count >= 1000.0, "{sha_count}"); +} + +pub(crate) fn test_ecrecover() { + // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
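+ // `circuit_statistic.ecrecover` is the fraction of an ecrecover circuit consumed; multiplying
+ // it by the circuit's cycle capacity recovers (approximately) the number of precompile calls,
+ // which must be 1 up to floating-point error for a plain transfer.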
+ let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(account.address), + calldata: vec![], + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + + let exec_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover + * get_geometry_config().cycles_per_ecrecover_circuit as f32; + assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}"); +} diff --git a/core/lib/multivm/src/versions/testonly/refunds.rs b/core/lib/multivm/src/versions/testonly/refunds.rs new file mode 100644 index 00000000000..565607dff10 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/refunds.rs @@ -0,0 +1,215 @@ +use ethabi::Token; +use zksync_test_account::TxType; +use zksync_types::{Address, Execute, U256}; + +use super::{ + read_expensive_contract, read_test_contract, tester::VmTesterBuilder, ContractToDeploy, + TestedVm, +}; +use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; + +pub(crate) fn test_predetermined_refunded_gas() { + // In this test, we compare the execution of the bootloader with the predefined + // refunded gas and without them + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + let l1_batch = vm.l1_batch_env.clone(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx; + vm.vm.push_transaction(tx.clone()); + let result = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(!result.result.is_failed()); + + // If the refund provided by the operator or the final refund are the 0 + // there is no impact of the operator's refund at all and so this test does not + // make much sense. + assert!( + result.refunds.operator_suggested_refund > 0, + "The operator's refund is 0" + ); + assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); + + let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); + assert!(!result_without_predefined_refunds.result.is_failed(),); + + // Here we want to provide the same refund from the operator and check that it's the correct one. + // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
+ // But the overall result should be the same + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + assert_eq!(account.address(), vm.rich_accounts[0].address()); + + vm.vm + .push_transaction_with_refund(tx.clone(), result.refunds.gas_refunded); + + let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); + + assert!(!result_with_predefined_refunds.result.is_failed()); + + // We need to sort these lists as those are flattened from HashMaps + current_state_with_predefined_refunds + .used_contract_hashes + .sort(); + current_state_without_predefined_refunds + .used_contract_hashes + .sort(); + + assert_eq!( + current_state_with_predefined_refunds.events, + current_state_without_predefined_refunds.events + ); + + assert_eq!( + current_state_with_predefined_refunds.user_l2_to_l1_logs, + current_state_without_predefined_refunds.user_l2_to_l1_logs + ); + + assert_eq!( + current_state_with_predefined_refunds.system_logs, + current_state_without_predefined_refunds.system_logs + ); + + assert_eq!( + current_state_with_predefined_refunds.deduplicated_storage_logs, + current_state_without_predefined_refunds.deduplicated_storage_logs + ); + assert_eq!( + current_state_with_predefined_refunds.used_contract_hashes, + current_state_without_predefined_refunds.used_contract_hashes + ); + + // In this test we put the different refund from the operator. + // We still can't use the refund tracer, because it will override the refund. + // But we can check that the logs and events have changed. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + assert_eq!(account.address(), vm.rich_accounts[0].address()); + + let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; + vm.vm + .push_transaction_with_refund(tx, changed_operator_suggested_refund); + let result = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); + + assert!(!result.result.is_failed()); + current_state_with_changed_predefined_refunds + .used_contract_hashes + .sort(); + current_state_without_predefined_refunds + .used_contract_hashes + .sort(); + + assert_eq!( + current_state_with_changed_predefined_refunds.events.len(), + current_state_without_predefined_refunds.events.len() + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.events, + current_state_without_predefined_refunds.events + ); + + assert_eq!( + current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, + current_state_without_predefined_refunds.user_l2_to_l1_logs + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.system_logs, + current_state_without_predefined_refunds.system_logs + ); + + assert_eq!( + current_state_with_changed_predefined_refunds + .deduplicated_storage_logs + .len(), + current_state_without_predefined_refunds + .deduplicated_storage_logs + .len() + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.deduplicated_storage_logs, + current_state_without_predefined_refunds.deduplicated_storage_logs + ); + assert_eq!( + current_state_with_changed_predefined_refunds.used_contract_hashes, + 
current_state_without_predefined_refunds.used_contract_hashes + ); +} + +pub(crate) fn test_negative_pubdata_for_transaction() { + let expensive_contract_address = Address::repeat_byte(1); + let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); + let expensive_function = expensive_contract.function("expensive").unwrap(); + let cleanup_function = expensive_contract.function("cleanUp").unwrap(); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + expensive_contract_bytecode, + expensive_contract_address, + )]) + .build::(); + + let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: Some(expensive_contract_address), + calldata: expensive_function + .encode_input(&[Token::Uint(10.into())]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(expensive_tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. + let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: Some(expensive_contract_address), + calldata: cleanup_function.encode_input(&[]).unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(clean_up_tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + assert!(result.refunds.operator_suggested_refund > 0); + assert_eq!( + result.refunds.gas_refunded, + result.refunds.operator_suggested_refund + ); +} diff --git a/core/lib/multivm/src/versions/testonly/require_eip712.rs b/core/lib/multivm/src/versions/testonly/require_eip712.rs new file mode 100644 index 00000000000..1ea3964d7cd --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/require_eip712.rs @@ -0,0 +1,146 @@ +use ethabi::Token; +use zksync_eth_signer::TransactionParameters; +use zksync_types::{ + fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, Address, Eip712Domain, Execute, + L2ChainId, Nonce, Transaction, U256, +}; + +use super::{ + read_many_owners_custom_account_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm, +}; +use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; + +/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy +/// and EIP712 transactions. +/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. +pub(crate) fn test_require_eip712() { + // Use 3 accounts: + // - `private_address` - EOA account, where we have the key + // - `account_address` - AA account, where the contract is deployed + // - beneficiary - an EOA account, where we'll try to transfer the tokens. 
+ let aa_address = Address::repeat_byte(0x10); + let beneficiary_address = Address::repeat_byte(0x20); + + let (bytecode, contract) = read_many_owners_custom_account_contract(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_custom_contracts(vec![ + ContractToDeploy::account(bytecode, aa_address).funded() + ]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + assert_eq!(vm.get_eth_balance(beneficiary_address), U256::from(0)); + let chain_id: u32 = 270; + let mut private_account = vm.rich_accounts[0].clone(); + + // First, let's set the owners of the AA account to the `private_address`. + // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). + let set_owners_function = contract.function("setOwners").unwrap(); + let encoded_input = set_owners_function + .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) + .unwrap(); + + let tx = private_account.get_l2_tx_for_execute( + Execute { + contract_address: Some(aa_address), + calldata: encoded_input, + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + let private_account_balance = vm.get_eth_balance(private_account.address); + + // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). + // Normally this would not work - unless the operator is malicious. + let aa_raw_tx = TransactionParameters { + nonce: U256::from(0), + to: Some(beneficiary_address), + gas: U256::from(100000000), + gas_price: Some(U256::from(10000000)), + value: U256::from(888000088), + data: vec![], + chain_id: 270, + transaction_type: None, + access_list: None, + max_fee_per_gas: U256::from(1000000000), + max_priority_fee_per_gas: U256::from(1000000000), + max_fee_per_blob_gas: None, + blob_versioned_hashes: None, + }; + + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); + let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); + + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); + l2_tx.set_input(aa_tx, hash); + // Pretend that operator is malicious and sets the initiator to the AA account. + l2_tx.common_data.initiator_address = aa_address; + let transaction: Transaction = l2_tx.into(); + + vm.vm.push_transaction(transaction); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + assert_eq!( + vm.get_eth_balance(beneficiary_address), + U256::from(888000088) + ); + // Make sure that the tokens were transferred from the AA account. 
+ assert_eq!( + private_account_balance, + vm.get_eth_balance(private_account.address) + ); + + // // Now send the 'classic' EIP712 transaction + let tx_712 = L2Tx::new( + Some(beneficiary_address), + vec![], + Nonce(1), + Fee { + gas_limit: U256::from(1000000000), + max_fee_per_gas: U256::from(1000000000), + max_priority_fee_per_gas: U256::from(1000000000), + gas_per_pubdata_limit: U256::from(1000000000), + }, + aa_address, + U256::from(28374938), + vec![], + Default::default(), + ); + + let mut transaction_request: TransactionRequest = tx_712.into(); + transaction_request.chain_id = Some(chain_id.into()); + + let domain = Eip712Domain::new(L2ChainId::from(chain_id)); + let signature = private_account + .get_pk_signer() + .sign_typed_data(&domain, &transaction_request) + .unwrap(); + let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); + + let (aa_txn_request, aa_hash) = + TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); + + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); + l2_tx.set_input(encoded_tx, aa_hash); + + let transaction: Transaction = l2_tx.into(); + vm.vm.push_transaction(transaction); + vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + vm.get_eth_balance(beneficiary_address), + U256::from(916375026) + ); + assert_eq!( + private_account_balance, + vm.get_eth_balance(private_account.address) + ); +} diff --git a/core/lib/multivm/src/versions/testonly/rollbacks.rs b/core/lib/multivm/src/versions/testonly/rollbacks.rs new file mode 100644 index 00000000000..cab3427899e --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/rollbacks.rs @@ -0,0 +1,212 @@ +use std::collections::HashMap; + +use assert_matches::assert_matches; +use ethabi::Token; +use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; +use zksync_test_account::{DeployContractsTx, TxType}; +use zksync_types::{Address, Execute, Nonce, U256}; + +use super::{ + read_test_contract, + tester::{TransactionTestInfo, TxModifier, VmTesterBuilder}, + ContractToDeploy, TestedVm, +}; +use crate::interface::{storage::ReadStorage, ExecutionResult, TxExecutionMode, VmInterfaceExt}; + +pub(crate) fn test_vm_rollbacks() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let mut account = vm.rich_accounts[0].clone(); + let counter = read_test_contract(); + let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(tx_0.clone(), false), + TransactionTestInfo::new_processed(tx_1.clone(), false), + TransactionTestInfo::new_processed(tx_2.clone(), false), + ]); + + // reset vm + vm.reset_with_empty_storage(); + + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), + // The correct nonce is 0, this tx will fail + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), + ), + // This tx will succeed + 
TransactionTestInfo::new_processed(tx_0.clone(), false), + // The correct nonce is 1, this tx will fail + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), + // The correct nonce is 1, this tx will fail + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), + ), + // This tx will succeed + TransactionTestInfo::new_processed(tx_1, false), + // The correct nonce is 2, this tx will fail + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), + // This tx will succeed + TransactionTestInfo::new_processed(tx_2.clone(), false), + // This tx will fail + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), + ), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), + ]); + + pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks); +} + +pub(crate) fn test_vm_loadnext_rollbacks() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + let mut account = vm.rich_accounts[0].clone(); + + let loadnext_contract = get_loadnext_contract(); + let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; + let DeployContractsTx { + tx: loadnext_deploy_tx, + address, + .. + } = account.get_deploy_tx_with_factory_deps( + &loadnext_contract.bytecode, + Some(loadnext_constructor_data), + loadnext_contract.factory_deps.clone(), + TxType::L2, + ); + + let loadnext_tx_1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: LoadnextContractExecutionParams { + reads: 100, + writes: 100, + events: 100, + hashes: 500, + recursive_calls: 10, + deploys: 60, + } + .to_bytes(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + let loadnext_tx_2 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: LoadnextContractExecutionParams { + reads: 100, + writes: 100, + events: 100, + hashes: 500, + recursive_calls: 10, + deploys: 60, + } + .to_bytes(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), + ]); + + // reset vm + vm.reset_with_empty_storage(); + + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), + ), + TransactionTestInfo::new_processed(loadnext_tx_1, false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + 
loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), + ), + TransactionTestInfo::new_processed(loadnext_tx_2, false), + ]); + + assert_eq!(result_without_rollbacks, result_with_rollbacks); +} + +pub(crate) fn test_rollback_in_call_mode() { + let counter_bytecode = read_test_contract(); + let counter_address = Address::repeat_byte(1); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::EthCall) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) + .with_rich_accounts(1) + .build::(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); + + let (compression_result, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + compression_result.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } + if output.to_string().contains("This method always reverts") + ); + + let storage_logs = &vm_result.logs.storage_logs; + let deduplicated_logs = storage_logs + .iter() + .filter_map(|log| log.log.is_write().then_some((log.log.key, log.log.value))); + let deduplicated_logs: HashMap<_, _> = deduplicated_logs.collect(); + // Check that all storage changes are reverted + let mut storage = vm.storage.borrow_mut(); + for (key, value) in deduplicated_logs { + assert_eq!(storage.inner_mut().read_value(&key), value); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs b/core/lib/multivm/src/versions/testonly/secp256r1.rs similarity index 91% rename from core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs rename to core/lib/multivm/src/versions/testonly/secp256r1.rs index 55ca372c4a9..60197913601 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/testonly/secp256r1.rs @@ -3,21 +3,18 @@ use zksync_system_constants::P256VERIFY_PRECOMPILE_ADDRESS; use zksync_types::{web3::keccak256, Execute, H256, U256}; use zksync_utils::h256_to_u256; -use crate::{ - interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::tester::VmTesterBuilder, -}; +use super::{tester::VmTesterBuilder, TestedVm}; +use crate::interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; -#[test] -fn test_sekp256r1() { +pub(crate) fn test_secp256r1() { // In this test, we aim to test whether a simple account interaction (without any fee logic) // will work. The account will try to deploy a simple contract from integration tests. 
let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_execution_mode(TxExecutionMode::EthCall) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let account = &mut vm.rich_accounts[0]; diff --git a/core/lib/multivm/src/versions/tests.rs b/core/lib/multivm/src/versions/testonly/shadow.rs similarity index 96% rename from core/lib/multivm/src/versions/tests.rs rename to core/lib/multivm/src/versions/testonly/shadow.rs index c2a04c155fe..6a7d42b06fc 100644 --- a/core/lib/multivm/src/versions/tests.rs +++ b/core/lib/multivm/src/versions/testonly/shadow.rs @@ -22,10 +22,10 @@ use crate::{ }, utils::get_max_gas_per_pubdata_byte, versions::testonly::{ - default_l1_batch, default_system_env, make_account_rich, ContractToDeploy, + default_l1_batch, default_system_env, make_address_rich, ContractToDeploy, }, - vm_fast, - vm_latest::{self, HistoryEnabled}, + vm_fast, vm_latest, + vm_latest::HistoryEnabled, }; type ReferenceVm = vm_latest::Vm, HistoryEnabled>; @@ -70,8 +70,8 @@ impl Harness { fn new(l1_batch_env: &L1BatchEnv) -> Self { Self { - alice: Account::random(), - bob: Account::random(), + alice: Account::from_seed(0), + bob: Account::from_seed(1), storage_contract: ContractToDeploy::new( read_bytecode(Self::STORAGE_CONTRACT_PATH), Self::STORAGE_CONTRACT_ADDRESS, @@ -82,8 +82,8 @@ impl Harness { } fn setup_storage(&self, storage: &mut InMemoryStorage) { - make_account_rich(storage, &self.alice); - make_account_rich(storage, &self.bob); + make_address_rich(storage, self.alice.address); + make_address_rich(storage, self.bob.address); self.storage_contract.insert(storage); let storage_contract_key = StorageKey::new( diff --git a/core/lib/multivm/src/versions/testonly/simple_execution.rs b/core/lib/multivm/src/versions/testonly/simple_execution.rs new file mode 100644 index 00000000000..fcd7a144ab1 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/simple_execution.rs @@ -0,0 +1,75 @@ +use assert_matches::assert_matches; +use zksync_test_account::TxType; + +use super::{tester::VmTesterBuilder, TestedVm}; +use crate::interface::{ExecutionResult, VmExecutionMode, VmInterfaceExt}; + +pub(crate) fn test_estimate_fee() { + let mut vm_tester = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .build::(); + + vm_tester.deploy_test_contract(); + let account = &mut vm_tester.rich_accounts[0]; + + let tx = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L2, + ); + + vm_tester.vm.push_transaction(tx); + + let result = vm_tester.vm.execute(VmExecutionMode::OneTx); + assert_matches!(result.result, ExecutionResult::Success { .. 
}); +} + +pub(crate) fn test_simple_execute() { + let mut vm_tester = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .build::(); + + vm_tester.deploy_test_contract(); + + let account = &mut vm_tester.rich_accounts[0]; + + let tx1 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + + let tx2 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + true, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + + let tx3 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + let vm = &mut vm_tester.vm; + vm.push_transaction(tx1); + vm.push_transaction(tx2); + vm.push_transaction(tx3); + let tx = vm.execute(VmExecutionMode::OneTx); + assert_matches!(tx.result, ExecutionResult::Success { .. }); + let tx = vm.execute(VmExecutionMode::OneTx); + assert_matches!(tx.result, ExecutionResult::Revert { .. }); + let tx = vm.execute(VmExecutionMode::OneTx); + assert_matches!(tx.result, ExecutionResult::Success { .. }); + let block_tip = vm.execute(VmExecutionMode::Batch); + assert_matches!(block_tip.result, ExecutionResult::Success { .. }); +} diff --git a/core/lib/multivm/src/versions/testonly/storage.rs b/core/lib/multivm/src/versions/testonly/storage.rs new file mode 100644 index 00000000000..4951272a60c --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/storage.rs @@ -0,0 +1,125 @@ +use ethabi::Token; +use zksync_contracts::{load_contract, read_bytecode}; +use zksync_types::{Address, Execute, U256}; + +use super::{tester::VmTesterBuilder, ContractToDeploy, TestedVm}; +use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; + +fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 { + let bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", + ); + + let test_contract_address = Address::repeat_byte(1); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)]) + .build::(); + + let account = &mut vm.rich_accounts[0]; + + let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(test_contract_address), + calldata: first_tx_calldata, + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(test_contract_address), + calldata: second_tx_calldata, + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.make_snapshot(); + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "First tx failed"); + vm.vm.pop_snapshot_no_rollback(); + + // We rollback once because transient storage and rollbacks are a tricky combination. 
+ vm.vm.make_snapshot(); + vm.vm.push_transaction(tx2.clone()); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Second tx failed"); + vm.vm.rollback_to_the_latest_snapshot(); + + vm.vm.make_snapshot(); + vm.vm.push_transaction(tx2); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Second tx failed on second run"); + + result.statistics.pubdata_published +} + +fn test_storage_one_tx(second_tx_calldata: Vec) -> u32 { + test_storage::(vec![], second_tx_calldata) +} + +pub(crate) fn test_storage_behavior() { + let contract = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", + ); + + // In all of the tests below we provide the first tx to ensure that the tracers will not include + // the statistics from the start of the bootloader and will only include those for the transaction itself. + + let base_pubdata = test_storage_one_tx::(vec![]); + let simple_test_pubdata = test_storage_one_tx::( + contract + .function("simpleWrite") + .unwrap() + .encode_input(&[]) + .unwrap(), + ); + let resetting_write_pubdata = test_storage_one_tx::( + contract + .function("resettingWrite") + .unwrap() + .encode_input(&[]) + .unwrap(), + ); + let resetting_write_via_revert_pubdata = test_storage_one_tx::( + contract + .function("resettingWriteViaRevert") + .unwrap() + .encode_input(&[]) + .unwrap(), + ); + + assert_eq!(simple_test_pubdata - base_pubdata, 65); + assert_eq!(resetting_write_pubdata - base_pubdata, 34); + assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34); +} + +pub(crate) fn test_transient_storage_behavior() { + let contract = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", + ); + + let first_tstore_test = contract + .function("testTransientStore") + .unwrap() + .encode_input(&[]) + .unwrap(); + // Second transaction checks that, as expected, the transient storage is cleared after the first transaction. + let second_tstore_test = contract + .function("assertTValue") + .unwrap() + .encode_input(&[Token::Uint(U256::zero())]) + .unwrap(); + + test_storage::(first_tstore_test, second_tstore_test); +} diff --git a/core/lib/multivm/src/versions/testonly/tester/mod.rs b/core/lib/multivm/src/versions/testonly/tester/mod.rs new file mode 100644 index 00000000000..4bab9bca610 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/tester/mod.rs @@ -0,0 +1,229 @@ +use std::{collections::HashSet, fmt}; + +use zksync_contracts::BaseSystemContracts; +use zksync_test_account::{Account, TxType}; +use zksync_types::{ + utils::{deployed_address_create, storage_key_for_eth_balance}, + writes::StateDiffRecord, + Address, L1BatchNumber, StorageKey, Transaction, H256, U256, +}; +use zksync_vm_interface::{ + CurrentExecutionState, VmExecutionResultAndLogs, VmInterfaceHistoryEnabled, +}; + +pub(crate) use self::transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; +use super::{get_empty_storage, read_test_contract}; +use crate::{ + interface::{ + storage::{InMemoryStorage, StoragePtr, StorageView}, + L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, + VmInterfaceExt, + }, + versions::testonly::{ + default_l1_batch, default_system_env, make_address_rich, ContractToDeploy, + }, +}; + +mod transaction_test_info; + +/// VM tester that provides prefunded accounts, storage handle etc. 
+#[derive(Debug)]
+pub(crate) struct VmTester<VM> {
+    pub(crate) vm: VM,
+    pub(crate) system_env: SystemEnv,
+    pub(crate) l1_batch_env: L1BatchEnv,
+    pub(crate) storage: StoragePtr<StorageView<InMemoryStorage>>,
+    pub(crate) test_contract: Option<Address>,
+    pub(crate) rich_accounts: Vec<Account>,
+}
+
+impl<VM: TestedVm> VmTester<VM> {
+    pub(crate) fn deploy_test_contract(&mut self) {
+        let contract = read_test_contract();
+        let account = &mut self.rich_accounts[0];
+        let tx = account.get_deploy_tx(&contract, None, TxType::L2).tx;
+        let nonce = tx.nonce().unwrap().0.into();
+        self.vm.push_transaction(tx);
+        self.vm.execute(VmExecutionMode::OneTx);
+        let deployed_address = deployed_address_create(account.address, nonce);
+        self.test_contract = Some(deployed_address);
+    }
+
+    pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 {
+        self.vm.read_storage(storage_key_for_eth_balance(&address))
+    }
+
+    pub(crate) fn reset_with_empty_storage(&mut self) {
+        let mut storage = get_empty_storage();
+        for account in &self.rich_accounts {
+            make_address_rich(&mut storage, account.address);
+        }
+
+        let storage = StorageView::new(storage).to_rc_ptr();
+        self.storage = storage.clone();
+        self.vm = VM::new(self.l1_batch_env.clone(), self.system_env.clone(), storage);
+    }
+}
+
+/// Builder for [`VmTester`].
+#[derive(Debug)]
+pub(crate) struct VmTesterBuilder {
+    storage: Option<InMemoryStorage>,
+    l1_batch_env: Option<L1BatchEnv>,
+    system_env: SystemEnv,
+    rich_accounts: Vec<Account>,
+    custom_contracts: Vec<ContractToDeploy>,
+}
+
+impl VmTesterBuilder {
+    pub(crate) fn new() -> Self {
+        Self {
+            storage: None,
+            l1_batch_env: None,
+            system_env: default_system_env(),
+            rich_accounts: vec![],
+            custom_contracts: vec![],
+        }
+    }
+
+    pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self {
+        self.system_env = system_env;
+        self
+    }
+
+    pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self {
+        self.l1_batch_env = Some(l1_batch_env);
+        self
+    }
+
+    pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self {
+        self.storage = Some(storage);
+        self
+    }
+
+    pub(crate) fn with_base_system_smart_contracts(
+        mut self,
+        base_system_smart_contracts: BaseSystemContracts,
+    ) -> Self {
+        self.system_env.base_system_smart_contracts = base_system_smart_contracts;
+        self
+    }
+
+    pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self {
+        self.system_env.bootloader_gas_limit = gas_limit;
+        self
+    }
+
+    pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self {
+        self.system_env.execution_mode = execution_mode;
+        self
+    }
+
+    pub(crate) fn with_empty_in_memory_storage(mut self) -> Self {
+        self.storage = Some(get_empty_storage());
+        self
+    }
+
+    /// Creates the specified number of pre-funded accounts.
+    pub(crate) fn with_rich_accounts(mut self, number: u32) -> Self {
+        for i in 0..number {
+            self.rich_accounts.push(Account::from_seed(i));
+        }
+        self
+    }
+
+    pub(crate) fn rich_account(&self, index: usize) -> &Account {
+        &self.rich_accounts[index]
+    }
+
+    pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractToDeploy>) -> Self {
+        self.custom_contracts = contracts;
+        self
+    }
+
+    pub(crate) fn build<VM>(self) -> VmTester<VM>
+    where
+        VM: VmFactory<StorageView<InMemoryStorage>>,
+    {
+        let l1_batch_env = self
+            .l1_batch_env
+            .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1)));
+
+        let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage);
+        ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage);
+        let storage = StorageView::new(raw_storage).to_rc_ptr();
+        for account in &self.rich_accounts {
+            make_address_rich(storage.borrow_mut().inner_mut(), account.address);
+        }
+
+        let vm = VM::new(
+            l1_batch_env.clone(),
+            self.system_env.clone(),
+            storage.clone(),
+        );
+        VmTester {
+            vm,
+            system_env: self.system_env,
+            l1_batch_env,
+            storage,
+            test_contract: None,
+            rich_accounts: self.rich_accounts.clone(),
+        }
+    }
+}
+
+/// Test extensions for VM.
+pub(crate) trait TestedVm:
+    VmFactory<StorageView<InMemoryStorage>> + VmInterfaceHistoryEnabled
+{
+    type StateDump: fmt::Debug + PartialEq;
+
+    fn dump_state(&self) -> Self::StateDump;
+
+    fn gas_remaining(&mut self) -> u32;
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState;
+
+    /// Unlike [`Self::known_bytecode_hashes()`], the output should only include successfully decommitted bytecodes.
+    fn decommitted_hashes(&self) -> HashSet<U256>;
+
+    fn execute_with_state_diffs(
+        &mut self,
+        diffs: Vec<StateDiffRecord>,
+        mode: VmExecutionMode,
+    ) -> VmExecutionResultAndLogs;
+
+    fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]);
+
+    /// Includes bytecodes that have failed to decommit.
+    fn known_bytecode_hashes(&self) -> HashSet<U256>;
+
+    /// Returns `true` iff the decommit is fresh.
+    fn manually_decommit(&mut self, code_hash: H256) -> bool;
+
+    fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]);
+
+    fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]);
+
+    /// Reads storage accounting for changes made during the VM run.
+    fn read_storage(&mut self, key: StorageKey) -> U256;
+
+    fn verify_required_storage(&mut self, cells: &[(StorageKey, U256)]) {
+        for &(key, expected_value) in cells {
+            assert_eq!(
+                self.read_storage(key),
+                expected_value,
+                "Unexpected storage value at {key:?}"
+            );
+        }
+    }
+
+    /// Returns the current hash of the latest L2 block.
+    fn last_l2_block_hash(&self) -> H256;
+
+    /// Same as `start_new_l2_block`, but should skip consistency checks (to verify they are performed by the bootloader).
+    fn push_l2_block_unchecked(&mut self, block: L2BlockEnv);
+
+    /// Pushes a transaction with predefined refund value.
+ fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs similarity index 87% rename from core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs rename to core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs index e2155c02b7e..87468d3e4d5 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs @@ -1,12 +1,9 @@ use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160}; -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - VmRevertReason, - }, - vm_latest::{tests::tester::vm_tester::VmTester, HistoryEnabled}, +use super::{TestedVm, VmTester}; +use crate::interface::{ + CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, + VmExecutionResultAndLogs, VmInterfaceExt, VmRevertReason, }; #[derive(Debug, Clone)] @@ -176,7 +173,7 @@ impl TransactionTestInfo { } } -impl VmTester { +impl VmTester { pub(crate) fn execute_and_verify_txs( &mut self, txs: &[TransactionTestInfo], @@ -194,19 +191,29 @@ impl VmTester { &mut self, tx_test_info: TransactionTestInfo, ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result + execute_tx_and_verify(&mut self.vm, tx_test_info) + } +} + +fn execute_tx_and_verify( + vm: &mut impl TestedVm, + tx_test_info: TransactionTestInfo, +) -> VmExecutionResultAndLogs { + let inner_state_before = vm.dump_state(); + vm.make_snapshot(); + vm.push_transaction(tx_test_info.tx.clone()); + let result = vm.execute(VmExecutionMode::OneTx); + tx_test_info.verify_result(&result); + if tx_test_info.should_rollback() { + vm.rollback_to_the_latest_snapshot(); + let inner_state_after = vm.dump_state(); + pretty_assertions::assert_eq!( + inner_state_before, + inner_state_after, + "Inner state before and after rollback should be equal" + ); + } else { + vm.pop_snapshot_no_rollback(); } + result } diff --git a/core/lib/multivm/src/versions/testonly/tracing_execution_error.rs b/core/lib/multivm/src/versions/testonly/tracing_execution_error.rs new file mode 100644 index 00000000000..e87e6eb7c06 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/tracing_execution_error.rs @@ -0,0 +1,63 @@ +use zksync_contracts::load_contract; +use zksync_types::{Address, Execute}; + +use super::{ + read_error_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm, BASE_SYSTEM_CONTRACTS, +}; +use crate::{ + interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, + versions::testonly::tester::{ExpectedError, TransactionTestInfo}, +}; + +fn get_execute_error_calldata() -> Vec { + let test_contract = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", + ); + let function = 
test_contract.function("require_short").unwrap(); + function + .encode_input(&[]) + .expect("failed to encode parameters") +} + +pub(crate) fn test_tracing_of_execution_errors() { + let contract_address = Address::repeat_byte(1); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_custom_contracts(vec![ContractToDeploy::new( + read_error_contract(), + contract_address, + )]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let account = &mut vm.rich_accounts[0]; + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(contract_address), + calldata: get_execute_error_calldata(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( + tx, + ExpectedError { + revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { + msg: "short".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + ], + }), + modifier: None, + }, + )); +} diff --git a/core/lib/multivm/src/versions/testonly/transfer.rs b/core/lib/multivm/src/versions/testonly/transfer.rs new file mode 100644 index 00000000000..051826a64f2 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/transfer.rs @@ -0,0 +1,200 @@ +use ethabi::Token; +use zksync_contracts::{load_contract, read_bytecode}; +use zksync_types::{utils::storage_key_for_eth_balance, Address, Execute, U256}; +use zksync_utils::u256_to_h256; + +use super::{get_empty_storage, tester::VmTesterBuilder, ContractToDeploy, TestedVm}; +use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; + +enum TestOptions { + Send(U256), + Transfer(U256), +} + +fn test_send_or_transfer(test_option: TestOptions) { + let test_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let recipient_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", + ); + let test_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + + let test_contract_address = Address::repeat_byte(1); + let recipient_address = Address::repeat_byte(2); + + let (value, calldata) = match test_option { + TestOptions::Send(value) => ( + value, + test_abi + .function("send") + .unwrap() + .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) + .unwrap(), + ), + TestOptions::Transfer(value) => ( + value, + test_abi + .function("transfer") + .unwrap() + .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) + .unwrap(), + ), + }; + + let mut storage = get_empty_storage(); + storage.set_value( + storage_key_for_eth_balance(&test_contract_address), + u256_to_h256(value), + ); + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ + ContractToDeploy::new(test_bytecode, test_contract_address), + ContractToDeploy::new(recipient_bytecode, recipient_address), + ]) + .build::(); + + let account = &mut vm.rich_accounts[0]; + 
let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(test_contract_address), + calldata, + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let tx_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !tx_result.result.is_failed(), + "Transaction wasn't successful" + ); + + let batch_result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); + + let new_recipient_balance = vm.get_eth_balance(recipient_address); + assert_eq!(new_recipient_balance, value); +} + +pub(crate) fn test_send_and_transfer() { + test_send_or_transfer::(TestOptions::Send(U256::zero())); + test_send_or_transfer::(TestOptions::Send(U256::from(10).pow(18.into()))); + test_send_or_transfer::(TestOptions::Transfer(U256::zero())); + test_send_or_transfer::(TestOptions::Transfer(U256::from(10).pow(18.into()))); +} + +fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { + let test_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let reentrant_recipient_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", + ); + let test_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let reentrant_recipient_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", + ); + + let test_contract_address = Address::repeat_byte(1); + let reentrant_recipient_address = Address::repeat_byte(2); + + let (value, calldata) = match test_option { + TestOptions::Send(value) => ( + value, + test_abi + .function("send") + .unwrap() + .encode_input(&[ + Token::Address(reentrant_recipient_address), + Token::Uint(value), + ]) + .unwrap(), + ), + TestOptions::Transfer(value) => ( + value, + test_abi + .function("transfer") + .unwrap() + .encode_input(&[ + Token::Address(reentrant_recipient_address), + Token::Uint(value), + ]) + .unwrap(), + ), + }; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ + ContractToDeploy::new(test_bytecode, test_contract_address), + ContractToDeploy::new(reentrant_recipient_bytecode, reentrant_recipient_address), + ]) + .build::(); + + // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. 
+ let account = &mut vm.rich_accounts[0]; + let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(reentrant_recipient_address), + calldata: reentrant_recipient_abi + .function("setX") + .unwrap() + .encode_input(&[]) + .unwrap(), + value: U256::from(1), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let tx1_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !tx1_result.result.is_failed(), + "Transaction 1 wasn't successful" + ); + + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(test_contract_address), + calldata, + value, + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx2); + let tx2_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + tx2_result.result.is_failed(), + "Transaction 2 should have failed, but it succeeded" + ); + + let batch_result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); +} + +pub(crate) fn test_reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_or_transfer::(TestOptions::Send(U256::zero())); + test_reentrancy_protection_send_or_transfer::(TestOptions::Send( + U256::from(10).pow(18.into()), + )); + test_reentrancy_protection_send_or_transfer::(TestOptions::Transfer(U256::zero())); + test_reentrancy_protection_send_or_transfer::(TestOptions::Transfer( + U256::from(10).pow(18.into()), + )); +} diff --git a/core/lib/multivm/src/versions/testonly/upgrade.rs b/core/lib/multivm/src/versions/testonly/upgrade.rs new file mode 100644 index 00000000000..9401cbb4ba8 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/upgrade.rs @@ -0,0 +1,322 @@ +use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; +use zksync_test_account::TxType; +use zksync_types::{ + ethabi::{Contract, Token}, + get_code_key, get_known_code_key, + protocol_upgrade::ProtocolUpgradeTxCommonData, + Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, + CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H256, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; + +use super::{ + get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, read_test_contract, + tester::VmTesterBuilder, TestedVm, +}; +use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; + +/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: +/// - This transaction must be the only one in block +/// - If present, this transaction must be the first one in block +pub(crate) fn test_protocol_upgrade_is_first() { + let mut storage = get_empty_storage(); + let bytecode_hash = hash_bytecode(&read_test_contract()); + storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + // Here we just use some random transaction of protocol upgrade type: + let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecode hash to + address: Address::repeat_byte(1), + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + 
value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + // Another random upgrade transaction + let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecode hash to + address: Address::repeat_byte(2), + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + let normal_l1_transaction = vm.rich_accounts[0] + .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) + .tx; + + let expected_error = + Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); + + vm.vm.make_snapshot(); + // Test 1: there must be only one system transaction in block + vm.vm.push_transaction(protocol_upgrade_transaction.clone()); + vm.vm.push_transaction(normal_l1_transaction.clone()); + vm.vm.push_transaction(another_protocol_upgrade_transaction); + + vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert_eq!( + result.result, + ExecutionResult::Halt { + reason: expected_error.clone() + } + ); + + // Test 2: the protocol upgrade tx must be the first one in block + vm.vm.rollback_to_the_latest_snapshot(); + vm.vm.make_snapshot(); + vm.vm.push_transaction(normal_l1_transaction.clone()); + vm.vm.push_transaction(protocol_upgrade_transaction.clone()); + + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert_eq!( + result.result, + ExecutionResult::Halt { + reason: expected_error + } + ); + + vm.vm.rollback_to_the_latest_snapshot(); + vm.vm.make_snapshot(); + vm.vm.push_transaction(protocol_upgrade_transaction); + vm.vm.push_transaction(normal_l1_transaction); + + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); +} + +/// In this test we try to test how force deployments could be done via protocol upgrade transactions. +pub(crate) fn test_force_deploy_upgrade() { + let mut storage = get_empty_storage(); + let bytecode_hash = hash_bytecode(&read_test_contract()); + let known_code_key = get_known_code_key(&bytecode_hash); + // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
+ storage.set_value(known_code_key, u256_to_h256(1.into())); + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let address_to_deploy = Address::repeat_byte(1); + // Here we just use some random transaction of protocol upgrade type: + let transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecode hash to + address: address_to_deploy, + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + vm.vm.push_transaction(transaction); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "The force upgrade was not successful" + ); + + let expected_slots = [( + get_code_key(&address_to_deploy), + h256_to_u256(bytecode_hash), + )]; + // Verify that the bytecode has been set correctly + vm.vm.verify_required_storage(&expected_slots); +} + +/// Here we show how the work with the complex upgrader could be done. +pub(crate) fn test_complex_upgrader() { + let mut storage = get_empty_storage(); + let bytecode_hash = hash_bytecode(&read_complex_upgrade()); + let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); + // Let's assume that the bytecode for the implementation of the complex upgrade + // is already deployed in some address in user space + let upgrade_impl = Address::repeat_byte(1); + let account_code_key = get_code_key(&upgrade_impl); + storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + storage.set_value( + get_known_code_key(&msg_sender_test_hash), + u256_to_h256(1.into()), + ); + storage.set_value(account_code_key, bytecode_hash); + storage.store_factory_dep(bytecode_hash, read_complex_upgrade()); + storage.store_factory_dep(msg_sender_test_hash, read_msg_sender_test()); + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let address_to_deploy1 = Address::repeat_byte(0xfe); + let address_to_deploy2 = Address::repeat_byte(0xff); + + let transaction = get_complex_upgrade_tx( + upgrade_impl, + address_to_deploy1, + address_to_deploy2, + bytecode_hash, + ); + + vm.vm.push_transaction(transaction); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "The force upgrade was not successful" + ); + + let expected_slots = [ + ( + get_code_key(&address_to_deploy1), + h256_to_u256(bytecode_hash), + ), + ( + get_code_key(&address_to_deploy2), + h256_to_u256(bytecode_hash), + ), + ]; + // Verify that the bytecode has been set correctly + vm.vm.verify_required_storage(&expected_slots); +} + +#[derive(Debug, Clone)] +struct ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash: H256, + // The address on which to deploy the bytecode hash to + address: Address, + // Whether to run the constructor on the force deployment + call_constructor: bool, + // The value with which to initialize a contract + value: U256, + // The constructor calldata + input: Vec, +} + +fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { + let deployer = deployer_contract(); + let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); + + let encoded_deployments: 
Vec<_> = deployment + .iter() + .map(|deployment| { + Token::Tuple(vec![ + Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), + Token::Address(deployment.address), + Token::Bool(deployment.call_constructor), + Token::Uint(deployment.value), + Token::Bytes(deployment.input.clone()), + ]) + }) + .collect(); + + let params = [Token::Array(encoded_deployments)]; + + let calldata = contract_function + .encode_input(¶ms) + .expect("failed to encode parameters"); + + let execute = Execute { + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), + calldata, + factory_deps: vec![], + value: U256::zero(), + }; + + Transaction { + common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, + gas_limit: U256::from(200_000_000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +// Returns the transaction that performs a complex protocol upgrade. +// The first param is the address of the implementation of the complex upgrade +// in user-space, while the next 3 params are params of the implementation itself +// For the explanation for the parameters, please refer to: +// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol +fn get_complex_upgrade_tx( + implementation_address: Address, + address1: Address, + address2: Address, + bytecode_hash: H256, +) -> Transaction { + let impl_contract = get_complex_upgrade_abi(); + let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); + let impl_calldata = impl_function + .encode_input(&[ + Token::Address(address1), + Token::Address(address2), + Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), + ]) + .unwrap(); + + let complex_upgrader = get_complex_upgrader_abi(); + let upgrade_function = complex_upgrader.function("upgrade").unwrap(); + let complex_upgrader_calldata = upgrade_function + .encode_input(&[ + Token::Address(implementation_address), + Token::Bytes(impl_calldata), + ]) + .unwrap(); + + let execute = Execute { + contract_address: Some(COMPLEX_UPGRADER_ADDRESS), + calldata: complex_upgrader_calldata, + factory_deps: vec![], + value: U256::zero(), + }; + + Transaction { + common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, + gas_limit: U256::from(200_000_000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +fn read_msg_sender_test() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") +} + +fn get_complex_upgrader_abi() -> Contract { + load_sys_contract("ComplexUpgrader") +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index dd407c61668..bb66eb2f770 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -1,392 +1,6 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use itertools::Itertools; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, - l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, 
Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use super::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{get_complex_upgrade_abi, read_complex_upgrade}, -}; -use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::default_l1_batch, - vm_latest::constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -const CALLS_PER_TX: usize = 1_000; -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .chunks(CALLS_PER_TX) - .into_iter() - .map(|chunk| { - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(chunk.collect_vec())]) - .unwrap() - }) - .collect_vec(); - - encoded_calls -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute - // the gas limit - let batch_env = L1BatchEnv { - fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), - ..default_l1_batch(zksync_types::L1BatchNumber(1)) - }; - - let mut vm = VmTesterBuilder::new() - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - 
.with_random_rich_accounts(1) - .with_l1_batch_env(batch_env) - .build(); - - let bytecodes = test_data.bytecodes.iter().map(Vec::as_slice); - vm.vm.insert_bytecodes(bytecodes); - - let txs_data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - - for (i, data) in txs_data.into_iter().enumerate() { - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), - calldata: data, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {test_data:#?}" - ); - } - - // Now we count how much gas was spent at the end of the batch - // It is assumed that the top level frame is the bootloader - vm.vm.enforce_state_diffs(test_data.state_diffs.clone()); - let gas_before = vm.vm.gas_remaining(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {test_data:?}" - ); - let gas_after = vm.vm.gas_remaining(); - assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used); - - TestStatistics { - max_used_gas: gas_before - gas_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has odd number of 32 byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} +use crate::{versions::testonly::block_tip::test_dry_run_upper_bound, vm_fast::Vm}; #[test] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. 
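// A minimal, self-contained sketch of the padding rule implemented by
// `get_valid_bytecode_length` above, assuming only that a valid zkEVM bytecode must be a
// whole number of 32-byte words and that the word count must be odd (illustrative, not the
// helper itself):
fn padded_bytecode_length(length: usize) -> usize {
    // Round up to the next multiple of 32 bytes.
    let padded = ((length + 31) / 32) * 32;
    // If the resulting word count is even, append one more word to make it odd.
    if (padded / 32) % 2 == 0 {
        padded + 32
    } else {
        padded
    }
}
// Example: 100 bytes pads to 4 words (even), so one extra word is added: 160 bytes in total.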
- - let statistics = vec![ - // max logs - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }, - // max messages - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }, - // long message - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }, - // max bytecodes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. - // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }, - // long bytecode - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![ - vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; - 1 - ], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }, - // lots of small repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }, - // lots of big repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - true, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, - ), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }, - // lots of small initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs( - false, - true, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, - ), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }, - // lots of large initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - false, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, - ), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }, - ]; - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
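// The scenarios above size their inputs by dividing the effective pubdata budget by an
// estimated per-item cost (the byte costs quoted in the comments, e.g. ~5 bytes for a small
// repeated write or ~65 bytes for a large initial write). A minimal sketch of that sizing,
// assuming a hypothetical 100_000-byte budget purely for illustration:
fn max_items_for_budget(pubdata_budget: usize, bytes_per_item: usize) -> usize {
    pubdata_budget / bytes_per_item
}
// max_items_for_budget(100_000, 5)  == 20_000 small repeated writes
// max_items_for_budget(100_000, 65) == 1_538  large initial writes
// The asserts that follow then require `max_used_gas * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD`,
// i.e. at least 1.5x headroom between the worst observed scenario and the constant.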
- let max_used_gas = statistics - .iter() - .map(|s| (s.statistics.max_used_gas, s.tag.clone())) - .max() - .unwrap(); - assert!( - max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", - max_used_gas.1, - max_used_gas.0, - BOOTLOADER_BATCH_TIP_OVERHEAD - ); - - let circuit_statistics = statistics - .iter() - .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) - .max() - .unwrap(); - assert!( - circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", - circuit_statistics.1, - circuit_statistics.0, - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD - ); - - let execution_metrics_size = statistics - .iter() - .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) - .max() - .unwrap(); - assert!( - execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", - execution_metrics_size.1, - execution_metrics_size.0, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD - ); +fn dry_run_upper_bound() { + test_dry_run_upper_bound::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs index 48e1b10de44..6075aea0989 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -1,52 +1,14 @@ -use assert_matches::assert_matches; -use zksync_types::U256; -use zksync_vm2::interface::HeapId; - use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, - versions::vm_fast::tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, + versions::testonly::bootloader::{test_bootloader_out_of_gas, test_dummy_bootloader}, + vm_fast::Vm, }; #[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - - verify_required_memory(&vm.vm.inner, vec![(correct_first_cell, HeapId::FIRST, 0)]); +fn dummy_bootloader() { + test_dummy_bootloader::>(); } #[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_bootloader_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); +fn bootloader_out_of_gas() { + test_bootloader_out_of_gas::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs 
b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs index 3070140c00b..8a662c38827 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs @@ -1,38 +1,6 @@ -use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::bytecode, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; +use crate::{versions::testonly::bytecode_publishing::test_bytecode_publishing, vm_fast::Vm}; #[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); +fn bytecode_publishing() { + test_bytecode_publishing::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs deleted file mode 100644 index c97b38b6afc..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. 
-#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. - let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs index f40e5336eb3..e7521d87c1c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -1,74 +1,6 @@ -use zksync_types::{Address, Execute, U256}; +use crate::{versions::testonly::circuits::test_circuits, vm_fast::Vm}; -use super::tester::VmTesterBuilder; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. 
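// The removed `test_circuits` body below compares each measured circuit statistic against a
// hardcoded expectation, requiring exact equality when the expectation is zero and a relative
// difference under 10% otherwise. A minimal, self-contained sketch of that comparison:
fn within_tolerance(actual: f32, expected: f32, tolerance: f32) -> bool {
    if expected == 0.0 {
        actual == expected
    } else {
        ((actual - expected) / expected).abs() < tolerance
    }
}
// within_tolerance(1.40, 1.34935, 0.1) == true; within_tolerance(1.60, 1.34935, 0.1) == false.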
#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(Address::random()), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed(), "{res:#?}"); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 13] = [ - 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, - 0.0, 0.0, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - (s.secp256k1_verify, "secp256k1_verify"), - (s.transient_storage_checker, "transient_storage_checker"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } +fn circuits() { + test_circuits::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index 34342d7f3b8..4ef86128734 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -1,252 +1,21 @@ -use ethabi::Token; -use zksync_types::{ - get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, - }, - CircuitsTracer, + versions::testonly::code_oracle::{ + test_code_oracle, test_code_oracle_big_bytecode, test_refunds_in_code_oracle, }, + vm_fast::Vm, }; -fn generate_large_bytecode() -> Vec { - // This is the maximal possible size of a zkEVM bytecode - vec![2u8; ((1 << 16) - 1) * 32] -} - #[test] -fn test_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - // Filling the zkevm bytecode - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account 
interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode, - precompiles_contract_address, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // Now, we ask for the same bytecode. We use to partially check whether the memory page with - // the decommitted bytecode gets erased (it shouldn't). - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); -} - -fn find_code_oracle_cost_log( - precompiles_contract_address: Address, - logs: &[StorageLogWithPreviousValue], -) -> &StorageLogWithPreviousValue { - logs.iter() - .find(|log| { - *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() - }) - .expect("no code oracle cost log") +fn code_oracle() { + test_code_oracle::>(); } #[test] -fn test_code_oracle_big_bytecode() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let big_zkevm_bytecode = generate_large_bytecode(); - let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); - let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); - - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&big_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode, - precompiles_contract_address, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.insert_bytecodes([big_zkevm_bytecode.as_slice()]); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); +fn code_oracle_big_bytecode() { + test_code_oracle_big_bytecode::>(); } #[test] fn refunds_in_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - // Execute code oracle twice with identical VM state that only differs in that the queried bytecode - // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas - // for already decommitted codes). 
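// A sketch of the refund expectation checked in `refunds_in_code_oracle` below: the original
// test states that the decommit cost passed to the opcode is `4 * contract_length_in_words`
// (per `CodeOracle.yul`), so running the same query against an already-decommitted bytecode
// should cost exactly that much less. Illustrative only:
fn expected_decommit_refund(bytecode_len_in_bytes: usize) -> usize {
    4 * (bytecode_len_in_bytes / 32)
}
// Example: a 6_400-byte (200-word) bytecode should make the second call 800 gas cheaper.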
- let mut oracle_costs = vec![]; - for decommit in [false, true] { - let mut vm = VmTesterBuilder::new() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode.clone(), - precompiles_contract_address, - )]) - .with_storage(storage.clone()) - .build(); - - vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); - - let account = &mut vm.rich_accounts[0]; - if decommit { - let (_, is_fresh) = vm.vm.inner.world_diff_mut().decommit_opcode( - &mut vm.vm.world, - &mut ((), CircuitsTracer::default()), - h256_to_u256(normal_zkevm_bytecode_hash), - ); - assert!(is_fresh); - } - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - let log = - find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); - oracle_costs.push(log.log.value); - } - - // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` - // in `CodeOracle.yul`. - let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); - assert_eq!( - code_oracle_refund, - (4 * (normal_zkevm_bytecode.len() / 32)).into() - ); + test_refunds_in_code_oracle::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs index c2ce02d39fe..c3cfd8b29f3 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs @@ -1,81 +1,6 @@ -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - vm_latest::utils::fee::get_batch_base_fee, -}; +use crate::{versions::testonly::default_aa::test_default_aa_interaction, vm_fast::Vm}; #[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
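// The removed `test_default_aa_interaction` body below also checks the operator's fee:
// the account is charged `gas_limit * batch_base_fee` upfront and the refunded gas is
// returned at the same base fee. A minimal sketch of that arithmetic with plain u64 values
// instead of U256 (illustrative only):
fn expected_operator_fee(gas_limit: u64, gas_refunded: u64, batch_base_fee: u64) -> u64 {
    (gas_limit - gas_refunded) * batch_base_fee
}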
- let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = [ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage( - &expected_slots, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &vm.fee_account, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); +fn default_aa_interaction() { + test_default_aa_interaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs index 3f0a47b980e..6ba55f8e1f8 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -1,39 +1,6 @@ -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Execute}; +use crate::{versions::testonly::gas_limit::test_tx_gas_limit_offset, vm_fast::Vm}; -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_fast::tests::tester::VmTesterBuilder, - vm_latest::constants::{TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
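// The removed test below pushes a transaction with `gas_limit = 9999` and reads the word at
// `TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET` back from the bootloader heap to confirm the
// constant points at the gas-limit field. A minimal sketch of that lookup over a flat heap of
// words (hypothetical plain u64 words, illustrative only):
fn read_tx_gas_limit(heap: &[u64], tx_description_offset: usize, tx_gas_limit_offset: usize) -> u64 {
    heap[tx_description_offset + tx_gas_limit_offset]
}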
#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Account::default_fee() - }), - ); - - vm.vm.push_transaction(tx); - - assert!(!vm.vm.has_previous_far_calls()); - let gas_limit_from_memory = vm - .vm - .read_word_from_bootloader_heap(TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET); - - assert_eq!(gas_limit_from_memory, gas_limit); +fn tx_gas_limit_offset() { + test_tx_gas_limit_offset::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 0447304f69f..5ec30907ed5 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -1,241 +1,22 @@ -use std::{collections::HashSet, iter}; - -use assert_matches::assert_matches; -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{AccountTreeId, Address, Execute, StorageKey, H256, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - use crate::{ - interface::{ - storage::ReadStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, - }, - versions::testonly::ContractToDeploy, - vm_fast::{ - tests::{ - tester::{TxType, VmTester, VmTesterBuilder}, - utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - vm::Vm, + versions::testonly::get_used_contracts::{ + test_get_used_contracts, test_get_used_contracts_with_far_call, + test_get_used_contracts_with_out_of_gas_far_call, }, + vm_fast::Vm, }; #[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_decommitted_hashes()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .decommitted_hashes() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm.decommitted_hashes().collect::>(), - known_bytecodes_without_base_system_contracts(&vm.vm) - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_decommitted_hashes()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: 
Some(CONTRACT_DEPLOYER_ADDRESS), - calldata: big_calldata, - value: Default::default(), - factory_deps: vec![vec![1; 32]], - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_base_system_contracts(&vm.vm).contains(&hash_to_u256)); - assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_base_system_contracts(vm: &Vm) -> HashSet { - let mut known_bytecodes_without_base_system_contracts = vm - .world - .bytecode_cache - .keys() - .cloned() - .collect::>(); - known_bytecodes_without_base_system_contracts - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); - if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { - let was_removed = - known_bytecodes_without_base_system_contracts.remove(&h256_to_u256(evm_emulator.hash)); - assert!(was_removed); - } - known_bytecodes_without_base_system_contracts -} - -/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial -/// decommitment cost (>10,000 gas). -fn inflated_counter_bytecode() -> Vec { - let mut counter_bytecode = read_test_contract(); - counter_bytecode.extend( - iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) - .take(10_000) - .flatten(), - ); - counter_bytecode -} - -#[derive(Debug)] -struct ProxyCounterData { - proxy_counter_address: Address, - counter_bytecode_hash: U256, -} - -fn execute_proxy_counter(gas: u32) -> (VmTester<()>, ProxyCounterData, VmExecutionResultAndLogs) { - let counter_bytecode = inflated_counter_bytecode(); - let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); - let counter_address = Address::repeat_byte(0x23); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_custom_contracts(vec![ContractToDeploy::new( - counter_bytecode, - counter_address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx( - &proxy_counter_bytecode, - Some(&[Token::Address(counter_address)]), - TxType::L2, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - !decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(deploy_tx.address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - let data = ProxyCounterData { - proxy_counter_address: deploy_tx.address, - counter_bytecode_hash, - }; - (vm, data, exec_result) +fn get_used_contracts() { + test_get_used_contracts::>(); } #[test] fn 
get_used_contracts_with_far_call() { - let (vm, data, exec_result) = execute_proxy_counter(100_000); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - decommitted_hashes.contains(&data.counter_bytecode_hash), - "{decommitted_hashes:?}" - ); + test_get_used_contracts_with_far_call::>(); } #[test] fn get_used_contracts_with_out_of_gas_far_call() { - let (mut vm, data, exec_result) = execute_proxy_counter(10_000); - assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - decommitted_hashes.contains(&data.counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - // Execute another transaction with a successful far call and check that it's still charged for decommitment. - let account = &mut vm.rich_accounts[0]; - let (_, proxy_counter_abi) = read_proxy_counter_contract(); - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(data.proxy_counter_address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let proxy_counter_cost_key = StorageKey::new( - AccountTreeId::new(data.proxy_counter_address), - H256::from_low_u64_be(1), - ); - let far_call_cost_log = exec_result - .logs - .storage_logs - .iter() - .find(|log| log.log.key == proxy_counter_cost_key) - .expect("no cost log"); - assert!( - far_call_cost_log.previous_value.is_zero(), - "{far_call_cost_log:?}" - ); - let far_call_cost = h256_to_u256(far_call_cost_log.log.value); - assert!(far_call_cost > 10_000.into(), "{far_call_cost}"); + test_get_used_contracts_with_out_of_gas_far_call::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs deleted file mode 100644 index dde83d8a9f3..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_latest::tests::tester::VmTesterBuilder; -use crate::vm_latest::types::inputs::system_env::TxExecutionMode; -use crate::vm_latest::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
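// The vectors above and below exercise the bytecode-hash format: byte 0 is the version (1),
// byte 1 must be zero, and bytes 2..4 encode the bytecode length in 32-byte words, which must
// be odd. A minimal, self-contained validation sketch reconstructed from those test vectors
// (an assumption-level illustration, not the system contract's actual implementation):
fn validate_bytecode_hash(hash: [u8; 32]) -> Result<(), &'static str> {
    if hash[0] != 1 || hash[1] != 0 {
        return Err("Incorrectly formatted bytecodeHash");
    }
    let length_in_words = u16::from_be_bytes([hash[2], hash[3]]);
    if length_in_words % 2 == 0 {
        return Err("Code length in words must be odd");
    }
    Ok(())
}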
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs index df8d992f02f..522aa2413f6 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs @@ -1,46 +1,6 @@ -use zksync_types::get_nonce_key; - -use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - vm_fast::tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; +use crate::{versions::testonly::is_write_initial::test_is_write_initial_behaviour, vm_fast::Vm}; #[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
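// The property exercised here: whether a write is "initial" must be answered from the
// underlying storage snapshot, not from writes made earlier in the same batch, so a key that
// is still absent from committed storage stays an initial write even after being touched.
// A minimal, self-contained sketch of that distinction (hypothetical plain types, not the
// actual storage view):
struct SketchStorageView {
    committed: std::collections::HashMap<u64, u64>, // persisted key -> value
    batch_writes: std::collections::HashSet<u64>,   // keys written within the current batch
}

impl SketchStorageView {
    fn write(&mut self, key: u64, _value: u64) {
        self.batch_writes.insert(key);
    }

    // Deliberately ignores `batch_writes`: an in-batch write does not make a key "known".
    fn is_write_initial(&self, key: u64) -> bool {
        !self.committed.contains_key(&key)
    }
}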
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); +fn is_write_initial_behaviour() { + test_is_write_initial_behaviour::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 5897ec5f266..0174eeffd7e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -1,198 +1,16 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Execute, ExecuteTransactionCommon, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::StorageWritesDeduplicator, - vm_fast::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - transaction_data::TransactionData, + versions::testonly::l1_tx_execution::{ + test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, }, + vm_fast::Vm, }; #[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 9 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - `gasPerPubdataByte` - // - `basePubdataSpent` - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. - let basic_initial_writes = 5; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - assert!(!res.result.is_failed()); - - for (expected_value, storage_location) in [ - (U256::from(1u32), known_codes_key), - (h256_to_u256(deploy_tx.bytecode_hash), account_code_key), - ] { - assert_eq!( - expected_value, - vm.vm.inner.world_diff().get_storage_state()[&( - *storage_location.address(), - h256_to_u256(*storage_location.key()) - )] - ); - } - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes, basic_initial_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. - // But now the base pubdata spent has changed too. - assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); - assert_eq!(res.repeated_storage_writes, 1); +fn l1_tx_execution() { + test_l1_tx_execution::>(); } #[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
- - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: Some(L1_MESSENGER_ADDRESS), - value: 0.into(), - factory_deps: vec![], - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); +fn l1_tx_execution_high_gas_limit() { + test_l1_tx_execution_high_gas_limit::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs index fde94d9da6c..0823bee6cc9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -1,424 +1,33 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! - -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, L2BlockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, L2BlockNumber, - ProtocolVersionId, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{ - storage::ReadStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterface, VmInterfaceExt, - }, - versions::testonly::default_l1_batch, - vm_fast::{tests::tester::VmTesterBuilder, vm::Vm}, - vm_latest::{ - constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, - utils::l2_blocks::get_l2_block_hash_key, + versions::testonly::l2_blocks::{ + test_l2_block_first_in_batch, test_l2_block_initialization_number_non_zero, + test_l2_block_initialization_timestamp, test_l2_block_new_l2_block, + test_l2_block_same_l2_block, }, + vm_fast::Vm, }; -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: Some(H160::zero()), - calldata: vec![], - value: U256::zero(), - factory_deps: vec![], - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - #[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. 
- - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current L2 block to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); +fn l2_block_initialization_timestamp() { + test_l2_block_initialization_timestamp::>(); } #[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first L2 block number can not be zero. - - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_initialization_number_non_zero() { + test_l2_block_initialization_number_non_zero::>(); } #[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_same_l2_block() { + test_l2_block_same_l2_block::>(); } #[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = 
default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let mut storage_ptr = vm.vm.world.storage.borrow_mut(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr.set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.set_value( - prev_block_hash_position, - L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), - ); - drop(storage_ptr); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_new_l2_block() { + test_l2_block_new_l2_block::>(); } #[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.write_to_bootloader_heap([ - 
(fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ]) +fn l2_block_first_in_batch() { + test_l2_block_first_in_batch::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs index 730c573cdcf..f385ca2a438 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ -1,10 +1,26 @@ +use std::{any::Any, collections::HashSet, fmt}; + +use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H160, H256, U256}; +use zksync_utils::h256_to_u256; +use zksync_vm2::interface::{Event, HeapId, StateInterface}; +use zksync_vm_interface::{ + storage::ReadStorage, CurrentExecutionState, L2BlockEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmInterfaceExt, +}; + +use super::Vm; +use crate::{ + interface::storage::{ImmutableStorageView, InMemoryStorage}, + versions::testonly::TestedVm, + vm_fast::CircuitsTracer, +}; + mod block_tip; mod bootloader; mod bytecode_publishing; -mod default_aa; -// mod call_tracer; FIXME: requires tracers mod circuits; mod code_oracle; +mod default_aa; mod gas_limit; mod get_used_contracts; mod is_write_initial; @@ -12,15 +28,132 @@ mod l1_tx_execution; mod l2_blocks; mod nonce_holder; mod precompiles; -// mod prestate_tracer; FIXME: is pre-state tracer still relevant? mod refunds; mod require_eip712; mod rollbacks; -mod sekp256r1; +mod secp256r1; mod simple_execution; mod storage; -mod tester; mod tracing_execution_error; mod transfer; mod upgrade; -mod utils; + +trait ObjectSafeEq: fmt::Debug + AsRef { + fn eq(&self, other: &dyn ObjectSafeEq) -> bool; +} + +#[derive(Debug)] +struct BoxedEq(T); + +impl AsRef for BoxedEq { + fn as_ref(&self) -> &dyn Any { + &self.0 + } +} + +impl ObjectSafeEq for BoxedEq { + fn eq(&self, other: &dyn ObjectSafeEq) -> bool { + let Some(other) = other.as_ref().downcast_ref::() else { + return false; + }; + self.0 == *other + } +} + +// TODO this doesn't include all the state of ModifiedWorld +#[derive(Debug)] +pub(crate) struct VmStateDump { + state: Box, + storage_writes: Vec<((H160, U256), U256)>, + events: Box<[Event]>, +} + +impl PartialEq for VmStateDump { + fn eq(&self, other: &Self) -> bool { + self.state.as_ref().eq(other.state.as_ref()) + && self.storage_writes == other.storage_writes + && self.events == other.events + } +} + +impl TestedVm for Vm> { + type StateDump = VmStateDump; + + fn dump_state(&self) -> Self::StateDump { + VmStateDump { + state: Box::new(BoxedEq(self.inner.dump_state())), + storage_writes: self.inner.get_storage_state().collect(), + events: self.inner.events().collect(), + } + } + + fn gas_remaining(&mut self) -> u32 { + self.gas_remaining() + } + + fn get_current_execution_state(&self) -> CurrentExecutionState { + self.get_current_execution_state() + } + + fn decommitted_hashes(&self) -> HashSet { + self.decommitted_hashes().collect() + } + + fn execute_with_state_diffs( + &mut self, + diffs: Vec, + mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.enforce_state_diffs(diffs); + self.execute(mode) + } + + fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { + self.insert_bytecodes(bytecodes.iter().copied()) + } + + fn known_bytecode_hashes(&self) -> HashSet { + self.world.bytecode_cache.keys().copied().collect() 
+ } + + fn manually_decommit(&mut self, code_hash: H256) -> bool { + let (_, is_fresh) = self.inner.world_diff_mut().decommit_opcode( + &mut self.world, + &mut ((), CircuitsTracer::default()), + h256_to_u256(code_hash), + ); + is_fresh + } + + fn verify_required_bootloader_heap(&self, required_values: &[(u32, U256)]) { + for &(slot, expected_value) in required_values { + let current_value = self.inner.read_heap_u256(HeapId::FIRST, slot * 32); + assert_eq!(current_value, expected_value); + } + } + + fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) { + self.write_to_bootloader_heap(cells.iter().copied()); + } + + fn read_storage(&mut self, key: StorageKey) -> U256 { + let storage_changes = self.inner.world_diff().get_storage_state(); + let main_storage = &mut self.world.storage; + storage_changes + .get(&(*key.account().address(), h256_to_u256(*key.key()))) + .copied() + .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))) + } + + fn last_l2_block_hash(&self) -> H256 { + self.bootloader_state.last_l2_block().get_hash() + } + + fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) { + self.bootloader_state.push_l2_block(block); + } + + fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { + self.push_transaction_inner(tx, refund, true); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index 6d1e0f016e9..438d6aabe55 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -1,180 +1,6 @@ -use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, - VmRevertReason, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} +use crate::{versions::testonly::nonce_holder::test_nonce_holder, vm_fast::Vm}; #[test] -fn test_nonce_holder() { - let mut account = Account::random(); - let hex_addr = hex::encode(account.address.to_fixed_bytes()); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![ContractToDeploy::account( - read_nonce_holder_tester().to_vec(), - account.address, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. 
- // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. - vm.reset_state(true); - let mut transaction = account.get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: Some(account.address), - calldata: vec![12], - value: Default::default(), - factory_deps: vec![], - }, - None, - Nonce(nonce), - ); - let ExecuteTransactionCommon::L2(tx_data) = &mut transaction.common_data else { - unreachable!(); - }; - tx_data.signature = vec![test_mode.into()]; - vm.vm.push_transaction_inner(transaction, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!(reason.to_string(), expected_error.to_string(), "{comment}"); - } else { - assert!(!result.result.is_failed(), "{comment}: {result:?}"); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("Error function_selector = 0x45ac24a6, data = 
0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), - "Allowed to leave nonce as unused", - ); +fn nonce_holder() { + test_nonce_holder::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index b3ca1596217..ccf1463979c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -1,116 +1,19 @@ -use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config; -use zksync_types::{Address, Execute}; - -use super::{tester::VmTesterBuilder, utils::read_precompiles_contract}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + versions::testonly::precompiles::{test_ecrecover, test_keccak, test_sha256}, + vm_fast::Vm, }; #[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let keccak_count = exec_result.statistics.circuit_statistic.keccak256 - * get_geometry_config().cycles_per_keccak256_circuit as f32; - assert!(keccak_count >= 1000.0, "{keccak_count}"); +fn keccak() { + test_keccak::>(); } #[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) - .build(); - - // calldata for `doSha256(1000)`. 
- let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(sha1000_calldata).unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let sha_count = exec_result.statistics.circuit_statistic.sha256 - * get_geometry_config().cycles_per_sha256_circuit as f32; - assert!(sha_count >= 1000.0, "{sha_count}"); +fn sha256() { + test_sha256::>(); } #[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account.address), - calldata: vec![], - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover - * get_geometry_config().cycles_per_ecrecover_circuit as f32; - assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}"); +fn ecrecover() { + test_ecrecover::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs deleted file mode 100644 index 63620c7d9ff..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_test_account::TxType; -use zksync_types::{utils::deployed_address_create, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::PrestateTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, -}; - -#[test] -fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - vm.deploy_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm.test_contract.unwrap(), - false, - Default::default(), - true, - TxType::L2, - ); - vm.vm.push_transaction(tx1); - - let contract_address = vm.test_contract.unwrap(); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - assert!(prestate_result.1.contains_key(&contract_address)); -} - -#[test] -fn 
test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); - vm.test_contract = Some(deployed_address); - - // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce2 = tx2.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); - - let account = &mut vm.rich_accounts[0]; - - //enter ether to contract to see difference in the balance post execution - let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), - calldata: Default::default(), - value: U256::from(100000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); - - let tx1 = Execute { - contract_address: deployed_address2, - calldata: Default::default(), - value: U256::from(200000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx1, None)); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm - .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - //assert that the pre-state contains both deployed contracts with balance zero - assert!(prestate_result.0.contains_key(&deployed_address)); - assert!(prestate_result.0.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.0[&deployed_address].balance, - Some(U256::zero()) - ); - assert_eq!( - prestate_result.0[&deployed_address2].balance, - Some(U256::zero()) - ); - - //assert that the post-state contains both deployed contracts with the correct balance - assert!(prestate_result.1.contains_key(&deployed_address)); - assert!(prestate_result.1.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.1[&deployed_address].balance, - Some(U256::from(100000)) - ); - assert_eq!( - prestate_result.1[&deployed_address2].balance, - Some(U256::from(200000)) - ); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs index 1856995149a..335cb4afb1c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -1,221 +1,16 @@ -use ethabi::Token; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - 
utils::{read_expensive_contract, read_test_contract}, + versions::testonly::refunds::{ + test_negative_pubdata_for_transaction, test_predetermined_refunded_gas, }, + vm_fast::Vm, }; #[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - vm.vm - .push_transaction_inner(tx.clone(), result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_transaction_inner(tx, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .deduplicated_storage_logs - .len(), - current_state_without_predefined_refunds - .deduplicated_storage_logs - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); +fn predetermined_refunded_gas() { + test_predetermined_refunded_gas::>(); } #[test] fn negative_pubdata_for_transaction() { - let expensive_contract_address = Address::random(); - let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); - let expensive_function = expensive_contract.function("expensive").unwrap(); - let cleanup_function = expensive_contract.function("cleanUp").unwrap(); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - expensive_contract_bytecode, - expensive_contract_address, - )]) - .build(); - - let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: expensive_function - .encode_input(&[Token::Uint(10.into())]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(expensive_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
- let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: cleanup_function.encode_input(&[]).unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(clean_up_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - assert!(result.refunds.operator_suggested_refund > 0); - assert_eq!( - result.refunds.gas_refunded, - result.refunds.operator_suggested_refund - ); + test_negative_pubdata_for_transaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index b4448683cf7..22e4ebf258c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -1,175 +1,6 @@ -use ethabi::Token; -use zksync_eth_signer::TransactionParameters; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; -use zksync_utils::h256_to_u256; +use crate::{versions::testonly::require_eip712::test_require_eip712, vm_fast::Vm}; -use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, -}; - -impl VmTester<()> { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &address, - ); - self.vm - .inner - .world_diff() - .get_storage_state() - .get(&(L2_BASE_TOKEN_ADDRESS, h256_to_u256(*key.key()))) - .copied() - .unwrap_or_else(|| h256_to_u256(self.vm.world.storage.read_value(&key))) - } -} - -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. #[test] -fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_custom_contracts(vec![ContractToDeploy::account( - bytecode, - account_abstraction.address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). 
- let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account_abstraction.address), - calldata: encoded_input, - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.into(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - Some(beneficiary.address), - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - vec![], - Default::default(), - ); - - let mut transaction_request: TransactionRequest = tx_712.into(); - transaction_request.chain_id = Some(chain_id.into()); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.into(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); +fn require_eip712() { + test_require_eip712::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index 1ac14e01f8b..e8af23fa1e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -1,205 +1,21 @@ -use assert_matches::assert_matches; -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{Address, Execute, Nonce, U256}; -use zksync_vm_interface::VmInterfaceExt; - use crate::{ - interface::{ExecutionResult, TxExecutionMode}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, + versions::testonly::rollbacks::{ + test_rollback_in_call_mode, test_vm_loadnext_rollbacks, test_vm_rollbacks, }, + vm_fast::Vm, }; #[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // 
The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), - ), - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - ]); - - pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks); +fn vm_rollbacks() { + test_vm_rollbacks::>(); } #[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. 
- } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused( - loadnext_deploy_tx.initiator_account(), - loadnext_deploy_tx.nonce().unwrap(), - ) - .into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused( - loadnext_deploy_tx.initiator_account(), - loadnext_deploy_tx.nonce().unwrap(), - ) - .into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); +fn vm_loadnext_rollbacks() { + test_vm_loadnext_rollbacks::>(); } #[test] fn rollback_in_call_mode() { - let counter_bytecode = read_test_contract(); - let counter_address = Address::repeat_byte(1); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::EthCall) - .with_custom_contracts(vec![ContractToDeploy::new( - counter_bytecode, - counter_address, - )]) - .with_random_rich_accounts(1) - .build(); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); - - let (compression_result, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(tx, true); - compression_result.unwrap(); - assert_matches!( - vm_result.result, - ExecutionResult::Revert { output } - if output.to_string().contains("This method always reverts") - ); - assert_eq!(vm_result.logs.storage_logs, []); + test_rollback_in_call_mode::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs new file mode 100644 index 00000000000..d9661c7f713 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs @@ -0,0 +1,6 @@ +use crate::{versions::testonly::secp256r1::test_secp256r1, vm_fast::Vm}; + +#[test] +fn secp256r1() { + test_secp256r1::>(); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs 
b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs index 8c916a541e2..4fe33d237e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs @@ -1,80 +1,14 @@ -use assert_matches::assert_matches; - use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::tester::{TxType, VmTesterBuilder}, + versions::testonly::simple_execution::{test_estimate_fee, test_simple_execute}, + vm_fast::Vm, }; #[test] fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); + test_estimate_fee::>(); } #[test] fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. 
}); + test_simple_execute::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs index 2cfadb640e7..54a38814d3b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -1,133 +1,14 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{ - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::tester::VmTesterBuilder, + versions::testonly::storage::{test_storage_behavior, test_transient_storage_behavior}, + vm_fast::Vm, }; -fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 { - let bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let test_contract_address = Address::random(); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata: first_tx_calldata, - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata: second_tx_calldata, - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "First tx failed"); - vm.vm.pop_snapshot_no_rollback(); - - // We rollback once because transient storage and rollbacks are a tricky combination. - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx2.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Second tx failed"); - vm.vm.rollback_to_the_latest_snapshot(); - - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Second tx failed on second run"); - - result.statistics.pubdata_published -} - -fn test_storage_one_tx(second_tx_calldata: Vec) -> u32 { - test_storage(vec![], second_tx_calldata) -} - #[test] -fn test_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - // In all of the tests below we provide the first tx to ensure that the tracers will not include - // the statistics from the start of the bootloader and will only include those for the transaction itself. 
- - let base_pubdata = test_storage_one_tx(vec![]); - let simple_test_pubdata = test_storage_one_tx( - contract - .function("simpleWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_pubdata = test_storage_one_tx( - contract - .function("resettingWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_via_revert_pubdata = test_storage_one_tx( - contract - .function("resettingWriteViaRevert") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - - assert_eq!(simple_test_pubdata - base_pubdata, 65); - assert_eq!(resetting_write_pubdata - base_pubdata, 34); - assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34); +fn storage_behavior() { + test_storage_behavior::>(); } #[test] -fn test_transient_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let first_tstore_test = contract - .function("testTransientStore") - .unwrap() - .encode_input(&[]) - .unwrap(); - // Second transaction checks that, as expected, the transient storage is cleared after the first transaction. - let second_tstore_test = contract - .function("assertTValue") - .unwrap() - .encode_input(&[Token::Uint(U256::zero())]) - .unwrap(); - - test_storage(first_tstore_test, second_tstore_test); +fn transient_storage_behavior() { + test_transient_storage_behavior::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs deleted file mode 100644 index 212e569d510..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{get_empty_storage, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs deleted file mode 100644 index 6b1395f6634..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,240 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160, U256}; -use zksync_vm2::interface::{Event, StateInterface}; - -use super::VmTester; -use crate::{ - interface::{ - storage::ReadStorage, CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, - VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_fast::Vm, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce(Nonce, Nonce), - NonceReused(H160, Nonce), -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector: vec![144, 240, 73, 201], - data: 
vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector: vec![144, 240, 73, 201], - data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], - }) - - } - TxModifier::WrongNonce(expected, actual) => { - let function_selector = vec![98, 106, 222, 48]; - let expected_nonce_bytes = expected.0.to_be_bytes().to_vec(); - let actual_nonce_bytes = actual.0.to_be_bytes().to_vec(); - // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field - let nonce_padding = vec![0u8; 28]; - let data = [function_selector.clone(), nonce_padding.clone(), expected_nonce_bytes, nonce_padding.clone(), actual_nonce_bytes].concat(); - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector, - data - }) - } - TxModifier::NonceReused(addr, nonce) => { - let function_selector = vec![233, 10, 222, 212]; - let addr = addr.as_bytes().to_vec(); - // padding is 12 because an address takes up 20 bytes and we need it to fill a 32 byte field - let addr_padding = vec![0u8; 12]; - // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field - let nonce_padding = vec![0u8; 28]; - let data = [function_selector.clone(), addr_padding, addr, nonce_padding, nonce.0.to_be_bytes().to_vec()].concat(); - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector, - data, - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce(_, _) => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused(_, _) => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -// TODO this doesn't include all the state of ModifiedWorld -#[derive(Debug)] -struct VmStateDump { - state: S, - storage_writes: Vec<((H160, U256), U256)>, - events: Box<[Event]>, -} - -impl PartialEq for VmStateDump { - fn eq(&self, other: &Self) -> bool { - self.state == other.state - && self.storage_writes == other.storage_writes - && self.events == other.events - } -} - -impl Vm { - fn dump_state(&self) -> VmStateDump { - VmStateDump { - state: self.inner.dump_state(), - storage_writes: self.inner.get_storage_state().collect(), - events: self.inner.events().collect(), - } - } -} - -impl VmTester<()> { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - self.vm.make_snapshot(); - let inner_state_before = self.vm.dump_state(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_state(); - pretty_assertions::assert_eq!( - inner_state_before, - inner_state_after, - "Inner state before and after rollback should be equal" - ); - } else { - self.vm.pop_snapshot_no_rollback(); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs deleted file mode 100644 index 9549b32c4f1..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs +++ /dev/null @@ -1,231 +0,0 @@ -use std::{cell::RefCell, rc::Rc}; - -use zksync_contracts::BaseSystemContracts; -use zksync_test_account::{Account, TxType}; -use zksync_types::{ - block::L2BlockHasher, utils::deployed_address_create, AccountTreeId, Address, L1BatchNumber, - L2BlockNumber, Nonce, StorageKey, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use zksync_vm2::{interface::Tracer, WorldDiff}; - -use crate::{ - interface::{ - storage::{InMemoryStorage, StoragePtr}, - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - versions::{ - testonly::{default_l1_batch, default_system_env, make_account_rich, ContractToDeploy}, - vm_fast::{tests::utils::read_test_contract, vm::Vm}, - }, - vm_latest::utils::l2_blocks::load_last_l2_block, -}; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, Tr>, - pub(crate) 
storage: StoragePtr<InMemoryStorage>, - pub(crate) deployer: Option<Account>, - pub(crate) test_contract: Option<Address>
, - pub(crate) fee_account: Address, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.inspect(&mut Tr::default(), VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = Rc::new(RefCell::new(get_empty_storage())); - *self.vm.inner.world_diff_mut() = WorldDiff::default(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(&mut self.storage.borrow_mut(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(&mut self.storage.borrow_mut(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let storage = self.storage.clone(); - { - let mut storage = storage.borrow_mut(); - // Commit pending storage changes (old VM versions commit them on successful execution) - for (&(address, slot), &value) in self.vm.inner.world_diff().get_storage_state() { - let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(slot)); - storage.set_value(key, u256_to_h256(value)); - } - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(&storage).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::custom(l1_batch, self.vm.system_env.clone(), storage); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - self.vm = vm; - } -} - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -impl VmTesterBuilder { - pub(crate) fn new() -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: default_system_env(), - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = 
Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester<()> { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage); - let storage_ptr = Rc::new(RefCell::new(raw_storage)); - for account in self.rich_accounts.iter() { - make_account_rich(&mut storage_ptr.borrow_mut(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(&mut storage_ptr.borrow_mut(), deployer); - } - - let fee_account = l1_batch_env.fee_account; - let vm = Vm::custom(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - deployer: self.deployer, - test_contract: None, - fee_account, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs index 89f0fa23620..b3f5b4b33bc 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -1,55 +1,8 @@ -use zksync_types::{Execute, H160}; - use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, + versions::testonly::tracing_execution_error::test_tracing_of_execution_errors, vm_fast::Vm, }; #[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![ContractToDeploy::new( - read_error_contract(), - contract_address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = 
account.get_l2_tx_for_execute( - Execute { - contract_address: Some(contract_address), - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); +fn tracing_of_execution_errors() { + test_tracing_of_execution_errors::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs index ef510546f11..57c2c3e2c34 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -1,215 +1,16 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, U256}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::get_balance, + versions::testonly::transfer::{ + test_reentrancy_protection_send_and_transfer, test_send_and_transfer, }, + vm_fast::Vm, }; -enum TestOptions { - Send(U256), - Transfer(U256), -} - -fn test_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let recipient_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - - let test_contract_address = Address::random(); - let recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - }; - - let mut storage = get_empty_storage(); - storage.set_value( - storage_key_for_eth_balance(&test_contract_address), - u256_to_h256(value), - ); - - let mut vm = VmTesterBuilder::new() - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - ContractToDeploy::new(test_bytecode, test_contract_address), - ContractToDeploy::new(recipient_bytecode, recipient_address), - ]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let tx_result = 
vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx_result.result.is_failed(), - "Transaction wasn't successful" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); - - let new_recipient_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &recipient_address, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - assert_eq!(new_recipient_balance, value); -} - #[test] -fn test_send_and_transfer() { - test_send_or_transfer(TestOptions::Send(U256::zero())); - test_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_send_or_transfer(TestOptions::Transfer(U256::from(10).pow(18.into()))); -} - -fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - - let test_contract_address = Address::random(); - let reentrant_recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipient_address), - Token::Uint(value), - ]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipient_address), - Token::Uint(value), - ]) - .unwrap(), - ), - }; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - ContractToDeploy::new(test_bytecode, test_contract_address), - ContractToDeploy::new(reentrant_recipient_bytecode, reentrant_recipient_address), - ]) - .build(); - - // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. 
- let account = &mut vm.rich_accounts[0]; - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(reentrant_recipient_address), - calldata: reentrant_recipient_abi - .function("setX") - .unwrap() - .encode_input(&[]) - .unwrap(), - value: U256::from(1), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let tx1_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx1_result.result.is_failed(), - "Transaction 1 wasn't successful" - ); - - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value, - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx2); - let tx2_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - tx2_result.result.is_failed(), - "Transaction 2 should have failed, but it succeeded" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); +fn send_and_transfer() { + test_send_and_transfer::>(); } #[test] -fn test_reentrancy_protection_send_and_transfer() { - test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::zero())); - test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_reentrancy_protection_send_or_transfer(TestOptions::Transfer( - U256::from(10).pow(18.into()), - )); +fn reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_and_transfer::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs index ba4863f7c45..4e4533c6868 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs @@ -1,343 +1,21 @@ -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_upgrade::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - VmInterfaceHistoryEnabled, - }, - vm_fast::tests::{ - tester::VmTesterBuilder, - utils::{ - get_complex_upgrade_abi, read_complex_upgrade, read_test_contract, - verify_required_storage, - }, + versions::testonly::upgrade::{ + test_complex_upgrader, test_force_deploy_upgrade, test_protocol_upgrade_is_first, }, + vm_fast::Vm, }; -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block #[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - 
let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); +fn protocol_upgrade_is_first() { + test_protocol_upgrade_is_first::>(); } -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. #[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = [(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage( - &expected_slots, - &mut *vm.storage.borrow_mut(), - vm.vm.inner.world_diff().get_storage_state(), - ); +fn force_deploy_upgrade() { + test_force_deploy_upgrade::>(); } -/// Here we show how the work with the complex upgrader could be done #[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - { - let mut storage = vm.storage.borrow_mut(); - storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage.set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage.set_value(account_code_key, bytecode_hash); - storage.store_factory_dep(bytecode_hash, read_complex_upgrade()); - storage.store_factory_dep(msg_sender_test_hash, read_msg_sender_test()); - } - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = [ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage( - &expected_slots, - &mut *vm.storage.borrow_mut(), - vm.vm.inner.world_diff().get_storage_state(), - ); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - 
- let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: Some(COMPLEX_UPGRADER_ADDRESS), - calldata: complex_upgrader_calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") +fn complex_upgrader() { + test_complex_upgrader::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs deleted file mode 100644 index eebd825c045..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::collections::BTreeMap; - -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bootloader_code, read_bytecode, BaseSystemContracts, SystemContractCode, -}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H160, H256, - U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -use zksync_vm2::interface::{HeapId, StateInterface}; - -use crate::interface::storage::ReadStorage; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -pub(crate) fn verify_required_memory( - state: &impl StateInterface, - required_values: Vec<(U256, HeapId, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state.read_heap_u256(memory_page, cell * 32); - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn verify_required_storage( - required_values: &[(H256, StorageKey)], - main_storage: &mut impl ReadStorage, - storage_changes: &BTreeMap<(H160, U256), U256>, -) { - for &(required_value, key) in required_values { - let current_value = storage_changes - .get(&(*key.account().address(), h256_to_u256(*key.key()))) - .copied() - .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: &mut impl ReadStorage, - storage_changes: &BTreeMap<(H160, U256), U256>, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - - storage_changes - .get(&(*key.account().address(), h256_to_u256(*key.key()))) - .copied() - .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_bootloader_code(test); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn load_precompiles_contract() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - 
"etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -pub(crate) fn read_expensive_contract() -> (Vec, Contract) { - const PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; - (read_bytecode(PATH), load_contract(PATH)) -} - -pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { - const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; - (read_bytecode(PATH), load_contract(PATH)) -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs index 9909ca24937..df4a36f2d3d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -1,428 +1,9 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, - l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tests::tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTesterBuilder, - }, - tracers::PubdataTracer, - HistoryEnabled, TracerDispatcher, - }, + versions::testonly::block_tip::test_dry_run_upper_bound, + vm_latest::{HistoryEnabled, Vm}, }; -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -const CALLS_PER_TX: usize = 1_000; -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - 
.chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .chunks(CALLS_PER_TX) - .into_iter() - .map(|chunk| { - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(chunk.collect_vec())]) - .unwrap() - }) - .collect_vec(); - - encoded_calls -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute - // the gas limit - - let batch_env = L1BatchEnv { - fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), - ..default_l1_batch(zksync_types::L1BatchNumber(1)) - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_l1_batch_env(batch_env) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let txs_data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - - for (i, data) in txs_data.into_iter().enumerate() { - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), - calldata: data, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {:#?}", - test_data - ); - } - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - crate::vm_latest::MultiVMSubversion::latest(), - ); - - let result = vm.vm.inspect_inner( - &mut TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used - ); - - TestStatistics { - max_used_gas: ergs_before - ergs_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: 
result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has odd number of 32 byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} - #[test] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let statistics = vec![ - // max logs - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }, - // max messages - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }, - // long message - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }, - // max bytecodes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
- // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }, - // long bytecode - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![ - vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; - 1 - ], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }, - // lots of small repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }, - // lots of big repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - true, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, - ), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }, - // lots of small initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs( - false, - true, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, - ), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }, - // lots of large initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - false, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, - ), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }, - ]; - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
- let max_used_gas = statistics - .iter() - .map(|s| (s.statistics.max_used_gas, s.tag.clone())) - .max() - .unwrap(); - assert!( - max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", - max_used_gas.1, - max_used_gas.0, - BOOTLOADER_BATCH_TIP_OVERHEAD - ); - - let circuit_statistics = statistics - .iter() - .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) - .max() - .unwrap(); - assert!( - circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", - circuit_statistics.1, - circuit_statistics.0, - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD - ); - - let execution_metrics_size = statistics - .iter() - .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) - .max() - .unwrap(); - assert!( - execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", - execution_metrics_size.1, - execution_metrics_size.0, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD - ); +fn dry_run_upper_bound() { + test_dry_run_upper_bound::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs index 9d23f658cb8..22239a6c1e3 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs @@ -1,57 +1,14 @@ -use assert_matches::assert_matches; -use zksync_types::U256; - use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, - vm_latest::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, + versions::testonly::bootloader::{test_bootloader_out_of_gas, test_dummy_bootloader}, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); +fn dummy_bootloader() { + test_dummy_bootloader::>(); } #[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_bootloader_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); +fn bootloader_out_of_gas() { + test_bootloader_out_of_gas::>(); } diff --git 
a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs index 2ed9948af81..e0727fbed89 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs @@ -1,41 +1,9 @@ use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::bytecode, - vm_latest::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, + versions::testonly::bytecode_publishing::test_bytecode_publishing, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); +fn bytecode_publishing() { + test_bytecode_publishing::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs index e7f26b7faf8..e1dfdc7e68c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs @@ -3,17 +3,14 @@ use std::sync::Arc; use once_cell::sync::OnceCell; use zksync_types::{Address, Execute}; +use super::TestedLatestVm; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, tracers::CallTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, + versions::testonly::{ + read_max_depth_contract, read_test_contract, ContractToDeploy, VmTesterBuilder, }, + vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, ToTracerPointer}, }; // This test is ultra slow, so it's ignored by default. 
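The hunks above and below all apply the same refactoring: each per-version test file under vm_latest/tests becomes a thin #[test] wrapper that dispatches to a shared, VM-generic helper in versions::testonly (the generic arguments of those calls are not legible in this copy of the patch, since angle-bracket contents were lost in extraction). Below is a minimal, self-contained sketch of that pattern; the trait and type names are invented for illustration and are not the zksync-era API.

// Illustrative analogue of the migration pattern in this patch: the test body
// is written once, generic over a small "tested VM" trait, and each VM version
// keeps only a thin #[test] wrapper that picks the concrete type.
// Names below are made up for the sketch.

trait TestedVm: Default {
    fn gas_remaining(&mut self) -> u32;
}

// Shared, VM-agnostic test body (the analogue of a versions/testonly helper).
fn test_gas_is_reported<VM: TestedVm>() {
    let mut vm = VM::default();
    assert!(vm.gas_remaining() > 0, "freshly built VM must report gas");
}

// Concrete VM for the latest version (a stand-in for the TestedLatestVm alias).
#[derive(Default)]
struct LatestVm {
    gas: u32,
}

impl TestedVm for LatestVm {
    fn gas_remaining(&mut self) -> u32 {
        if self.gas == 0 {
            self.gas = 1_000;
        }
        self.gas
    }
}

// Thin per-version wrapper (what files like bytecode_publishing.rs become).
#[test]
fn gas_is_reported() {
    test_gas_is_reported::<LatestVm>();
}

The payoff of this shape is that the same test body can be reused by every VM version that implements the trait, which is what the TestedVm impl in the mod.rs hunk later in this patch provides for the latest VM.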
@@ -22,14 +19,13 @@ use crate::{ fn test_max_depth() { let contarct = read_max_depth_contract(); let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); + .with_custom_contracts(vec![ContractToDeploy::account(contarct, address)]) + .build::(); let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( @@ -54,16 +50,15 @@ fn test_max_depth() { #[test] fn test_basic_behavior() { - let contarct = read_test_contract(); + let contract = read_test_contract(); let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) + .build::(); let increment_by_6_calldata = "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs index c3c6816cbd8..690af7d2a35 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs @@ -1,76 +1,9 @@ -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, + versions::testonly::circuits::test_circuits, + vm_latest::{HistoryEnabled, Vm}, }; -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. #[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(Address::random()), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. 
- const EXPECTED: [f32; 13] = [ - 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, - 0.0, 0.0, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - (s.secp256k1_verify, "secp256k1_verify"), - (s.transient_storage_checker, "transient_storage_checker"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } +fn circuits() { + test_circuits::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs index b15ef7fde2b..e50e2aafcbf 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs @@ -1,282 +1,21 @@ -use ethabi::Token; -use zk_evm_1_5_0::{ - aux_structures::{MemoryPage, Timestamp}, - zkevm_opcode_defs::{ContractCodeSha256Format, VersionedHashLen32}, -}; -use zksync_types::{ - get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, - }, - HistoryEnabled, + versions::testonly::code_oracle::{ + test_code_oracle, test_code_oracle_big_bytecode, test_refunds_in_code_oracle, }, + vm_latest::{HistoryEnabled, Vm}, }; -fn generate_large_bytecode() -> Vec { - // This is the maximal possible size of a zkEVM bytecode - vec![2u8; ((1 << 16) - 1) * 32] -} - #[test] -fn test_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - // Filling the zkevm bytecode - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode, - precompiles_contract_address, - false, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(normal_zkevm_bytecode_hash), - bytes_to_be_words(normal_zkevm_bytecode), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // Now, we ask for the same bytecode. We use to partially check whether the memory page with - // the decommitted bytecode gets erased (it shouldn't). - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); -} - -fn find_code_oracle_cost_log( - precompiles_contract_address: Address, - logs: &[StorageLogWithPreviousValue], -) -> &StorageLogWithPreviousValue { - logs.iter() - .find(|log| { - *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() - }) - .expect("no code oracle cost log") +fn code_oracle() { + test_code_oracle::>(); } #[test] -fn test_code_oracle_big_bytecode() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let big_zkevm_bytecode = generate_large_bytecode(); - let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); - let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); - - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&big_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode, - precompiles_contract_address, - false, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(big_zkevm_bytecode_hash), - bytes_to_be_words(big_zkevm_bytecode), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); +fn code_oracle_big_bytecode() { + test_code_oracle_big_bytecode::>(); } #[test] fn refunds_in_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_words = bytes_to_be_words(normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - // Execute code oracle twice with identical VM state that only differs in that the queried bytecode - // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas - // for already decommitted codes). 
- let mut oracle_costs = vec![]; - for decommit in [false, true] { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode.clone(), - precompiles_contract_address, - false, - )]) - .with_storage(storage.clone()) - .build(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(normal_zkevm_bytecode_hash), - normal_zkevm_bytecode_words.clone(), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - if decommit { - let (header, normalized_preimage) = - ContractCodeSha256Format::normalize_for_decommitment(&normal_zkevm_bytecode_hash.0); - let query = vm - .vm - .state - .prepare_to_decommit( - 0, - header, - normalized_preimage, - MemoryPage(123), - Timestamp(0), - ) - .unwrap(); - - assert!(query.is_fresh); - vm.vm.state.execute_decommit(0, query).unwrap(); - } - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - let log = - find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); - oracle_costs.push(log.log.value); - } - - // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` - // in `CodeOracle.yul`. - let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); - assert_eq!( - code_oracle_refund, - (4 * normal_zkevm_bytecode_words.len()).into() - ); + test_refunds_in_code_oracle::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs index aa3eb5e752c..3d0e21c2466 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs @@ -1,79 +1,9 @@ -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - HistoryEnabled, - }, + versions::testonly::default_aa::test_default_aa_interaction, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); +fn default_aa_interaction() { + test_default_aa_interaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs index 34780b73eb0..4d6e77aed51 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -18,15 +18,12 @@ use zksync_utils::{ bytes_to_be_words, h256_to_u256, }; +use super::TestedLatestVm; use crate::{ interface::{ storage::InMemoryStorage, TxExecutionMode, VmExecutionResultAndLogs, VmInterfaceExt, }, - versions::testonly::default_system_env, - vm_latest::{ - tests::tester::{VmTester, VmTesterBuilder}, - HistoryEnabled, - }, + versions::testonly::{default_system_env, VmTester, VmTesterBuilder}, }; const MOCK_DEPLOYER_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockContractDeployer.json"; @@ -85,7 +82,7 @@ impl EvmTestBuilder { self } - fn build(self) -> VmTester { + fn build(self) -> VmTester { let mock_emulator = read_bytecode(MOCK_EMULATOR_PATH); let mut storage = self.storage; let mut system_env = default_system_env(); @@ -119,11 +116,11 @@ impl EvmTestBuilder { } } - VmTesterBuilder::new(HistoryEnabled) + VmTesterBuilder::new() .with_system_env(system_env) .with_storage(storage) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) + .with_rich_accounts(1) .build() } } @@ -137,12 +134,12 @@ fn tracing_evm_contract_deployment() { // The EVM emulator will not be accessed, so we set it to a dummy value. 
system_env.base_system_smart_contracts.evm_emulator = Some(system_env.base_system_smart_contracts.default_aa.clone()); - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_system_env(system_env) .with_storage(storage) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let account = &mut vm.rich_accounts[0]; let args = [Token::Bytes((0..32).collect())]; @@ -222,7 +219,7 @@ fn mock_emulator_with_payment(deploy_emulator: bool) { } fn test_payment( - vm: &mut VmTester, + vm: &mut VmTester, mock_emulator_abi: ðabi::Contract, balance: &mut U256, transferred_value: U256, @@ -407,7 +404,7 @@ fn mock_emulator_with_delegate_call() { } fn test_delegate_call( - vm: &mut VmTester, + vm: &mut VmTester, test_fn: ðabi::Function, from: Address, to: Address, @@ -485,7 +482,7 @@ fn mock_emulator_with_static_call() { } fn test_static_call( - vm: &mut VmTester, + vm: &mut VmTester, test_fn: ðabi::Function, from: Address, to: Address, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs index cc9aac5bb91..5aa7ab9e9c7 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs @@ -1,46 +1,9 @@ -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Execute}; - use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_latest::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, + versions::testonly::gas_limit::test_tx_gas_limit_offset, + vm_latest::{HistoryEnabled, Vm}, }; -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Account::default_fee() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); +fn tx_gas_limit_offset() { + test_tx_gas_limit_offset::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index d7cadc54b44..7f39915f2b6 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -1,252 +1,22 @@ -use std::{ - collections::{HashMap, HashSet}, - iter, - str::FromStr, -}; - -use assert_matches::assert_matches; -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; -use zk_evm_1_5_0::{ - abstractions::DecommittmentProcessor, - aux_structures::{DecommittmentQuery, MemoryPage, Timestamp}, - zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, -}; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Address, Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; -use zksync_vm_interface::VmExecutionResultAndLogs; - use crate::{ - interface::{ - storage::WriteStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceExt, + versions::testonly::get_used_contracts::{ + test_get_used_contracts, test_get_used_contracts_with_far_call, + test_get_used_contracts_with_out_of_gas_far_call, }, - vm_latest::{ - tests::{ - tester::{TxType, VmTester, VmTesterBuilder}, - utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_base_system_contracts(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // 
(`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata: big_calldata, - value: Default::default(), - factory_deps: vec![vec![1; 32]], - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_base_system_contracts(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -#[test] -fn test_contract_is_used_right_after_prepare_to_decommit() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(vm.vm.get_used_contracts().is_empty()); - - let bytecode_hash = - U256::from_str("0x100067ff3124f394104ab03481f7923f0bc4029a2aa9d41cc1d848c81257185") - .unwrap(); - vm.vm - .state - .decommittment_processor - .populate(vec![(bytecode_hash, vec![])], Timestamp(0)); - - let header = hex::decode("0100067f").unwrap(); - let normalized_preimage = - hex::decode("f3124f394104ab03481f7923f0bc4029a2aa9d41cc1d848c81257185").unwrap(); - vm.vm - .state - .decommittment_processor - .prepare_to_decommit( - 0, - DecommittmentQuery { - header: VersionedHashHeader(header.try_into().unwrap()), - normalized_preimage: VersionedHashNormalizedPreimage( - normalized_preimage.try_into().unwrap(), - ), - timestamp: Timestamp(0), - memory_page: MemoryPage(0), - decommitted_length: 0, - is_fresh: false, - }, - ) - .unwrap(); - - assert_eq!(vm.vm.get_used_contracts(), vec![bytecode_hash]); -} - -fn known_bytecodes_without_base_system_contracts( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_base_system_contracts = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - known_bytecodes_without_base_system_contracts - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { - known_bytecodes_without_base_system_contracts - .remove(&h256_to_u256(evm_emulator.hash)) - .unwrap(); - } - known_bytecodes_without_base_system_contracts -} - -/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial -/// decommitment cost (>10,000 gas). 
-fn inflated_counter_bytecode() -> Vec { - let mut counter_bytecode = read_test_contract(); - counter_bytecode.extend( - iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) - .take(10_000) - .flatten(), - ); - counter_bytecode -} - -fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) { - let counter_bytecode = inflated_counter_bytecode(); - let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); - let counter_address = Address::repeat_byte(0x23); - - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx( - &proxy_counter_bytecode, - Some(&[Token::Address(counter_address)]), - TxType::L2, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - !decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(deploy_tx.address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - (vm, counter_bytecode_hash, exec_result) +fn get_used_contracts() { + test_get_used_contracts::>(); } #[test] fn get_used_contracts_with_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); + test_get_used_contracts_with_far_call::>(); } #[test] fn get_used_contracts_with_out_of_gas_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000); - assert_matches!(exec_result.result, ExecutionResult::Revert { .. 
}); - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); + test_get_used_contracts_with_out_of_gas_far_call::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs index 8206cfa9be6..193fc586079 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs @@ -1,49 +1,9 @@ -use zksync_types::get_nonce_key; - use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - vm_latest::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, + versions::testonly::is_write_initial::test_is_write_initial_behaviour, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); +fn is_write_initial_behaviour() { + test_is_write_initial_behaviour::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index e0e4e8228f9..4b7429c2829 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -1,195 +1,16 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_test_account::Account; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Execute, ExecuteTransactionCommon, K256PrivateKey, U256, -}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::StorageWritesDeduplicator, - vm_latest::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, + versions::testonly::l1_tx_execution::{ + test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, }, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 9 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - `gasPerPubdataByte` - // - `basePubdataSpent` - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. - let basic_initial_writes = 5; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data = TransactionData::new(deploy_tx.tx.clone(), false); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes, basic_initial_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. - // But now the base pubdata spent has changed too. - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); +fn l1_tx_execution() { + test_l1_tx_execution::>(); } #[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
- - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![Account::new( - K256PrivateKey::from_bytes([0xad; 32].into()).unwrap(), - )]) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: Some(L1_MESSENGER_ADDRESS), - value: 0.into(), - factory_deps: vec![], - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); +fn l1_tx_execution_high_gas_limit() { + test_l1_tx_execution_high_gas_limit::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs index 1b5c3db59f7..82003b4a6ab 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs @@ -1,433 +1,33 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! - -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, L2BlockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, L2BlockNumber, - ProtocolVersionId, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{ - storage::WriteStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterface, VmInterfaceExt, - }, - vm_latest::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, + versions::testonly::l2_blocks::{ + test_l2_block_first_in_batch, test_l2_block_initialization_number_non_zero, + test_l2_block_initialization_timestamp, test_l2_block_new_l2_block, + test_l2_block_same_l2_block, }, - HistoryMode, + vm_latest::{HistoryEnabled, Vm}, }; -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute::default(), - received_timestamp_ms: 0, - raw_bytes: None, - } -} - #[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. 
- // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); +fn l2_block_initialization_timestamp() { + test_l2_block_initialization_timestamp::>(); } #[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. - - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_initialization_number_non_zero() { + 
test_l2_block_initialization_number_non_zero::>(); } #[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. - - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_same_l2_block() { + test_l2_block_same_l2_block::>(); } #[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: 
u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. 
- // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_new_l2_block() { + test_l2_block_new_l2_block::>(); } #[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) +fn l2_block_first_in_batch() { + test_l2_block_first_in_batch::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index fadb05cc4d1..2835f5b6faa 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -1,3 +1,30 @@ +use std::collections::{HashMap, HashSet}; + +use zk_evm_1_5_0::{ + aux_structures::{MemoryPage, Timestamp}, + vm_state::VmLocalState, + zkevm_opcode_defs::{ContractCodeSha256Format, VersionedHashLen32}, +}; +use zksync_types::{writes::StateDiffRecord, StorageKey, StorageValue, Transaction, H256, U256}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; + +use super::{HistoryEnabled, Vm}; +use crate::{ + interface::{ + storage::{InMemoryStorage, ReadStorage, StorageView, WriteStorage}, + CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs, + }, + versions::testonly::TestedVm, + vm_latest::{ + constants::BOOTLOADER_HEAP_PAGE, + 
old_vm::{event_sink::InMemoryEventSink, history_recorder::HistoryRecorder}, + tracers::PubdataTracer, + types::internals::TransactionData, + utils::logs::StorageLogQuery, + AppDataFrameManagerWithHistory, HistoryMode, SimpleMemory, TracerDispatcher, + }, +}; + mod bootloader; mod default_aa; // TODO - fix this test @@ -20,11 +47,238 @@ mod prestate_tracer; mod refunds; mod require_eip712; mod rollbacks; -mod sekp256r1; +mod secp256r1; mod simple_execution; mod storage; -mod tester; mod tracing_execution_error; mod transfer; mod upgrade; -mod utils; + +type TestedLatestVm = Vm, HistoryEnabled>; + +impl TestedVm for TestedLatestVm { + type StateDump = VmInstanceInnerState; + + fn dump_state(&self) -> Self::StateDump { + self.dump_inner_state() + } + + fn gas_remaining(&mut self) -> u32 { + self.state.local_state.callstack.current.ergs_remaining + } + + fn get_current_execution_state(&self) -> CurrentExecutionState { + self.get_current_execution_state() + } + + fn decommitted_hashes(&self) -> HashSet { + self.get_used_contracts().into_iter().collect() + } + + fn execute_with_state_diffs( + &mut self, + diffs: Vec, + mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + let pubdata_tracer = PubdataTracer::new_with_forced_state_diffs( + self.batch_env.clone(), + VmExecutionMode::Batch, + diffs, + crate::vm_latest::MultiVMSubversion::latest(), + ); + self.inspect_inner(&mut TracerDispatcher::default(), mode, Some(pubdata_tracer)) + } + + fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { + let bytecodes = bytecodes + .iter() + .map(|&bytecode| { + let hash = hash_bytecode(bytecode); + let words = bytes_to_be_words(bytecode.to_vec()); + (h256_to_u256(hash), words) + }) + .collect(); + self.state + .decommittment_processor + .populate(bytecodes, Timestamp(0)); + } + + fn known_bytecode_hashes(&self) -> HashSet { + self.state + .decommittment_processor + .known_bytecodes + .inner() + .keys() + .copied() + .collect() + } + + fn manually_decommit(&mut self, code_hash: H256) -> bool { + let (header, normalized_preimage) = + ContractCodeSha256Format::normalize_for_decommitment(&code_hash.0); + let query = self + .state + .prepare_to_decommit( + 0, + header, + normalized_preimage, + MemoryPage(123), + Timestamp(0), + ) + .unwrap(); + self.state.execute_decommit(0, query).unwrap(); + query.is_fresh + } + + fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]) { + for &(slot, required_value) in cells { + let current_value = self + .state + .memory + .read_slot(BOOTLOADER_HEAP_PAGE as usize, slot as usize) + .value; + assert_eq!(current_value, required_value); + } + } + + fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) { + let timestamp = Timestamp(self.state.local_state.timestamp); + self.state + .memory + .populate_page(BOOTLOADER_HEAP_PAGE as usize, cells.to_vec(), timestamp) + } + + fn read_storage(&mut self, key: StorageKey) -> U256 { + self.state.storage.storage.read_from_storage(&key) + } + + fn last_l2_block_hash(&self) -> H256 { + self.bootloader_state.last_l2_block().get_hash() + } + + fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) { + self.bootloader_state.push_l2_block(block); + } + + fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { + let tx = TransactionData::new(tx, false); + let overhead = tx.overhead_gas(); + self.push_raw_transaction(tx, overhead, refund, true) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct ModifiedKeysMap(HashMap); + +impl ModifiedKeysMap { + fn new(storage: &mut StorageView) -> Self { + let mut 
modified_keys = storage.modified_storage_keys().clone(); + let inner = storage.inner_mut(); + // Remove modified keys that were set to the same value (e.g., due to a rollback). + modified_keys.retain(|key, value| inner.read_value(key) != *value); + Self(modified_keys) + } +} + +// We consider hashmaps to be equal even if there is a key +// that is not present in one but has zero value in another. +impl PartialEq for ModifiedKeysMap { + fn eq(&self, other: &Self) -> bool { + for (key, value) in &self.0 { + if *value != other.0.get(key).copied().unwrap_or_default() { + return false; + } + } + for (key, value) in &other.0 { + if *value != self.0.get(key).copied().unwrap_or_default() { + return false; + } + } + true + } +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct DecommitterTestInnerState { + /// There is no way to "truly" compare the storage pointer, + /// so we just compare the modified keys. This is reasonable enough. + pub(crate) modified_storage_keys: ModifiedKeysMap, + pub(crate) known_bytecodes: HistoryRecorder>, H>, + pub(crate) decommitted_code_hashes: HistoryRecorder>, HistoryEnabled>, +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct StorageOracleInnerState { + /// There is no way to "truly" compare the storage pointer, + /// so we just compare the modified keys. This is reasonable enough. + pub(crate) modified_storage_keys: ModifiedKeysMap, + pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, + pub(crate) paid_changes: HistoryRecorder, H>, + pub(crate) initial_values: HistoryRecorder, H>, + pub(crate) returned_io_refunds: HistoryRecorder, H>, + pub(crate) returned_pubdata_costs: HistoryRecorder, H>, +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct PrecompileProcessorTestInnerState { + pub(crate) timestamp_history: HistoryRecorder, H>, +} + +/// A struct that encapsulates the state of the VM's oracles +/// The state is to be used in tests. +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct VmInstanceInnerState { + event_sink: InMemoryEventSink, + precompile_processor_state: PrecompileProcessorTestInnerState, + memory: SimpleMemory, + decommitter_state: DecommitterTestInnerState, + storage_oracle_state: StorageOracleInnerState, + local_state: VmLocalState, +} + +impl Vm, H> { + // Dump inner state of the VM. 
+ pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { + let event_sink = self.state.event_sink.clone(); + let precompile_processor_state = PrecompileProcessorTestInnerState { + timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), + }; + let memory = self.state.memory.clone(); + let decommitter_state = DecommitterTestInnerState { + modified_storage_keys: ModifiedKeysMap::new( + &mut self + .state + .decommittment_processor + .get_storage() + .borrow_mut(), + ), + known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), + decommitted_code_hashes: self + .state + .decommittment_processor + .get_decommitted_code_hashes_with_history() + .clone(), + }; + + let storage_oracle_state = StorageOracleInnerState { + modified_storage_keys: ModifiedKeysMap::new( + &mut self.state.storage.storage.get_ptr().borrow_mut(), + ), + frames_stack: self.state.storage.storage_frames_stack.clone(), + paid_changes: self.state.storage.paid_changes.clone(), + initial_values: self.state.storage.initial_values.clone(), + returned_io_refunds: self.state.storage.returned_io_refunds.clone(), + returned_pubdata_costs: self.state.storage.returned_pubdata_costs.clone(), + }; + let local_state = self.state.local_state.clone(); + + VmInstanceInnerState { + event_sink, + precompile_processor_state, + memory, + decommitter_state, + storage_oracle_state, + local_state, + } + } +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 397790a7c95..c7ea3242d4a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -1,187 +1,9 @@ -use zksync_types::{Execute, Nonce}; - use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, - VmRevertReason, - }, - vm_latest::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - HistoryEnabled, - }, + versions::testonly::nonce_holder::test_nonce_holder, + vm_latest::{HistoryEnabled, Vm}, }; -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - #[test] -fn test_nonce_holder() { - let mut account = Account::random(); - let hex_addr = hex::encode(account.address.to_fixed_bytes()); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. 
At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. - vm.reset_state(true); - let tx = account.get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: Some(account.address), - calldata: vec![12], - value: Default::default(), - factory_deps: vec![], - }, - None, - Nonce(nonce), - ); - let mut transaction_data = TransactionData::new(tx, false); - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("Error function_selector = 0x45ac24a6, data = 
0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), - "Allowed to leave nonce as unused", - ); +fn nonce_holder() { + test_nonce_holder::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs index 110b14146c7..7ef45721ea5 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs @@ -1,142 +1,19 @@ -use zk_evm_1_5_0::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, + versions::testonly::precompiles::{test_ecrecover, test_keccak, test_sha256}, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); +fn keccak() { + test_keccak::>(); } #[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. 
- let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); +fn sha256() { + test_sha256::>(); } #[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account.address), - calldata: Vec::new(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); +fn ecrecover() { + test_ecrecover::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs index 230b1d0ad87..838c4e342dc 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs @@ -4,25 +4,22 @@ use once_cell::sync::OnceCell; use zksync_test_account::TxType; use zksync_types::{utils::deployed_address_create, Execute, U256}; +use super::TestedLatestVm; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, tracers::PrestateTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, + versions::testonly::{read_simple_transfer_contract, VmTesterBuilder}, + vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, ToTracerPointer}, }; #[test] fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); + .build::(); vm.deploy_test_contract(); let account = &mut vm.rich_accounts[0]; @@ -53,37 +50,27 @@ fn test_prestate_tracer() { #[test] fn test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - 
.build(); + .build::(); let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; + let account = &mut vm.rich_accounts[0]; + let tx = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce = tx.nonce().unwrap().0.into(); vm.vm.push_transaction(tx); vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); + let deployed_address = deployed_address_create(account.address, nonce); vm.test_contract = Some(deployed_address); // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; + let tx2 = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce2 = tx2.nonce().unwrap().0.into(); vm.vm.push_transaction(tx2); vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); + let deployed_address2 = deployed_address_create(account.address, nonce2); let account = &mut vm.rich_accounts[0]; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index c00192aa8f1..dfbec170682 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -1,228 +1,16 @@ -use ethabi::Token; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{read_expensive_contract, read_test_contract}, - }, - types::internals::TransactionData, - HistoryEnabled, + versions::testonly::refunds::{ + test_negative_pubdata_for_transaction, test_predetermined_refunded_gas, }, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. 
- // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx = TransactionData::new(tx, false); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .deduplicated_storage_logs - .len(), - current_state_without_predefined_refunds - .deduplicated_storage_logs - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); +fn predetermined_refunded_gas() { + test_predetermined_refunded_gas::>(); } #[test] fn negative_pubdata_for_transaction() { - let expensive_contract_address = Address::random(); - let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); - let expensive_function = expensive_contract.function("expensive").unwrap(); - let cleanup_function = expensive_contract.function("cleanUp").unwrap(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - expensive_contract_bytecode, - expensive_contract_address, - false, - )]) - .build(); - - let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: expensive_function - .encode_input(&[Token::Uint(10.into())]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(expensive_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
- let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: cleanup_function.encode_input(&[]).unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(clean_up_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - assert!(result.refunds.operator_suggested_refund > 0); - assert_eq!( - result.refunds.gas_refunded, - result.refunds.operator_suggested_refund - ); + test_negative_pubdata_for_transaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index 1f38c6f947e..470ddb28699 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -1,166 +1,9 @@ -use ethabi::Token; -use zksync_eth_signer::TransactionParameters; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, + versions::testonly::require_eip712::test_require_eip712, + vm_latest::{HistoryEnabled, Vm}, }; -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM #[test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). 
- let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account_abstraction.address), - calldata: encoded_input, - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.into(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - Some(beneficiary.address), - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - vec![], - Default::default(), - ); - - let mut transaction_request: TransactionRequest = tx_712.into(); - transaction_request.chain_id = Some(chain_id.into()); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.into(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); +fn require_eip712() { + test_require_eip712::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 2e854cfc784..c948315266a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -1,186 +1,34 @@ -use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{get_nonce_key, Address, Execute, Nonce, U256}; +use zksync_test_account::{DeployContractsTx, TxType}; +use zksync_types::{get_nonce_key, U256}; +use super::TestedLatestVm; use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - VmInterfaceHistoryEnabled, + TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, tracers::dynamic::vm_1_5_0::DynTracer, + versions::testonly::{ + rollbacks::{test_rollback_in_call_mode, test_vm_loadnext_rollbacks, test_vm_rollbacks}, + VmTesterBuilder, + }, vm_latest::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, + types::internals::ZkSyncVmState, BootloaderState, HistoryEnabled, HistoryMode, + SimpleMemory, ToTracerPointer, Vm, VmTracer, }, }; #[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, 
TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), - ), - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); +fn vm_rollbacks() { + test_vm_rollbacks::>(); } #[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. 
- } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused( - loadnext_deploy_tx.initiator_account(), - loadnext_deploy_tx.nonce().unwrap(), - ) - .into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused( - loadnext_deploy_tx.initiator_account(), - loadnext_deploy_tx.nonce().unwrap(), - ) - .into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); +fn vm_loadnext_rollbacks() { + test_vm_loadnext_rollbacks::>(); } // Testing tracer that does not allow the recursion to go deeper than a certain limit @@ -213,11 +61,11 @@ fn test_layered_rollback() { // This test checks that the layered rollbacks work correctly, i.e. 
// the rollback by the operator will always revert all the changes - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let account = &mut vm.rich_accounts[0]; let loadnext_contract = get_loadnext_contract().bytecode; @@ -292,34 +140,5 @@ fn test_layered_rollback() { #[test] fn rollback_in_call_mode() { - let counter_bytecode = read_test_contract(); - let counter_address = Address::repeat_byte(1); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::EthCall) - .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) - .with_random_rich_accounts(1) - .build(); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); - - let (compression_result, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(tx, true); - compression_result.unwrap(); - assert_matches!( - vm_result.result, - ExecutionResult::Revert { output } - if output.to_string().contains("This method always reverts") - ); - - let storage_logs = vm - .vm - .get_current_execution_state() - .deduplicated_storage_logs; - assert!( - storage_logs.iter().all(|log| !log.is_write()), - "{storage_logs:?}" - ); + test_rollback_in_call_mode::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs new file mode 100644 index 00000000000..11534a26ded --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs @@ -0,0 +1,9 @@ +use crate::{ + versions::testonly::secp256r1::test_secp256r1, + vm_latest::{HistoryEnabled, Vm}, +}; + +#[test] +fn secp256r1() { + test_secp256r1::>(); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs deleted file mode 100644 index 93be9506a3b..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs +++ /dev/null @@ -1,74 +0,0 @@ -use zk_evm_1_5_0::zkevm_opcode_defs::p256; -use zksync_system_constants::P256VERIFY_PRECOMPILE_ADDRESS; -use zksync_types::{web3::keccak256, Execute, H256, U256}; -use zksync_utils::h256_to_u256; - -use crate::{ - interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{tests::tester::VmTesterBuilder, HistoryEnabled}, -}; - -#[test] -fn test_sekp256r1() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_execution_mode(TxExecutionMode::EthCall) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - // The digest, secret key and public key were copied from the following test suit: `https://github.com/hyperledger/besu/blob/b6a6402be90339367d5bcabcd1cfd60df4832465/crypto/algorithms/src/test/java/org/hyperledger/besu/crypto/SECP256R1Test.java#L36` - let sk = p256::SecretKey::from_slice( - &hex::decode("519b423d715f8b581f4fa8ee59f4771a5b44c8130b4e3eacca54a56dda72b464").unwrap(), - ) - .unwrap(); - let sk = p256::ecdsa::SigningKey::from(sk); - - let digest = keccak256(&hex::decode("5905238877c77421f73e43ee3da6f2d9e2ccad5fc942dcec0cbd25482935faaf416983fe165b1a045ee2bcd2e6dca3bdf46c4310a7461f9a37960ca672d3feb5473e253605fb1ddfd28065b53cb5858a8ad28175bf9bd386a5e471ea7a65c17cc934a9d791e91491eb3754d03799790fe2d308d16146d5c9b0d0debd97d79ce8").unwrap()); - let public_key_encoded = hex::decode("1ccbe91c075fc7f4f033bfa248db8fccd3565de94bbfb12f3c59ff46c271bf83ce4014c68811f9a21a1fdb2c0e6113e06db7ca93b7404e78dc7ccd5ca89a4ca9").unwrap(); - - let (sig, _) = sk.sign_prehash_recoverable(&digest).unwrap(); - let (r, s) = sig.split_bytes(); - - let mut encoded_r = [0u8; 32]; - encoded_r.copy_from_slice(&r); - - let mut encoded_s = [0u8; 32]; - encoded_s.copy_from_slice(&s); - - let mut x = [0u8; 32]; - x.copy_from_slice(&public_key_encoded[0..32]); - - let mut y = [0u8; 32]; - y.copy_from_slice(&public_key_encoded[32..64]); - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), - calldata: [digest, encoded_r, encoded_s, x, y].concat(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let execution_result = vm.vm.execute(VmExecutionMode::Batch); - - let ExecutionResult::Success { output } = execution_result.result else { - panic!("batch failed") - }; - - let output = H256::from_slice(&output); - - assert_eq!( - h256_to_u256(output), - U256::from(1u32), - "verification was not successful" - ); -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs index cd020ee9f96..29072e66b1e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs @@ -1,83 +1,14 @@ -use assert_matches::assert_matches; - use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, + versions::testonly::simple_execution::{test_estimate_fee, test_simple_execute}, + vm_latest::{HistoryEnabled, Vm}, }; #[test] fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. 
}); + test_estimate_fee::>(); } #[test] fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); + test_simple_execute::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs index 126d174a646..4cb03875a0f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs @@ -1,188 +1,14 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Address, Execute, U256}; - use crate::{ - interface::{ - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - vm_latest::{tests::tester::VmTesterBuilder, HistoryEnabled}, + versions::testonly::storage::{test_storage_behavior, test_transient_storage_behavior}, + vm_latest::{HistoryEnabled, Vm}, }; -#[derive(Debug, Default)] - -struct TestTxInfo { - calldata: Vec, - fee_overrides: Option, - should_fail: bool, -} - -fn test_storage(txs: Vec) -> u32 { - let bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let test_contract_address = Address::random(); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(txs.len() as u32) - .with_custom_contracts(vec![(bytecode, test_contract_address, false)]) - .build(); - - let mut last_result = None; - - for (id, tx) in txs.into_iter().enumerate() { - let TestTxInfo { - calldata, - fee_overrides, - should_fail, - } = tx; - - let account = &mut vm.rich_accounts[id]; - - vm.vm.make_snapshot(); - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value: 0.into(), - factory_deps: vec![], - }, - fee_overrides, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - if should_fail { - assert!(result.result.is_failed(), "Transaction should fail"); - vm.vm.rollback_to_the_latest_snapshot(); - } else { - assert!(!result.result.is_failed(), "Transaction should not fail"); - vm.vm.pop_snapshot_no_rollback(); - } - - last_result = Some(result); - } - - last_result.unwrap().statistics.pubdata_published -} - -fn test_storage_one_tx(second_tx_calldata: Vec) -> u32 { - test_storage(vec![ - TestTxInfo::default(), - TestTxInfo { - calldata: second_tx_calldata, - fee_overrides: None, - should_fail: false, - }, - ]) -} - -#[test] -fn test_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - // In all of the tests below we provide the first tx to ensure that the tracers will not include - // the statistics from the start of the bootloader and will only include those for the transaction itself. - - let base_pubdata = test_storage_one_tx(vec![]); - let simple_test_pubdata = test_storage_one_tx( - contract - .function("simpleWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_pubdata = test_storage_one_tx( - contract - .function("resettingWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_via_revert_pubdata = test_storage_one_tx( - contract - .function("resettingWriteViaRevert") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - - assert_eq!(simple_test_pubdata - base_pubdata, 65); - assert_eq!(resetting_write_pubdata - base_pubdata, 34); - assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34); -} - #[test] -fn test_transient_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let first_tstore_test = contract - .function("testTransientStore") - .unwrap() - .encode_input(&[]) - .unwrap(); - // Second transaction checks that, as expected, the transient storage is cleared after the first transaction. 
- let second_tstore_test = contract - .function("assertTValue") - .unwrap() - .encode_input(&[Token::Uint(U256::zero())]) - .unwrap(); - - test_storage(vec![ - TestTxInfo { - calldata: first_tstore_test, - ..TestTxInfo::default() - }, - TestTxInfo { - calldata: second_tstore_test, - ..TestTxInfo::default() - }, - ]); +fn storage_behavior() { + test_storage_behavior::>(); } #[test] -fn test_transient_storage_behavior_panic() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let basic_tstore_test = contract - .function("tStoreAndRevert") - .unwrap() - .encode_input(&[Token::Uint(U256::one()), Token::Bool(false)]) - .unwrap(); - - let small_fee = Fee { - // Something very-very small to make the validation fail - gas_limit: 10_000.into(), - ..Account::default_fee() - }; - - test_storage(vec![ - TestTxInfo { - calldata: basic_tstore_test.clone(), - ..TestTxInfo::default() - }, - TestTxInfo { - fee_overrides: Some(small_fee), - should_fail: true, - ..TestTxInfo::default() - }, - TestTxInfo { - calldata: basic_tstore_test, - ..TestTxInfo::default() - }, - ]); +fn transient_storage_behavior() { + test_transient_storage_behavior::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs deleted file mode 100644 index c0ef52afaa5..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_5_0::{aux_structures::Timestamp, vm_state::VmLocalState}; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - interface::storage::WriteStorage, - vm_latest::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder>, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_io_refunds: HistoryRecorder, H>, - pub(crate) returned_pubdata_costs: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.storage_frames_stack.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_io_refunds: self.state.storage.returned_io_refunds.clone(), - returned_pubdata_costs: self.state.storage.returned_pubdata_costs.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d980..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs deleted file mode 100644 index 1fe4232c778..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs +++ /dev/null @@ -1,299 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use zksync_types::{ - block::L2BlockHasher, - fee_model::BatchFeeInput, - get_code_key, 
get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}, - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, - VmInterface, VmInterfaceExt, - }, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(&self.storage).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, 
system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs index 2db37881352..a2cd6af6211 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs @@ -1,54 +1,9 @@ -use zksync_types::{Execute, H160}; - use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_latest::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, + versions::testonly::tracing_execution_error::test_tracing_of_execution_errors, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(contract_address), - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); +fn tracing_of_execution_errors() { + test_tracing_of_execution_errors::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs index 2c380623636..f37ebe6a3fb 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs @@ -1,220 +1,16 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, U256}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::get_balance, - }, - HistoryEnabled, + versions::testonly::transfer::{ + test_reentrancy_protection_send_and_transfer, test_send_and_transfer, }, + 
vm_latest::{HistoryEnabled, Vm}, }; -enum TestOptions { - Send(U256), - Transfer(U256), -} - -fn test_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let recipeint_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - - let test_contract_address = Address::random(); - let recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - }; - - let mut storage = get_empty_storage(); - storage.set_value( - storage_key_for_eth_balance(&test_contract_address), - u256_to_h256(value), - ); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - (recipeint_bytecode, recipient_address, false), - ]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let tx_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx_result.result.is_failed(), - "Transaction wasn't successful" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); - - let new_recipient_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &recipient_address, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!(new_recipient_balance, value); -} - #[test] -fn test_send_and_transfer() { - test_send_or_transfer(TestOptions::Send(U256::zero())); - test_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_send_or_transfer(TestOptions::Transfer(U256::from(10).pow(18.into()))); -} - -fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipeint_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - - let test_contract_address = Address::random(); - let reentrant_recipeint_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipeint_address), - 
Token::Uint(value), - ]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipeint_address), - Token::Uint(value), - ]) - .unwrap(), - ), - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - ( - reentrant_recipeint_bytecode, - reentrant_recipeint_address, - false, - ), - ]) - .build(); - - // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. - let account = &mut vm.rich_accounts[0]; - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(reentrant_recipeint_address), - calldata: reentrant_recipient_abi - .function("setX") - .unwrap() - .encode_input(&[]) - .unwrap(), - value: U256::from(1), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let tx1_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx1_result.result.is_failed(), - "Transaction 1 wasn't successful" - ); - - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value, - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx2); - let tx2_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - tx2_result.result.is_failed(), - "Transaction 2 should have failed, but it succeeded" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); +fn send_and_transfer() { + test_send_and_transfer::>(); } #[test] -fn test_reentrancy_protection_send_and_transfer() { - test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::zero())); - test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_reentrancy_protection_send_or_transfer(TestOptions::Transfer( - U256::from(10).pow(18.into()), - )); +fn reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_and_transfer::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index d85a504de40..9889e26e4d2 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -1,354 +1,21 @@ -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_upgrade::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; use crate::{ - interface::{ - storage::WriteStorage, ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, - VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - vm_latest::{ - tests::{ - tester::VmTesterBuilder, - 
utils::{read_complex_upgrade, verify_required_storage}, - }, - HistoryEnabled, + versions::testonly::upgrade::{ + test_complex_upgrader, test_force_deploy_upgrade, test_protocol_upgrade_is_first, }, + vm_latest::{HistoryEnabled, Vm}, }; -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block #[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); +fn protocol_upgrade_is_first() { + test_protocol_upgrade_is_first::>(); } -/// In this test we try to test how force deployments could be done via 
protocol upgrade transactions. #[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); +fn force_deploy_upgrade() { + test_force_deploy_upgrade::>(); } -/// Here we show how the work with the complex upgrader could be done #[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - 
-#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: Some(COMPLEX_UPGRADER_ADDRESS), - calldata: complex_upgrader_calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") +fn complex_upgrader() { + test_complex_upgrader::>(); } diff --git 
a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs deleted file mode 100644 index 34582fb9dde..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ /dev/null @@ -1,142 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bootloader_code, read_bytecode, read_zbin_bytecode, BaseSystemContracts, - SystemContractCode, -}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::{ - interface::storage::{StoragePtr, WriteStorage}, - vm_latest::{tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode}, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_bootloader_code(test); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn read_simple_transfer_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/simple-transfer/simple-transfer.sol/SimpleTransfer.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - 
"core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn load_precompiles_contract() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -pub(crate) fn read_expensive_contract() -> (Vec, Contract) { - const PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; - (read_bytecode(PATH), load_contract(PATH)) -} - -pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { - const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; - (read_bytecode(PATH), load_contract(PATH)) -} diff --git a/core/lib/vm_interface/src/storage/view.rs b/core/lib/vm_interface/src/storage/view.rs index ec9267609e2..249d584c9f6 100644 --- a/core/lib/vm_interface/src/storage/view.rs +++ b/core/lib/vm_interface/src/storage/view.rs @@ -102,6 +102,16 @@ impl StorageView { pub fn cache(&self) -> StorageViewCache { self.cache.clone() } + + /// Provides mutable access to the underlying storage. + /// + /// # Warning + /// + /// Mutating the underlying storage directly can easily break implied `StorageView` invariants, so use with care. + #[doc(hidden)] + pub fn inner_mut(&mut self) -> &mut S { + &mut self.storage_handle + } } impl ReadStorage for Box diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index 39a36694526..cfb539c0e0f 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -54,6 +54,12 @@ impl Account { Self::new(K256PrivateKey::random_using(rng)) } + /// Creates an account deterministically from the provided seed. + pub fn from_seed(seed: u32) -> Self { + let private_key_bytes = H256::from_low_u64_be(u64::from(seed) + 1); + Self::new(K256PrivateKey::from_bytes(private_key_bytes).unwrap()) + } + pub fn get_l2_tx_for_execute(&mut self, execute: Execute, fee: Option) -> Transaction { let tx = self.get_l2_tx_for_execute_with_nonce(execute, fee, self.nonce); self.nonce += 1; From 2151c2832498ca6e7ee1eee0bfdf6a0568345fee Mon Sep 17 00:00:00 2001 From: Vladislav Volosnikov Date: Fri, 18 Oct 2024 14:44:03 +0200 Subject: [PATCH 088/140] feat(prover): Update witness generator to zkevm_test_harness 0.150.6 (#3029) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
--- Cargo.lock | 58 +++---- Cargo.toml | 6 +- prover/Cargo.lock | 132 ++++++---------- prover/Cargo.toml | 10 +- .../src/rounds/basic_circuits/utils.rs | 149 +++++------------- .../crates/bin/witness_generator/src/utils.rs | 51 +----- prover/crates/lib/keystore/src/keystore.rs | 1 + 7 files changed, 133 insertions(+), 274 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 887b71c39ec..d3d75146aee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1313,14 +1313,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" +checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" dependencies = [ "derivative", "serde", - "zk_evm 0.150.5", - "zkevm_circuits 0.150.5", + "zk_evm 0.150.6", + "zkevm_circuits 0.150.6", ] [[package]] @@ -1380,11 +1380,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" +checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.6", "derivative", "rayon", "serde", @@ -9342,9 +9342,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" +checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" dependencies = [ "anyhow", "lazy_static", @@ -9352,7 +9352,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.5", + "zk_evm_abstractions 0.150.6", ] [[package]] @@ -9383,15 +9383,15 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" +checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", ] [[package]] @@ -9440,9 +9440,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" +checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" dependencies = [ "arrayvec 0.7.6", "boojum", @@ -9454,7 +9454,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", "zksync_cs_derive", ] @@ -9502,9 +9502,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" +checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -9628,7 +9628,7 @@ dependencies = [ "anyhow", "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 
0.150.6", "futures 0.3.30", "itertools 0.10.5", "num_cpus", @@ -9640,7 +9640,7 @@ dependencies = [ "vise", "zk_evm 0.133.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -10355,9 +10355,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" +checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" dependencies = [ "boojum", "derivative", @@ -10367,7 +10367,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.5", + "zkevm_circuits 0.150.6", ] [[package]] @@ -10494,7 +10494,7 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "ethabi", "hex", "itertools 0.10.5", @@ -10508,7 +10508,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_contracts", "zksync_eth_signer", "zksync_system_constants", @@ -10549,7 +10549,7 @@ dependencies = [ "tower-http", "tracing", "vise", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_config", "zksync_consensus_roles", "zksync_contracts", @@ -10945,7 +10945,7 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "serde", "serde_json", "serde_with", @@ -11309,8 +11309,8 @@ source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754 dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.5", - "zkevm_opcode_defs 0.150.5", + "zk_evm_abstractions 0.150.6", + "zkevm_opcode_defs 0.150.6", "zksync_vm2_interface", ] diff --git a/Cargo.toml b/Cargo.toml index 940d5dd036b..4090f28cd0e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -218,15 +218,15 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. 
circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.5" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.6" } crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.30.1" } -kzg = { package = "zksync_kzg", version = "=0.150.5" } +kzg = { package = "zksync_kzg", version = "=0.150.6" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } zk_evm_1_3_3 = { package = "zk_evm", version = "0.133" } zk_evm_1_4_0 = { package = "zk_evm", version = "0.140" } zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } -zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } +zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.6" } # New VM; pinned to a specific commit because of instability zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "a233d44bbe61dc6a758a754c3b78fe4f83e56699" } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 1d584a473d9..206cef01ba1 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -304,7 +304,7 @@ version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" dependencies = [ - "bindgen 0.69.4", + "bindgen", "cc", "cmake", "dunce", @@ -461,29 +461,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.59.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "clap 2.34.0", - "env_logger 0.9.3", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "proc-macro2 1.0.85", - "quote 1.0.36", - "regex", - "rustc-hash", - "shlex", - "which", -] - [[package]] name = "bindgen" version = "0.69.4" @@ -674,9 +651,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f9a6d958dd58a0899737e5a1fc6597aefcf7980bf8be5be5329e701cbd45ca" +checksum = "98c681a3f867afe40bcc188e5cb5260bbf5699531823affa3cbe28f7ca9b7bc9" dependencies = [ "boojum", "cmake", @@ -822,11 +799,11 @@ dependencies = [ [[package]] name = "circuit_definitions" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b532214f063e5e0ee5c0fc1d3afd56dec541efa68b8985f14cc55cc324f4c48" +checksum = "492404ea63c934d8e894325f0a741723bf91cd035cb34a92fddd8617c4a00fd3" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.6", "crossbeam", "derivative", "seq-macro", @@ -872,14 +849,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" +checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" dependencies = [ "derivative", "serde", - "zk_evm 0.150.5", - "zkevm_circuits 0.150.5", + "zk_evm 0.150.6", + "zkevm_circuits 0.150.6", ] [[package]] @@ -939,11 +916,11 @@ dependencies = [ [[package]] name = 
"circuit_sequencer_api" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" +checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.6", "derivative", "rayon", "serde", @@ -1764,9 +1741,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f0d6e329b2c11d134c3140951209be968ef316ed64ddde75640eaed7f10264" +checksum = "c1e1990fee6e9d25b40524ce53ca7977a211155a17bc7277f4dd354633e4fc22" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1775,9 +1752,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060e8186234c7a281021fb95614e06e94e1fc7ab78938360a5c27af0f8fc6105" +checksum = "d84e8d300c28cd91ceb56340f66da8607409f44a45f5e694e23723630db8c852" dependencies = [ "serde_json", ] @@ -4103,12 +4080,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "3.0.4" @@ -5711,9 +5682,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebb6d928451f0779f14da02ee9d51d4bde560328edc6471f0d5c5c11954345c4" +checksum = "92776ca824f49c255a7417939706d759e0fd3dd4217420d01da68beae04f0bd6" dependencies = [ "bincode", "blake2 0.10.6", @@ -7479,9 +7450,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" +checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" dependencies = [ "anyhow", "lazy_static", @@ -7489,7 +7460,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.5", + "zk_evm_abstractions 0.150.6", ] [[package]] @@ -7520,22 +7491,22 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" +checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", ] [[package]] name = "zkevm-assembly" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e99106038062537c05b4e6e7754d1bbba28ba16185a3e5ee5ad22e2f8be883bb" +checksum = "5dc743ac7b0d618536dc3ace798fd4b8af78b057884afda5785c7970e15d62d0" dependencies = [ "env_logger 0.9.3", "hex", @@ -7548,7 +7519,7 @@ dependencies = [ "smallvec", "structopt", "thiserror", - 
"zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", ] [[package]] @@ -7597,9 +7568,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" +checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7611,7 +7582,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", "zksync_cs_derive", ] @@ -7659,9 +7630,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" +checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -7676,13 +7647,13 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "550f82d3b7448c35168dc13bfadbccd5fd306097b6e1ea01793151c1c9137a36" +checksum = "73ad3e73d290a38a35dd245fd68cb6f498a8a8da4a52f846e88da3d3c31a34fd" dependencies = [ "bincode", "circuit_definitions", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "codegen", "crossbeam", "derivative", @@ -7703,11 +7674,10 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86511b3957adfe415ecdbd1ee01c51aa3ca131a607e61ca024976312f613b0f9" +checksum = "d555e24b853359c5b076c52f9ff9e0ed62a7edc8c2f82f93517c524410c21ecb" dependencies = [ - "bindgen 0.59.2", "cmake", "crossbeam", "derivative", @@ -7719,9 +7689,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e4c00f2db603d1b696bc2e9d822bb4c087050de5b65559067fc2232786cbc93" +checksum = "615dad34e5fe678ec3b3e029af3f19313bebb1b771a8ce963c9ab9a8cc3879d3" dependencies = [ "bit-vec", "cfg-if", @@ -7736,9 +7706,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58df1ec10e0d5eb58563bb01abda5ed185c9b9621502e361848ca40eb7868ac" +checksum = "80721b2da2643bd43f664ac65673ee078e6973c0a88d75b73bfaeac8e1bf5432" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -8106,9 +8076,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" +checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" dependencies = [ "boojum", "derivative", @@ -8118,7 +8088,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.5", + "zkevm_circuits 0.150.6", ] [[package]] @@ -8154,7 +8124,7 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "ethabi", "hex", "itertools 0.10.5", @@ -8166,7 +8136,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 
0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_contracts", "zksync_system_constants", "zksync_types", @@ -8218,7 +8188,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "clap 4.5.4", "ctrlc", "futures 0.3.30", @@ -8444,7 +8414,7 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "serde", "serde_with", "strum", @@ -8643,8 +8613,8 @@ source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754 dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.5", - "zkevm_opcode_defs 0.150.5", + "zk_evm_abstractions 0.150.6", + "zkevm_opcode_defs 0.150.6", "zksync_vm2_interface", ] diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 742eee649de..61169dd4363 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -62,13 +62,13 @@ url = "2.5.2" vise = "0.2.0" # Proving dependencies -circuit_definitions = "=0.150.5" -circuit_sequencer_api = "=0.150.5" -zkevm_test_harness = "=0.150.5" +circuit_definitions = "=0.150.6" +circuit_sequencer_api = "=0.150.6" +zkevm_test_harness = "=0.150.6" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.9" } -shivini = "=0.150.9" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.151.0" } +shivini = "=0.151.0" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } diff --git a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs index a8bc59bd45e..31dc5481410 100644 --- a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs +++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs @@ -5,7 +5,7 @@ use std::{ }; use circuit_definitions::{ - circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage}, + circuit_definitions::base_layer::ZkSyncBaseLayerStorage, encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness, }; @@ -21,7 +21,7 @@ use zksync_multivm::{ zk_evm_latest::ethereum_types::Address, }; use zksync_object_store::ObjectStore; -use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData}; +use zksync_prover_fri_types::keys::ClosedFormInputKey; use zksync_prover_interface::inputs::WitnessInputData; use zksync_system_constants::BOOTLOADER_ADDRESS; use zksync_types::L1BatchNumber; @@ -31,8 +31,7 @@ use crate::{ rounds::basic_circuits::Witness, storage_oracle::StorageOracle, utils::{ - expand_bootloader_contents, save_circuit, save_ram_premutation_queue_witness, - ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE, + expand_bootloader_contents, save_circuit, ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE, }, witness::WitnessStorage, }; @@ -64,17 +63,38 @@ pub(super) async fn generate_witness( let (circuit_sender, mut circuit_receiver) = tokio::sync::mpsc::channel(1); let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1); - let (ram_permutation_queue_sender, mut ram_permutation_queue_receiver) = - tokio::sync::mpsc::channel(1); let make_circuits_span = tracing::info_span!("make_circuits"); let make_circuits_span_copy = make_circuits_span.clone(); + + use std::{sync::mpsc::sync_channel, thread}; + let (artifacts_sender, artifacts_receiver) = sync_channel(1); + + let 
artifacts_receiver_handle = thread::spawn(move || { + let span = tracing::info_span!(parent: make_circuits_span_copy, "make_circuits_blocking"); + + while let Ok(artifact) = artifacts_receiver.recv() { + match artifact { + WitnessGenerationArtifact::BaseLayerCircuit(circuit) => { + let parent_span = span.clone(); + tracing::info_span!(parent: parent_span, "send_circuit").in_scope(|| { + circuit_sender + .blocking_send(circuit) + .expect("failed to send circuit from harness"); + }); + } + WitnessGenerationArtifact::RecursionQueue((a, b, c)) => queue_sender + .blocking_send((a as u8, b, c)) + .expect("failed to send recursion queue from harness"), + _ => {} + } + } + }); + // Blocking call from harness that does the CPU heavy lifting. // Provides circuits and recursion queue via callback functions and returns scheduler witnesses. // Circuits are "streamed" one by one as they're being generated. let make_circuits_handle = tokio::task::spawn_blocking(move || { - let span = tracing::info_span!(parent: make_circuits_span_copy, "make_circuits_blocking"); - let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state); let storage_view = StorageView::new(witness_storage).to_rc_ptr(); @@ -91,33 +111,11 @@ pub(super) async fn generate_witness( .to_str() .expect("Path to KZG trusted setup is not a UTF-8 string"); - let artifacts_callback = |artifact: WitnessGenerationArtifact| match artifact { - WitnessGenerationArtifact::BaseLayerCircuit(circuit) => { - let parent_span = span.clone(); - tracing::info_span!(parent: parent_span, "send_circuit").in_scope(|| { - circuit_sender - .blocking_send(circuit) - .expect("failed to send circuit from harness"); - }); - } - WitnessGenerationArtifact::RecursionQueue((a, b, c)) => queue_sender - .blocking_send((a as u8, b, c)) - .expect("failed to send recursion queue from harness"), - a @ WitnessGenerationArtifact::MemoryQueueWitness(_) => { - let parent_span = span.clone(); - tracing::info_span!(parent: parent_span, "send_ram_permutation_queue_witness") - .in_scope(|| { - ram_permutation_queue_sender - .blocking_send(a) - .expect("failed to send ram permutation queue sitness from harness"); - }); - } - }; - let evm_emulator_code_hash = input.vm_run_data.evm_emulator_code_hash; // By convention, default AA is used instead of the EVM emulator if the latter is disabled. let evm_emulator_code_hash = evm_emulator_code_hash.unwrap_or(input.vm_run_data.default_account_code_hash); + let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run( Address::zero(), BOOTLOADER_ADDRESS, @@ -132,9 +130,9 @@ pub(super) async fn generate_witness( geometry_config, storage_oracle, tree, - path, + path.to_owned(), input.eip_4844_blobs.blobs(), - artifacts_callback, + artifacts_sender, ); (scheduler_witness, block_witness) }) @@ -153,8 +151,6 @@ pub(super) async fn generate_witness( // If the order is tampered with, proving will fail (as the proof would be computed for a different sequence of instruction). 
let mut circuit_sequence = 0; - let mut ram_circuit_sequence = 0; - while let Some(circuit) = circuit_receiver .recv() .instrument(tracing::info_span!("wait_for_circuit")) @@ -169,26 +165,9 @@ pub(super) async fn generate_witness( .await .expect("failed to get permit for running save circuit task"); - let partial_circuit_aux_data = match &circuit { - ZkSyncBaseLayerCircuit::RAMPermutation(_) => { - let circuit_subsequence_number = ram_circuit_sequence; - ram_circuit_sequence += 1; - Some(CircuitAuxData { - circuit_subsequence_number, - }) - } - _ => None, - }; - save_circuit_handles.push(tokio::task::spawn(async move { - let (circuit_id, circuit_url) = save_circuit( - block_number, - circuit, - sequence, - partial_circuit_aux_data, - object_store, - ) - .await; + let (circuit_id, circuit_url) = + save_circuit(block_number, circuit, sequence, object_store).await; drop(permit); (circuit_id, circuit_url) })); @@ -196,57 +175,6 @@ pub(super) async fn generate_witness( } .instrument(save_circuits_span); - let mut save_ram_queue_witness_handles = vec![]; - - let save_ram_queue_witness_span = tracing::info_span!("save_circuits"); - - // Future which receives part of RAM permutation circuits witnesses and saves them async. - // Uses semaphore because these artifacts are of significant size - let ram_queue_witness_receiver_handle = async { - let mut sorted_sequence = 0; - let mut unsorted_sequence = 0; - - while let Some(witness_artifact) = ram_permutation_queue_receiver - .recv() - .instrument(tracing::info_span!("wait_for_ram_witness")) - .await - { - let object_store = object_store.clone(); - let semaphore = semaphore.clone(); - let permit = semaphore - .acquire_owned() - .await - .expect("failed to get permit for running save ram permutation queue witness task"); - let (is_sorted, witness, sequence) = match witness_artifact { - WitnessGenerationArtifact::MemoryQueueWitness((witness, sorted)) => { - let sequence = if sorted { - let sequence = sorted_sequence; - sorted_sequence += 1; - sequence - } else { - let sequence = unsorted_sequence; - unsorted_sequence += 1; - sequence - }; - (sorted, witness, sequence) - } - _ => panic!("Invalid artifact received"), - }; - save_ram_queue_witness_handles.push(tokio::task::spawn(async move { - let _ = save_ram_premutation_queue_witness( - block_number, - sequence, - is_sorted, - witness, - object_store, - ) - .await; - drop(permit); - })); - } - } - .instrument(save_ram_queue_witness_span); - let mut save_queue_handles = vec![]; let save_queues_span = tracing::info_span!("save_queues"); @@ -272,11 +200,10 @@ pub(super) async fn generate_witness( } .instrument(save_queues_span); - let (witnesses, _, _, _) = tokio::join!( + let (witnesses, _, _) = tokio::join!( make_circuits_handle, circuit_receiver_handle, - queue_receiver_handle, - ram_queue_witness_receiver_handle + queue_receiver_handle ); let (mut scheduler_witness, block_aux_witness) = witnesses.unwrap(); @@ -301,11 +228,7 @@ pub(super) async fn generate_witness( .filter(|(circuit_id, _, _)| circuits_present.contains(circuit_id)) .collect(); - let _: Vec<_> = futures::future::join_all(save_ram_queue_witness_handles) - .await - .into_iter() - .map(|result| result.expect("failed to save ram permutation queue witness")) - .collect(); + artifacts_receiver_handle.join().unwrap(); scheduler_witness.previous_block_meta_hash = input.previous_batch_metadata.meta_hash.0; scheduler_witness.previous_block_aux_hash = input.previous_batch_metadata.aux_hash.0; diff --git 
a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs index 8524bdae9ff..ea631f19cd8 100644 --- a/prover/crates/bin/witness_generator/src/utils.rs +++ b/prover/crates/bin/witness_generator/src/utils.rs @@ -3,10 +3,7 @@ use std::{ sync::Arc, }; -use circuit_definitions::{ - circuit_definitions::base_layer::ZkSyncBaseLayerCircuit, - encodings::memory_query::MemoryQueueStateWitnesses, -}; +use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; use once_cell::sync::Lazy; use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; use zksync_multivm::utils::get_used_bootloader_memory_bytes; @@ -24,8 +21,8 @@ use zksync_prover_fri_types::{ encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness, }, - keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey, RamPermutationQueueWitnessKey}, - CircuitAuxData, CircuitWrapper, FriProofWrapper, RamPermutationQueueWitness, + keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey}, + CircuitWrapper, FriProofWrapper, }; use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber, ProtocolVersionId, U256}; @@ -121,7 +118,6 @@ pub async fn save_circuit( block_number: L1BatchNumber, circuit: ZkSyncBaseLayerCircuit, sequence_number: usize, - aux_data_for_partial_circuit: Option, object_store: Arc, ) -> (u8, String) { let circuit_id = circuit.numeric_circuit_type(); @@ -133,43 +129,12 @@ pub async fn save_circuit( depth: 0, }; - let blob_url = if let Some(aux_data_for_partial_circuit) = aux_data_for_partial_circuit { - object_store - .put( - circuit_key, - &CircuitWrapper::BasePartial((circuit, aux_data_for_partial_circuit)), - ) - .await - .unwrap() - } else { - object_store - .put(circuit_key, &CircuitWrapper::Base(circuit)) - .await - .unwrap() - }; - (circuit_id, blob_url) -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %block_number) -)] -pub async fn save_ram_premutation_queue_witness( - block_number: L1BatchNumber, - circuit_subsequence_number: usize, - is_sorted: bool, - witness: MemoryQueueStateWitnesses, - object_store: Arc, -) -> String { - let witness_key = RamPermutationQueueWitnessKey { - block_number, - circuit_subsequence_number, - is_sorted, - }; - object_store - .put(witness_key, &RamPermutationQueueWitness { witness }) + let blob_url = object_store + .put(circuit_key, &CircuitWrapper::Base(circuit)) .await - .unwrap() + .unwrap(); + + (circuit_id, blob_url) } #[tracing::instrument( diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs index 6225943e3cd..ab3b115bc63 100644 --- a/prover/crates/lib/keystore/src/keystore.rs +++ b/prover/crates/lib/keystore/src/keystore.rs @@ -470,6 +470,7 @@ impl Keystore { } /// Async loads mapping of all circuits to setup key, if successful + #[cfg(feature = "gpu")] pub async fn load_all_setup_key_mapping( &self, ) -> anyhow::Result>> { From e466b52948e3c4ed1cb5af4fd999a52028e4d216 Mon Sep 17 00:00:00 2001 From: Joonatan Saarhelo Date: Fri, 18 Oct 2024 14:12:17 +0100 Subject: [PATCH 089/140] feat: vm2 tracers can access storage (#3114) Integration branch for adding storage access to the vm2 tracer API. Required for validation tracer. 
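
For illustration, a minimal tracer sketch against the updated trait bounds. Only `Tracer`, `OpcodeType`, and the new `GlobalStateInterface` bound (replacing `StateInterface`) are taken from this change; the struct and its field are illustrative, the other `Tracer` methods are assumed to have default no-op implementations (as with the benchmark tracer touched below), and no concrete storage accessor is called because those methods are not part of this diff:

```rust
use zksync_vm2::interface::{GlobalStateInterface, OpcodeType, Tracer};

/// Illustrative tracer; a real validation tracer would inspect storage through
/// the `GlobalStateInterface` bound that this PR introduces.
#[derive(Debug, Default)]
struct InstructionProbe {
    seen: u64,
}

impl Tracer for InstructionProbe {
    fn before_instruction<OP: OpcodeType, S: GlobalStateInterface>(&mut self, _state: &mut S) {
        // `_state` now exposes global VM state (storage included) to tracers,
        // which is what the validation tracer needs; here we only count instructions.
        self.seen += 1;
    }
}
```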
--- Cargo.lock | 4 ++-- Cargo.toml | 2 +- core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs | 4 ++-- core/tests/vm-benchmark/src/vm.rs | 2 +- prover/Cargo.lock | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3d75146aee..eac6e9771f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11305,7 +11305,7 @@ dependencies = [ [[package]] name = "zksync_vm2" version = "0.2.1" -source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "enum_dispatch", "primitive-types", @@ -11317,7 +11317,7 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" version = "0.2.1" -source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "primitive-types", ] diff --git a/Cargo.toml b/Cargo.toml index 4090f28cd0e..f1e70e7f302 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -229,7 +229,7 @@ zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.6" } # New VM; pinned to a specific commit because of instability -zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "a233d44bbe61dc6a758a754c3b78fe4f83e56699" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "df5bec3d04d64d434f9b0ccb285ba4681008f7b3" } # Consensus dependencies. zksync_concurrency = "=0.5.0" diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs index b48ec7eacb0..f588f20ab25 100644 --- a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs @@ -1,5 +1,5 @@ use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_vm2::interface::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; +use zksync_vm2::interface::{CycleStats, GlobalStateInterface, Opcode, OpcodeType, Tracer}; use zksync_vm_interface::CircuitStatistic; use crate::vm_latest::tracers::circuits_capacity::*; @@ -24,7 +24,7 @@ pub struct CircuitsTracer { } impl Tracer for CircuitsTracer { - fn after_instruction(&mut self, _state: &mut S) { + fn after_instruction(&mut self, _: &mut S) { self.main_vm_cycles += 1; match OP::VALUE { diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index 30e2321298f..dddef0de82f 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -103,7 +103,7 @@ impl BenchmarkingVmFactory for Fast impl vm_fast::Tracer for InstructionCount { fn before_instruction< OP: zksync_vm2::interface::OpcodeType, - S: zksync_vm2::interface::StateInterface, + S: zksync_vm2::interface::GlobalStateInterface, >( &mut self, _: &mut S, diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 206cef01ba1..1408f2b23cd 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8609,7 +8609,7 @@ dependencies = [ [[package]] name = "zksync_vm2" version = "0.2.1" -source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" +source = 
"git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "enum_dispatch", "primitive-types", @@ -8621,7 +8621,7 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" version = "0.2.1" -source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "primitive-types", ] From 5d5214ba983823b306495d34fdd1d46abacce07a Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Sat, 19 Oct 2024 00:47:33 +1100 Subject: [PATCH 090/140] fix(external-node): delete empty unsealed batch on EN initialization (#3125) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR reverts #3088 as I have realized it is going to be very hard to make this fix work by going in that direction. Basically initializing with an empty unsealed batch causes a lot of issues and the existing state keeper/external IO flow heavily relies on us having at least one at the start to initialize correctly. Will leave more context in the comments. Feel free to review individual commits to not see revert changelog. ## Why ❔ This bug causes EN to panic ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- Cargo.lock | 1 + ...81f4625ebd593aa4cd2bae79bcc0637387d78.json | 22 ++++ core/lib/dal/src/blocks_dal.rs | 45 ++++++++ core/node/consensus/src/testonly.rs | 9 +- core/node/node_sync/Cargo.toml | 1 + core/node/node_sync/src/external_io.rs | 8 ++ core/node/node_sync/src/fetcher.rs | 36 +------ core/node/node_sync/src/sync_action.rs | 12 +++ core/node/node_sync/src/tests.rs | 101 +++++++++++++++++- 9 files changed, 195 insertions(+), 40 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json diff --git a/Cargo.lock b/Cargo.lock index eac6e9771f5..a5e51346bdf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10777,6 +10777,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", + "backon", "chrono", "futures 0.3.30", "once_cell", diff --git a/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json b/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json new file mode 100644 index 00000000000..b40bdca666b --- /dev/null +++ b/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM l1_batches\n WHERE\n number > $1\n AND NOT is_sealed\n RETURNING number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78" +} diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index bf1b48130c4..f71dc68ce75 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -2058,6 +2058,37 @@ impl BlocksDal<'_, '_> { Ok(()) } + /// Deletes the 
unsealed L1 batch from the storage. Expects the caller to make sure there are no + /// associated L2 blocks. + /// + /// Accepts `batch_to_keep` as a safety mechanism. + pub async fn delete_unsealed_l1_batch( + &mut self, + batch_to_keep: L1BatchNumber, + ) -> DalResult<()> { + let deleted_row = sqlx::query!( + r#" + DELETE FROM l1_batches + WHERE + number > $1 + AND NOT is_sealed + RETURNING number + "#, + i64::from(batch_to_keep.0) + ) + .instrument("delete_unsealed_l1_batch") + .with_arg("batch_to_keep", &batch_to_keep) + .fetch_optional(self.storage) + .await?; + if let Some(deleted_row) = deleted_row { + tracing::info!( + l1_batch_number = %deleted_row.number, + "Deleted unsealed batch" + ); + } + Ok(()) + } + /// Deletes all L1 batches from the storage so that the specified batch number is the last one left. pub async fn delete_l1_batches(&mut self, last_batch_to_keep: L1BatchNumber) -> DalResult<()> { self.delete_l1_batches_inner(Some(last_batch_to_keep)).await @@ -2184,6 +2215,20 @@ impl BlocksDal<'_, '_> { Ok(Some((L2BlockNumber(min as u32), L2BlockNumber(max as u32)))) } + /// Returns `true` if there exists a non-sealed batch (i.e. there is one+ stored L2 block that isn't assigned + /// to any batch yet). + pub async fn pending_batch_exists(&mut self) -> DalResult { + let count = sqlx::query_scalar!( + "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL" + ) + .instrument("pending_batch_exists") + .fetch_one(self.storage) + .await? + .unwrap_or(0); + + Ok(count != 0) + } + // methods used for measuring Eth tx stage transition latencies // and emitting metrics base on these measured data pub async fn oldest_uncommitted_batch_timestamp(&mut self) -> DalResult> { diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 98c0d6b0813..4ebcf5c9a61 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -219,11 +219,10 @@ impl StateKeeper { .wait(IoCursor::for_fetcher(&mut conn.0)) .await? .context("IoCursor::new()")?; - let batch_sealed = ctx - .wait(conn.0.blocks_dal().get_unsealed_l1_batch()) + let pending_batch = ctx + .wait(conn.0.blocks_dal().pending_batch_exists()) .await? - .context("get_unsealed_l1_batch()")? 
- .is_none(); + .context("pending_batch_exists()")?; let (actions_sender, actions_queue) = ActionQueue::new(); let addr = sync::watch::channel(None).0; let sync_state = SyncState::default(); @@ -259,7 +258,7 @@ impl StateKeeper { last_batch: cursor.l1_batch, last_block: cursor.next_l2_block - 1, last_timestamp: cursor.prev_l2_block_timestamp, - batch_sealed, + batch_sealed: !pending_batch, next_priority_op: PriorityOpId(1), actions_sender, sync_state: sync_state.clone(), diff --git a/core/node/node_sync/Cargo.toml b/core/node/node_sync/Cargo.toml index ccfc8dd8a4e..9c5b0c00070 100644 --- a/core/node/node_sync/Cargo.toml +++ b/core/node/node_sync/Cargo.toml @@ -43,3 +43,4 @@ zksync_node_test_utils.workspace = true assert_matches.workspace = true once_cell.workspace = true test-casing.workspace = true +backon.workspace = true diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 10fb2925015..5e3a5ce9f46 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -155,6 +155,14 @@ impl StateKeeperIO for ExternalIO { ) })?; let Some(mut pending_l2_block_header) = pending_l2_block_header else { + tracing::info!( + l1_batch_number = %cursor.l1_batch, + "No pending L2 blocks found; pruning unsealed batch if exists as we need at least one L2 block to initialize" + ); + storage + .blocks_dal() + .delete_unsealed_l1_batch(cursor.l1_batch - 1) + .await?; return Ok((cursor, None)); }; diff --git a/core/node/node_sync/src/fetcher.rs b/core/node/node_sync/src/fetcher.rs index 3f8558ed0ac..51b9f7c7a06 100644 --- a/core/node/node_sync/src/fetcher.rs +++ b/core/node/node_sync/src/fetcher.rs @@ -114,8 +114,8 @@ impl IoCursorExt for IoCursor { let mut this = Self::new(storage).await?; // It's important to know whether we have opened a new batch already or just sealed the previous one. // Depending on it, we must either insert `OpenBatch` item into the queue, or not. 
- let unsealed_batch = storage.blocks_dal().get_unsealed_l1_batch().await?; - if unsealed_batch.is_none() { + let was_new_batch_open = storage.blocks_dal().pending_batch_exists().await?; + if !was_new_batch_open { this.l1_batch -= 1; // Should continue from the last L1 batch present in the storage } Ok(this) @@ -201,35 +201,3 @@ impl IoCursorExt for IoCursor { new_actions } } - -#[cfg(test)] -mod tests { - use zksync_dal::{ConnectionPool, Core, CoreDal}; - use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; - use zksync_state_keeper::io::IoCursor; - use zksync_types::{block::UnsealedL1BatchHeader, L1BatchNumber}; - - use crate::fetcher::IoCursorExt; - - #[tokio::test] - async fn io_cursor_recognizes_empty_unsealed_batch() -> anyhow::Result<()> { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - insert_genesis_batch(&mut conn, &GenesisParams::mock()) - .await - .unwrap(); - conn.blocks_dal() - .insert_l1_batch(UnsealedL1BatchHeader { - number: L1BatchNumber(1), - timestamp: 1, - protocol_version: None, - fee_address: Default::default(), - fee_input: Default::default(), - }) - .await?; - - let io_cursor = IoCursor::for_fetcher(&mut conn).await?; - assert_eq!(io_cursor.l1_batch, L1BatchNumber(1)); - Ok(()) - } -} diff --git a/core/node/node_sync/src/sync_action.rs b/core/node/node_sync/src/sync_action.rs index 8cb90d24fe8..e3fd56ae9bb 100644 --- a/core/node/node_sync/src/sync_action.rs +++ b/core/node/node_sync/src/sync_action.rs @@ -33,6 +33,18 @@ impl ActionQueueSender { Ok(()) } + /// Pushes a single action into the queue without checking validity of the sequence. + /// + /// Useful to simulate situations where only a part of the sequence was executed on the node. + #[cfg(test)] + pub async fn push_action_unchecked(&self, action: SyncAction) -> anyhow::Result<()> { + self.0 + .send(action) + .await + .map_err(|_| anyhow::anyhow!("node action processor stopped"))?; + Ok(()) + } + /// Checks whether the action sequence is valid. /// Returned error is meant to be used as a panic message, since an invalid sequence represents an unrecoverable /// error. This function itself does not panic for the ease of testing. 
diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index 3f5791cdf24..1ae148709b2 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -2,6 +2,7 @@ use std::{iter, sync::Arc, time::Duration}; +use backon::{ConstantBuilder, Retryable}; use test_casing::test_casing; use tokio::{sync::watch, task::JoinHandle}; use zksync_contracts::BaseSystemContractsHashes; @@ -18,7 +19,7 @@ use zksync_state_keeper::{ }; use zksync_types::{ api, - block::L2BlockHasher, + block::{L2BlockHasher, UnsealedL1BatchHeader}, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, @@ -652,3 +653,101 @@ async fn external_io_with_multiple_l1_batches() { assert_eq!(fictive_l2_block.timestamp, 2); assert_eq!(fictive_l2_block.l2_tx_count, 0); } + +async fn wait_for_batch_to_be_open( + pool: &ConnectionPool, + number: L1BatchNumber, +) -> anyhow::Result { + (|| async { + let mut storage = pool.connection().await.unwrap(); + let unsealed_batch = storage.blocks_dal().get_unsealed_l1_batch().await?; + + if let Some(unsealed_batch) = unsealed_batch { + if unsealed_batch.number == number { + Ok(unsealed_batch) + } else { + Err(anyhow::anyhow!("L1 batch #{number} is not open yet")) + } + } else { + Err(anyhow::anyhow!("No unsealed L1 batch found yet")) + } + }) + .retry( + &ConstantBuilder::default() + .with_delay(Duration::from_millis(200)) + .with_max_times(20), + ) + .await +} + +#[tokio::test] +async fn external_io_empty_unsealed_batch() { + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + ensure_genesis(&mut storage).await; + drop(storage); + + let open_batch_one = open_l1_batch(1, 1, 1); + let tx = create_l2_transaction(10, 100); + let tx_hash = tx.hash(); + let tx = FetchedTransaction::new(tx.into()); + let open_batch_two = open_l1_batch(2, 2, 3); + let fictive_l2_block = SyncAction::L2Block { + params: L2BlockParams { + timestamp: 2, + virtual_blocks: 0, + }, + number: L2BlockNumber(2), + }; + let actions1 = vec![open_batch_one, tx.into(), SyncAction::SealL2Block]; + let actions2 = vec![fictive_l2_block, SyncAction::SealBatch]; + + let (actions_sender, action_queue) = ActionQueue::new(); + let client = MockMainNodeClient::default(); + let state_keeper = + StateKeeperHandles::new(pool.clone(), client, action_queue, &[&[tx_hash]]).await; + actions_sender.push_actions(actions1).await.unwrap(); + actions_sender.push_actions(actions2).await.unwrap(); + // Unchecked insert of batch #2 to simulate restart in the middle of processing an action sequence + // In other words batch #2 is inserted completely empty with no blocks/txs present in it + actions_sender + .push_action_unchecked(open_batch_two.clone()) + .await + .unwrap(); + // Wait until the L2 block is sealed. + state_keeper.wait_for_local_block(L2BlockNumber(2)).await; + + // Wait until L1 batch #2 is opened and persisted. 
+ let unsealed_batch = wait_for_batch_to_be_open(&pool, L1BatchNumber(2)) + .await + .unwrap(); + assert_eq!(unsealed_batch.number, L1BatchNumber(2)); + assert_eq!(unsealed_batch.timestamp, 2); + + // Prepare the rest of batch #2 + let tx = create_l2_transaction(20, 200); + let tx_hash = tx.hash(); + let tx = FetchedTransaction::new(tx.into()); + let fictive_l2_block = SyncAction::L2Block { + params: L2BlockParams { + timestamp: 4, + virtual_blocks: 0, + }, + number: L2BlockNumber(4), + }; + let actions1 = vec![open_batch_two, tx.into(), SyncAction::SealL2Block]; + let actions2 = vec![fictive_l2_block, SyncAction::SealBatch]; + + // Restart state keeper + let (actions_sender, action_queue) = ActionQueue::new(); + let client = MockMainNodeClient::default(); + let state_keeper = + StateKeeperHandles::new(pool.clone(), client, action_queue, &[&[tx_hash]]).await; + actions_sender.push_actions(actions1).await.unwrap(); + actions_sender.push_actions(actions2).await.unwrap(); + + let hash_task = tokio::spawn(mock_l1_batch_hash_computation(pool.clone(), 1)); + // Wait until the block #4 is sealed. + state_keeper.wait_for_local_block(L2BlockNumber(4)).await; + hash_task.await.unwrap(); +} From 37f209fec8e7cb65c0e60003d46b9ea69c43caf1 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 18 Oct 2024 17:15:28 +0300 Subject: [PATCH 091/140] feat(vm): Return compressed bytecodes from `push_transaction()` (#3126) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Returns compressed bytecodes from `VmInterface::push_transaction()`. ## Why ❔ This can be used by some external VM users. It is a more idiomatic replacement of removed `VmInterface::get_last_tx_compressed_bytecodes()`, and it's more efficient for newer VMs (doesn't clone bytecodes, instead providing a reference from the bootloader state). ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
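
As a usage sketch (import paths and the helper name are assumptions; the `PushTransactionResult` type, its `compressed_bytecodes` field, and the `original`/`compressed` fields come from this PR), a caller can inspect and, if needed, clone the compression results right after pushing a transaction:

```rust
use zksync_multivm::interface::{CompressedBytecodeInfo, VmInterface};
use zksync_types::Transaction;

/// Pushes `tx` and returns owned copies of its compressed bytecodes.
fn push_and_collect_bytecodes<V: VmInterface>(
    vm: &mut V,
    tx: Transaction,
) -> Vec<CompressedBytecodeInfo> {
    // The result may borrow from the VM's bootloader state, so inspect or clone it
    // before the next operation on `vm`.
    let push_result = vm.push_transaction(tx);
    for bytecode in push_result.compressed_bytecodes.iter() {
        // Compression is best-effort: the bytecodes are not guaranteed to be
        // published, e.g. the transaction may run out of gas during publication.
        println!(
            "compressed bytecode: {} -> {} bytes",
            bytecode.original.len(),
            bytecode.compressed.len()
        );
    }
    // `compressed_bytecodes` is a `Cow<'_, [CompressedBytecodeInfo]>`; clone it out
    // so the data outlives the mutable borrow of `vm`.
    push_result.compressed_bytecodes.into_owned()
}
```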
--- .../versions/testonly/bytecode_publishing.rs | 14 +++++++-- core/lib/multivm/src/versions/vm_1_3_2/vm.rs | 23 +++++++++------ .../versions/vm_1_3_2/vm_with_bootloader.rs | 19 ++++++------ core/lib/multivm/src/versions/vm_1_4_1/vm.rs | 11 +++++-- core/lib/multivm/src/versions/vm_1_4_2/vm.rs | 11 +++++-- .../src/versions/vm_boojum_integration/vm.rs | 11 +++++-- core/lib/multivm/src/versions/vm_fast/vm.rs | 18 ++++++++---- core/lib/multivm/src/versions/vm_latest/vm.rs | 11 +++++-- core/lib/multivm/src/versions/vm_m5/vm.rs | 12 +++++--- core/lib/multivm/src/versions/vm_m6/vm.rs | 20 ++++++++----- .../src/versions/vm_m6/vm_with_bootloader.rs | 28 +++++++++--------- .../src/versions/vm_refunds_enhancement/vm.rs | 13 ++++++--- .../src/versions/vm_virtual_blocks/vm.rs | 13 ++++++--- core/lib/multivm/src/vm_instance.rs | 11 ++++--- core/lib/vm_interface/src/lib.rs | 8 ++--- .../lib/vm_interface/src/types/outputs/mod.rs | 13 +++++++++ core/lib/vm_interface/src/utils/dump.rs | 10 +++---- core/lib/vm_interface/src/utils/shadow.rs | 29 +++++++++++++++---- core/lib/vm_interface/src/vm.rs | 11 +++++-- 19 files changed, 191 insertions(+), 95 deletions(-) diff --git a/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs index 33af7be8cc6..346241a9624 100644 --- a/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs @@ -1,4 +1,4 @@ -use zksync_test_account::{DeployContractsTx, TxType}; +use zksync_test_account::TxType; use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; use crate::{ @@ -20,8 +20,16 @@ pub(crate) fn test_bytecode_publishing() { let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); + let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx; + assert_eq!(tx.execute.factory_deps.len(), 1); // The deployed bytecode is the only dependency + let push_result = vm.vm.push_transaction(tx); + assert_eq!(push_result.compressed_bytecodes.len(), 1); + assert_eq!(push_result.compressed_bytecodes[0].original, counter); + assert_eq!( + push_result.compressed_bytecodes[0].compressed, + compressed_bytecode + ); + let result = vm.vm.execute(VmExecutionMode::OneTx); assert!(!result.result.is_failed(), "Transaction wasn't successful"); diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 89196788a76..31457fc9676 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -8,8 +8,9 @@ use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, - L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, - VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::old::TracerDispatcher, utils::bytecode, @@ -44,13 +45,17 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { - crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( - &mut self.vm, - &tx, - self.system_env.execution_mode.glue_into(), - None, - ) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + let compressed_bytecodes = + crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( + &mut self.vm, + &tx, + self.system_env.execution_mode.glue_into(), + None, + ); + PushTransactionResult { + compressed_bytecodes: compressed_bytecodes.into(), + } } fn inspect( diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs index d1acdf7708e..fd4d483fba5 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs @@ -442,7 +442,7 @@ pub fn get_bootloader_memory( let mut previous_compressed: usize = 0; let mut already_included_txs_size = 0; for (tx_index_in_block, tx) in txs.into_iter().enumerate() { - let compressed_bytecodes = predefined_compressed_bytecodes[tx_index_in_block].clone(); + let compressed_bytecodes = &predefined_compressed_bytecodes[tx_index_in_block]; let mut total_compressed_len_words = 0; for i in compressed_bytecodes.iter() { @@ -475,7 +475,7 @@ pub fn push_transaction_to_bootloader_memory( tx: &Transaction, execution_mode: TxExecutionMode, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx: TransactionData = tx.clone().into(); let block_gas_per_pubdata_byte = vm.block_context.context.block_gas_price_per_pubdata(); let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); @@ -485,7 +485,7 @@ pub fn push_transaction_to_bootloader_memory( execution_mode, overhead, explicit_compressed_bytecodes, - ); + ) } pub fn push_raw_transaction_to_bootloader_memory( @@ -494,7 +494,7 @@ pub fn push_raw_transaction_to_bootloader_memory>, -) { +) -> Vec { let tx_index_in_block = 
vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -555,7 +555,7 @@ pub fn push_raw_transaction_to_bootloader_memory, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let overhead_gas = tx.overhead_gas(block_gas_per_pubdata); let trusted_gas_limit = tx.trusted_gas_limit(block_gas_per_pubdata); @@ -604,7 +605,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( predefined_overhead: u32, trusted_gas_limit: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let mut memory: Vec<(usize, U256)> = Vec::default(); let bootloader_description_offset = @@ -640,8 +641,8 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; let memory_addition: Vec<_> = compressed_bytecodes - .into_iter() - .flat_map(|x| bytecode::encode_call(&x)) + .iter() + .flat_map(bytecode::encode_call) .collect(); let memory_addition = bytes_to_be_words(memory_addition); diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 4122ee94e66..1c38958bb31 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -9,7 +9,7 @@ use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, utils::events::extract_l2tol1logs_from_l1_messenger, @@ -81,9 +81,14 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index fe2015debd2..ca69a191e26 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -11,7 +11,7 @@ use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, utils::events::extract_l2tol1logs_from_l1_messenger, @@ -83,9 +83,14 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. 
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index ebc0a511d20..bfd055a5cc8 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -9,7 +9,7 @@ use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, utils::events::extract_l2tol1logs_from_l1_messenger, @@ -81,9 +81,14 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 39c9b3c5656..88e0b10b5ea 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -13,7 +13,7 @@ use zksync_types::{ BYTES_PER_ENUMERATION_INDEX, }, AccountTreeId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, - BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, + Transaction, BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; @@ -35,10 +35,10 @@ use crate::{ interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, - TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, - VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - VmTrackingContracts, + ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, PushTransactionResult, + Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, + VmExecutionResultAndLogs, VmExecutionStatistics, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmRevertReason, VmTrackingContracts, }, is_supported_by_fast_vm, utils::events::extract_l2tol1logs_from_l1_messenger, @@ -553,8 +553,14 @@ where impl VmInterface for Vm { type TracerDispatcher = Tr; - fn push_transaction(&mut self, tx: zksync_types::Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_inner(tx, 0, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } fn inspect( diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index f4cc1580e93..3a36b008e88 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -13,7 +13,7 @@ use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, 
BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmTrackingContracts, }, @@ -134,9 +134,14 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 5a26506f346..3d57d1cd543 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -5,8 +5,9 @@ use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_m5::{ storage::Storage, @@ -60,12 +61,15 @@ impl VmInterface for Vm { /// Tracers are not supported for here we use `()` as a placeholder type TracerDispatcher = (); - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { crate::vm_m5::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, self.system_env.execution_mode.glue_into(), - ) + ); + PushTransactionResult { + compressed_bytecodes: (&[]).into(), // bytecode compression isn't supported + } } fn inspect( diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 1fdc8ae64f8..1ee6aa61822 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -7,7 +7,7 @@ use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, @@ -72,13 +72,17 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { - crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( - &mut self.vm, - &tx, - self.system_env.execution_mode.glue_into(), - None, - ) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { + let compressed_bytecodes = + crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( + &mut self.vm, + &tx, + self.system_env.execution_mode.glue_into(), + None, + ); + PushTransactionResult { + compressed_bytecodes: compressed_bytecodes.into(), + } } fn inspect( diff --git a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs 
b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs index 7a9fbb73fe4..ae44e721b0d 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs @@ -491,7 +491,7 @@ fn get_bootloader_memory_v1( predefined_refunds[tx_index_in_block], block_gas_price_per_pubdata as u32, previous_compressed, - compressed_bytecodes, + &compressed_bytecodes, ); previous_compressed += total_compressed_len; @@ -536,7 +536,7 @@ fn get_bootloader_memory_v2( predefined_refunds[tx_index_in_block], block_gas_price_per_pubdata as u32, previous_compressed, - compressed_bytecodes, + &compressed_bytecodes, ); previous_compressed += total_compressed_len_words; @@ -554,7 +554,7 @@ pub fn push_transaction_to_bootloader_memory( tx: &Transaction, execution_mode: TxExecutionMode, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx: TransactionData = tx.clone().into(); let block_gas_per_pubdata_byte = vm.block_context.context.block_gas_price_per_pubdata(); let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); @@ -564,7 +564,7 @@ pub fn push_transaction_to_bootloader_memory( execution_mode, overhead, explicit_compressed_bytecodes, - ); + ) } pub fn push_raw_transaction_to_bootloader_memory( @@ -573,7 +573,7 @@ pub fn push_raw_transaction_to_bootloader_memory( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { match vm.vm_subversion { MultiVMSubversion::V1 => push_raw_transaction_to_bootloader_memory_v1( vm, @@ -599,7 +599,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -651,7 +651,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( predefined_overhead, trusted_ergs_limit, previous_bytecodes, - compressed_bytecodes, + &compressed_bytecodes, ); vm.state.memory.populate_page( @@ -661,6 +661,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( ); vm.bootloader_state.add_tx_data(encoded_tx_size); vm.bootloader_state.add_compressed_bytecode(compressed_len); + compressed_bytecodes } // Bytecode compression bug fixed @@ -670,7 +671,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -730,7 +731,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( predefined_overhead, trusted_ergs_limit, previous_bytecodes, - compressed_bytecodes, + &compressed_bytecodes, ); vm.state.memory.populate_page( @@ -741,6 +742,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( vm.bootloader_state.add_tx_data(encoded_tx_size); vm.bootloader_state .add_compressed_bytecode(compressed_bytecodes_encoding_len_words); + compressed_bytecodes } #[allow(clippy::too_many_arguments)] @@ -752,7 +754,7 @@ fn get_bootloader_memory_for_tx( predefined_refund: u32, block_gas_per_pubdata: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let overhead_gas = tx.overhead_gas(block_gas_per_pubdata); let trusted_gas_limit = tx.trusted_gas_limit(block_gas_per_pubdata); @@ -779,7 +781,7 @@ pub(crate) fn 
get_bootloader_memory_for_encoded_tx( predefined_overhead: u32, trusted_gas_limit: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let mut memory: Vec<(usize, U256)> = Vec::default(); let bootloader_description_offset = @@ -815,8 +817,8 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; let memory_addition: Vec<_> = compressed_bytecodes - .into_iter() - .flat_map(|x| bytecode::encode_call(&x)) + .iter() + .flat_map(bytecode::encode_call) .collect(); let memory_addition = bytes_to_be_words(memory_addition); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index d87fd4d104d..2bcd68bec04 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -6,7 +6,7 @@ use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, vm_latest::HistoryEnabled, @@ -74,9 +74,14 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 28c09590f2a..497128c64bd 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -6,7 +6,7 @@ use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, vm_latest::HistoryEnabled, @@ -74,9 +74,14 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. 
diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 89707034523..43a6c48aa9c 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -8,8 +8,8 @@ use crate::{ interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, utils::ShadowVm, - BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, - VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, + BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::TracerDispatcher, @@ -55,8 +55,7 @@ macro_rules! dispatch_legacy_vm { impl VmInterface for LegacyVmInstance { type TracerDispatcher = TracerDispatcher, H>; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { dispatch_legacy_vm!(self.push_transaction(tx)) } @@ -247,8 +246,8 @@ impl VmInterface for FastVmInsta Tr, ); - fn push_transaction(&mut self, tx: Transaction) { - dispatch_fast_vm!(self.push_transaction(tx)); + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + dispatch_fast_vm!(self.push_transaction(tx)) } fn inspect( diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index 645e3e7c856..e0287483067 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -30,10 +30,10 @@ pub use crate::{ outputs::{ BatchTransactionExecutionResult, BootloaderMemory, Call, CallType, CircuitStatistic, CompressedBytecodeInfo, CurrentExecutionState, DeduplicatedWritesMetrics, - ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, Refunds, - TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, - VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, - VmMemoryMetrics, + ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, + PushTransactionResult, Refunds, TransactionExecutionMetrics, + TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionLogs, + VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, }, tracer, }, diff --git a/core/lib/vm_interface/src/types/outputs/mod.rs b/core/lib/vm_interface/src/types/outputs/mod.rs index 1fa1cd5d168..fe25801dd12 100644 --- a/core/lib/vm_interface/src/types/outputs/mod.rs +++ b/core/lib/vm_interface/src/types/outputs/mod.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + pub use self::{ bytecode::CompressedBytecodeInfo, execution_result::{ @@ -20,3 +22,14 @@ mod execution_state; mod finished_l1batch; mod l2_block; mod statistic; + +/// Result of pushing a transaction to the VM state without executing it. +#[derive(Debug)] +pub struct PushTransactionResult<'a> { + /// Compressed bytecodes for the transaction. If the VM doesn't support bytecode compression, returns + /// an empty slice. + /// + /// Importantly, these bytecodes are not guaranteed to be published by the transaction; + /// e.g., it may run out of gas during publication. 
+ pub compressed_bytecodes: Cow<'a, [CompressedBytecodeInfo]>, +} diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs index 288c6445494..522a455a11b 100644 --- a/core/lib/vm_interface/src/utils/dump.rs +++ b/core/lib/vm_interface/src/utils/dump.rs @@ -5,9 +5,9 @@ use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2BlockNumber, Tr use crate::{ storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView}, - BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - VmTrackingContracts, + BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, + VmInterfaceHistoryEnabled, VmTrackingContracts, }; fn create_storage_snapshot( @@ -142,9 +142,9 @@ impl DumpingVm { impl VmInterface for DumpingVm { type TracerDispatcher = Vm::TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { self.record_transaction(tx.clone()); - self.inner.push_transaction(tx); + self.inner.push_transaction(tx) } fn inspect( diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs index 92eb65a810f..8cdc899238e 100644 --- a/core/lib/vm_interface/src/utils/shadow.rs +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -11,8 +11,8 @@ use super::dump::{DumpingVm, VmDump}; use crate::{ storage::{ReadStorage, StoragePtr, StorageView}, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmTrackingContracts, + PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmTrackingContracts, }; /// Handler for VM divergences. @@ -163,11 +163,30 @@ where ::TracerDispatcher, ); - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + let main_result = self.main.push_transaction(tx.clone()); + // Extend lifetime to `'static` so that the result isn't mutably borrowed from the main VM. + // Unfortunately, there's no way to express that this borrow is actually immutable, which would allow not extending the lifetime unless there's a divergence. 
+ let main_result: PushTransactionResult<'static> = PushTransactionResult { + compressed_bytecodes: main_result.compressed_bytecodes.into_owned().into(), + }; + if let Some(shadow) = self.shadow.get_mut() { - shadow.vm.push_transaction(tx.clone()); + let tx_repr = format!("{tx:?}"); // includes little data, so is OK to call proactively + let shadow_result = shadow.vm.push_transaction(tx); + + let mut errors = DivergenceErrors::new(); + errors.check_match( + "bytecodes", + &main_result.compressed_bytecodes, + &shadow_result.compressed_bytecodes, + ); + if let Err(err) = errors.into_result() { + let ctx = format!("pushing transaction {tx_repr}"); + self.report(err.context(ctx)); + } } - self.main.push_transaction(tx); + main_result } fn inspect( diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index 37e33a92b50..3a06d7f80cb 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -15,15 +15,20 @@ use zksync_types::{Transaction, H256}; use crate::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, + PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, }; pub trait VmInterface { /// Lifetime is used to be able to define `Option<&mut _>` as a dispatcher. type TracerDispatcher: Default; - /// Push transaction to bootloader memory. - fn push_transaction(&mut self, tx: Transaction); + /// Pushes a transaction to bootloader memory for future execution with bytecode compression (if it's supported by the VM). + /// + /// # Return value + /// + /// Returns preprocessing results, such as compressed bytecodes. The results may borrow from the VM state, + /// so you may want to inspect results before next operations with the VM, or clone the necessary parts. + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_>; /// Executes the next VM step (either next transaction or bootloader or the whole batch) /// with custom tracers. From 650361ae456f67fd7e10d1c4ab92647d7f397fc1 Mon Sep 17 00:00:00 2001 From: Vladislav Volosnikov Date: Fri, 18 Oct 2024 23:09:37 +0200 Subject: [PATCH 092/140] chore: Update EVM emulator-related code (#3127) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Updates after latest changes. ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .../system-constants-generator/src/utils.rs | 8 ++++---- core/lib/contracts/src/lib.rs | 19 ++++++++++--------- core/lib/types/src/api/mod.rs | 2 +- core/lib/types/src/system_contracts.rs | 2 +- core/node/node_sync/src/genesis.rs | 2 +- core/node/vm_runner/src/impls/bwip.rs | 2 +- 6 files changed, 18 insertions(+), 17 deletions(-) diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index ce7182a3aa4..800da68ee50 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -3,7 +3,7 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; use zksync_contracts::{ load_sys_contract, read_bootloader_code, read_bytecode_from_path, read_sys_contract_bytecode, - read_zbin_bytecode, BaseSystemContracts, ContractLanguage, SystemContractCode, + read_yul_bytecode, BaseSystemContracts, ContractLanguage, SystemContractCode, }; use zksync_multivm::{ interface::{ @@ -176,10 +176,10 @@ fn read_bootloader_test_code(test: &str) -> Vec { )){ contract } else { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", + read_yul_bytecode( + "contracts/system-contracts/bootloader/tests/artifacts", test - )) + ) } } diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a9e7324d5af..0ee773abcd4 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -295,10 +295,11 @@ impl SystemContractsRepo { ))) { contract } else { - read_zbin_bytecode_from_path(self.root.join(format!( - "contracts-preprocessed/{0}artifacts/{1}.yul.zbin", - directory, name - ))) + read_yul_bytecode_by_path( + self.root + .join(format!("contracts-preprocessed/{directory}artifacts")), + name, + ) } } } @@ -313,10 +314,10 @@ pub fn read_bootloader_code(bootloader_type: &str) -> Vec { { return contract; }; - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/build/artifacts/{}.yul.zbin", - bootloader_type - )) + read_yul_bytecode( + "contracts/system-contracts/bootloader/build/artifacts", + bootloader_type, + ) } fn read_proved_batch_bootloader_bytecode() -> Vec { @@ -438,7 +439,7 @@ impl BaseSystemContracts { /// Loads the latest EVM emulator for these base system contracts. Logically, it only makes sense to do for the latest protocol version. 
pub fn with_latest_evm_emulator(mut self) -> Self { - let bytecode = read_sys_contract_bytecode("", "EvmInterpreter", ContractLanguage::Yul); + let bytecode = read_sys_contract_bytecode("", "EvmEmulator", ContractLanguage::Yul); let hash = hash_bytecode(&bytecode); self.evm_emulator = Some(SystemContractCode { code: bytes_to_be_words(bytecode), diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 1c7672264cb..b8f8a2f0584 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -644,7 +644,7 @@ pub struct ProtocolVersion { /// Verifier configuration #[deprecated] pub verification_keys_hashes: Option, - /// Hashes of base system contracts (bootloader, default account and evm simulator) + /// Hashes of base system contracts (bootloader, default account and evm emulator) #[deprecated] pub base_system_contracts: Option, /// Bootloader code hash diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index 4329680991c..4d1ff9b554e 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -151,7 +151,7 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 26] = [ "", "EvmGasManager", EVM_GAS_MANAGER_ADDRESS, - ContractLanguage::Sol, + ContractLanguage::Yul, ), // For now, only zero address and the bootloader address have empty bytecode at the init // In the future, we might want to set all of the system contracts this way. diff --git a/core/node/node_sync/src/genesis.rs b/core/node/node_sync/src/genesis.rs index 0ff8d0d448c..c5d4869175d 100644 --- a/core/node/node_sync/src/genesis.rs +++ b/core/node/node_sync/src/genesis.rs @@ -109,7 +109,7 @@ async fn fetch_base_system_contracts( let bytes = client .fetch_system_contract_by_hash(hash) .await? - .context("EVM Simulator bytecode is missing on main node")?; + .context("EVM emulator bytecode is missing on main node")?; Some(SystemContractCode { code: zksync_utils::bytes_to_be_words(bytes), hash, diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index dc94752d988..a2cf126f549 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -248,7 +248,7 @@ async fn get_updates_manager_witness_input_data( .factory_deps_dal() .get_sealed_factory_dep(evm_emulator) .await? - .ok_or_else(|| anyhow!("EVM Simulator bytecode should exist"))?; + .ok_or_else(|| anyhow!("EVM emulator bytecode should exist"))?; let evm_emulator_bytecode = bytes_to_chunks(&evm_emulator_bytecode); used_bytecodes.insert(evm_emulator_code_hash, evm_emulator_bytecode); } From 30ceee8a48046e349ff0234ebb24d468a0e0876c Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Mon, 21 Oct 2024 11:21:05 +0200 Subject: [PATCH 093/140] feat(tee_verifier): speedup SQL query for new jobs (#3133) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Speed up SQL queries for new TEE prover jobs. ## Why ❔ Checking the L1Batch table adds nothing, as missing data would be caught later anyway when assembling the TEE prover input data. Catching it later also means that we can apply different error-handling strategies depending on the L1Batch age. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`.
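As an aside on the error-handling point above: a purely hypothetical sketch of how a missing-data case could be treated differently depending on batch age once it surfaces during TEE prover input assembly. The function name, signature, and the one-hour threshold are illustrative assumptions and are not part of this patch.

```rust
use std::time::Duration;

use zksync_types::L1BatchNumber;

// Hypothetical sketch only: the actual strategy lives outside this patch.
fn handle_missing_batch_data(batch: L1BatchNumber, batch_age: Duration) -> anyhow::Result<()> {
    if batch_age > Duration::from_secs(3600) {
        // An old batch with missing metadata likely indicates a real problem; fail loudly.
        anyhow::bail!("metadata for old batch #{batch} is still missing");
    }
    // A recent batch may simply not have its metadata persisted yet; retry later.
    tracing::info!("metadata for batch #{batch} not ready yet, will retry");
    Ok(())
}
```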
Signed-off-by: Harald Hoyer --- ...7dc982c8cfb0e2277aff8dfaa9654255451ac.json | 26 ------------------- ...81b01395cfd2a3e95fb4593229bd878163320.json | 26 +++++++++++++++++++ core/lib/dal/src/tee_proof_generation_dal.rs | 6 ----- 3 files changed, 26 insertions(+), 32 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json create mode 100644 core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json diff --git a/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json b/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json deleted file mode 100644 index 4d006b6d1d5..00000000000 --- a/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH upsert AS (\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n l1_batches l1\n ON p.l1_batch_number = l1.number\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n AND l1.hash IS NOT NULL\n AND l1.aux_data_hash IS NOT NULL\n AND l1.meta_parameters_hash IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n tee.status = $3\n OR (\n tee.status = $2\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n )\n FETCH FIRST ROW ONLY\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - "Text", - "Text", - "Interval", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac" -} diff --git a/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json b/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json new file mode 100644 index 00000000000..4b219bfee0a --- /dev/null +++ b/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH upsert AS (\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n tee.status = $3\n OR (\n tee.status = $2\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n )\n FETCH FIRST ROW ONLY\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n 
prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Interval", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320" +} diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index d865212f190..bde07f73280 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -43,9 +43,6 @@ impl TeeProofGenerationDal<'_, '_> { p.l1_batch_number FROM proof_generation_details p - LEFT JOIN - l1_batches l1 - ON p.l1_batch_number = l1.number LEFT JOIN tee_proof_generation_details tee ON @@ -56,9 +53,6 @@ impl TeeProofGenerationDal<'_, '_> { p.l1_batch_number >= $5 AND p.vm_run_data_blob_url IS NOT NULL AND p.proof_gen_data_blob_url IS NOT NULL - AND l1.hash IS NOT NULL - AND l1.aux_data_hash IS NOT NULL - AND l1.meta_parameters_hash IS NOT NULL ) AND ( tee.l1_batch_number IS NULL From e1c363f8f5e03c8d62bba1523f17b87d6a0e25ad Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Mon, 21 Oct 2024 12:22:58 +0300 Subject: [PATCH 094/140] fix(state-keeper): save call trace for upgrade txs (#3132) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ save call trace for upgrade txs ## Why ❔ save call trace for upgrade txs ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/node/state_keeper/src/keeper.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index 22f24573070..bd102daa308 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -687,6 +687,7 @@ impl ZkSyncStateKeeper { tx_result, tx_metrics, compressed_bytecodes, + call_tracer_result, .. } = exec_result else { @@ -711,7 +712,7 @@ impl ZkSyncStateKeeper { tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, - vec![], + call_tracer_result, ); Ok(()) } From 78839e9713773aa8940d2aa0c87e165b25ec7ebe Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Mon, 21 Oct 2024 17:23:57 +0800 Subject: [PATCH 095/140] docs: Remove link to old witness generator documentation which is now obsolete (#3093) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- prover/crates/bin/witness_generator/README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/prover/crates/bin/witness_generator/README.md b/prover/crates/bin/witness_generator/README.md index dc476ca44fc..6063c29b334 100644 --- a/prover/crates/bin/witness_generator/README.md +++ b/prover/crates/bin/witness_generator/README.md @@ -1,9 +1,5 @@ # WitnessGenerator -Please read this -[doc](https://www.notion.so/matterlabs/Draft-FRI-Prover-Integration-Prover-Shadowing-c4b1373786eb43779a93118be4be5d99) -for rationale of this binary, alongside the existing one in zk-core. - The component is responsible for generating prover jobs and saving artifacts needed for the next round of proof aggregation. That is, every aggregation round needs two sets of input: From cfbcc11be0826e8c55fafa84ae01b2aead25d127 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 21 Oct 2024 13:04:29 +0300 Subject: [PATCH 096/140] refactor: Refactor fee-related types (#3121) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Moves basic logic for fee types close to the type declaration (i.e., to `zksync_types`). - Unifies `PubdataSendingMode` and `PubdataDA`. - Removes dependency of `zksync_types` on `zksync_config`. ## Why ❔ This allows external apps depending on fee types (e.g., the test node) to not have behemoths like `zksync_dal` in their dependency graph. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- Cargo.lock | 4 - core/bin/zksync_server/src/node_builder.rs | 10 +- core/lib/basic_types/src/lib.rs | 1 + .../{types => basic_types}/src/pubdata_da.rs | 23 +- core/lib/config/src/configs/eth_sender.rs | 11 +- core/lib/config/src/testonly.rs | 15 +- core/lib/env_config/src/eth_sender.rs | 3 +- .../src/i_executor/methods/commit_batches.rs | 4 +- .../structures/commit_batch_info.rs | 18 +- core/lib/protobuf_config/src/eth.rs | 23 +- core/lib/types/Cargo.toml | 3 +- core/lib/types/src/fee_model.rs | 463 +++++++++++++++++- core/lib/types/src/lib.rs | 1 - core/node/api_server/src/testonly.rs | 7 +- core/node/api_server/src/web3/tests/mod.rs | 7 +- core/node/consistency_checker/src/lib.rs | 14 +- .../node/consistency_checker/src/tests/mod.rs | 4 +- .../eth_sender/src/aggregated_operations.rs | 8 +- core/node/eth_sender/src/aggregator.rs | 9 +- core/node/eth_sender/src/eth_tx_aggregator.rs | 13 +- core/node/eth_sender/src/publish_criterion.rs | 4 +- core/node/eth_sender/src/tester.rs | 8 +- core/node/fee_model/Cargo.toml | 2 - .../src/l1_gas_price/gas_adjuster/mod.rs | 7 +- .../src/l1_gas_price/gas_adjuster/tests.rs | 6 +- core/node/fee_model/src/lib.rs | 459 +---------------- .../implementations/layers/gas_adjuster.rs | 3 +- .../src/implementations/layers/l1_gas.rs | 28 +- core/node/state_keeper/src/io/tests/tester.rs | 3 +- prover/Cargo.lock | 2 - zkstack_cli/Cargo.lock | 2 - 31 files changed, 564 insertions(+), 601 deletions(-) rename core/lib/{types => basic_types}/src/pubdata_da.rs (54%) diff --git a/Cargo.lock b/Cargo.lock index a5e51346bdf..5da4cc8c143 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10645,7 +10645,6 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bigdecimal", "test-casing", "tokio", "tracing", @@ -10654,7 +10653,6 @@ dependencies = [ "zksync_dal", "zksync_eth_client", "zksync_types", - 
"zksync_utils", "zksync_web3_decl", ] @@ -11235,7 +11233,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -11244,7 +11241,6 @@ dependencies = [ "tokio", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index c87bf3ce2dd..234e2289424 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -4,8 +4,8 @@ use anyhow::Context; use zksync_config::{ configs::{ - da_client::DAClientConfig, eth_sender::PubdataSendingMode, - secrets::DataAvailabilitySecrets, wallets::Wallets, GeneralConfig, Secrets, + da_client::DAClientConfig, secrets::DataAvailabilitySecrets, wallets::Wallets, + GeneralConfig, Secrets, }, ContractsConfig, GenesisConfig, }; @@ -69,7 +69,9 @@ use zksync_node_framework::{ }, service::{ZkStackService, ZkStackServiceBuilder}, }; -use zksync_types::{settlement::SettlementMode, SHARED_BRIDGE_ETHER_TOKEN_ADDRESS}; +use zksync_types::{ + pubdata_da::PubdataSendingMode, settlement::SettlementMode, SHARED_BRIDGE_ETHER_TOKEN_ADDRESS, +}; use zksync_vlog::prometheus::PrometheusExporterConfig; /// Macro that looks into a path to fetch an optional config, @@ -189,7 +191,7 @@ impl MainNodeBuilder { .add_layer(BaseTokenRatioProviderLayer::new(base_token_adjuster_config)); } let state_keeper_config = try_load_config!(self.configs.state_keeper_config); - let l1_gas_layer = L1GasLayer::new(state_keeper_config); + let l1_gas_layer = L1GasLayer::new(&state_keeper_config); self.node.add_layer(l1_gas_layer); Ok(self) } diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 197bd8eb7aa..79c7b3924e3 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -29,6 +29,7 @@ pub mod commitment; pub mod network; pub mod protocol_version; pub mod prover_dal; +pub mod pubdata_da; pub mod seed_phrase; pub mod settlement; pub mod tee_types; diff --git a/core/lib/types/src/pubdata_da.rs b/core/lib/basic_types/src/pubdata_da.rs similarity index 54% rename from core/lib/types/src/pubdata_da.rs rename to core/lib/basic_types/src/pubdata_da.rs index bc7dc55e53d..3f042da98ac 100644 --- a/core/lib/types/src/pubdata_da.rs +++ b/core/lib/basic_types/src/pubdata_da.rs @@ -1,15 +1,17 @@ +//! Types related to data availability. + use chrono::{DateTime, Utc}; use num_enum::TryFromPrimitive; use serde::{Deserialize, Serialize}; -use zksync_basic_types::L1BatchNumber; -use zksync_config::configs::eth_sender::PubdataSendingMode; + +use crate::L1BatchNumber; /// Enum holding the current values used for DA Layers. #[repr(u8)] -#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] -#[derive(TryFromPrimitive)] -pub enum PubdataDA { +#[derive(Debug, Clone, Copy, Default, PartialEq, Deserialize, Serialize, TryFromPrimitive)] +pub enum PubdataSendingMode { /// Pubdata is sent to the L1 as a tx calldata. + #[default] Calldata = 0, /// Pubdata is sent to L1 as EIP-4844 blobs. 
Blobs, @@ -19,17 +21,6 @@ pub enum PubdataDA { RelayedL2Calldata, } -impl From for PubdataDA { - fn from(value: PubdataSendingMode) -> Self { - match value { - PubdataSendingMode::Calldata => PubdataDA::Calldata, - PubdataSendingMode::Blobs => PubdataDA::Blobs, - PubdataSendingMode::Custom => PubdataDA::Custom, - PubdataSendingMode::RelayedL2Calldata => PubdataDA::RelayedL2Calldata, - } - } -} - /// Represents a blob in the data availability layer. #[derive(Debug, Clone)] pub struct DataAvailabilityBlob { diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 3a1a0505728..7b67f015238 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context as _; use serde::Deserialize; -use zksync_basic_types::{settlement::SettlementMode, H256}; +use zksync_basic_types::{pubdata_da::PubdataSendingMode, settlement::SettlementMode, H256}; use zksync_crypto_primitives::K256PrivateKey; use crate::EthWatchConfig; @@ -80,15 +80,6 @@ pub enum ProofLoadingMode { FriProofFromGcs, } -#[derive(Debug, Deserialize, Clone, Copy, PartialEq, Default)] -pub enum PubdataSendingMode { - #[default] - Calldata, - Blobs, - Custom, - RelayedL2Calldata, -} - #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct SenderConfig { pub aggregated_proof_sizes: Vec, diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 0fdd927d19f..9b1ec13e2d2 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -7,6 +7,7 @@ use zksync_basic_types::{ commitment::L1BatchCommitmentMode, network::Network, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + pubdata_da::PubdataSendingMode, seed_phrase::SeedPhrase, vm::FastVmMode, L1BatchNumber, L1ChainId, L2ChainId, @@ -16,8 +17,7 @@ use zksync_crypto_primitives::K256PrivateKey; use crate::{ configs::{ - self, da_client::DAClientConfig::Avail, eth_sender::PubdataSendingMode, - external_price_api_client::ForcedPriceClientConfig, + self, da_client::DAClientConfig::Avail, external_price_api_client::ForcedPriceClientConfig, }, AvailConfig, }; @@ -388,17 +388,6 @@ impl Distribution for EncodeDist { } } -impl Distribution for EncodeDist { - fn sample(&self, rng: &mut R) -> configs::eth_sender::PubdataSendingMode { - type T = configs::eth_sender::PubdataSendingMode; - match rng.gen_range(0..3) { - 0 => T::Calldata, - 1 => T::Blobs, - _ => T::Custom, - } - } -} - impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::eth_sender::SenderConfig { configs::eth_sender::SenderConfig { diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs index e5132eb7d91..0fd61fd173b 100644 --- a/core/lib/env_config/src/eth_sender.rs +++ b/core/lib/env_config/src/eth_sender.rs @@ -41,7 +41,8 @@ impl FromEnv for GasAdjusterConfig { #[cfg(test)] mod tests { - use zksync_config::configs::eth_sender::{ProofSendingMode, PubdataSendingMode}; + use zksync_basic_types::pubdata_da::PubdataSendingMode; + use zksync_config::configs::eth_sender::ProofSendingMode; use super::*; use crate::test_utils::{hash, EnvMutex}; diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs index 883804f0bd6..67819f7d7cc 100644 --- a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs +++ 
b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs @@ -1,7 +1,7 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi::Token, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, }; use crate::{ @@ -14,7 +14,7 @@ use crate::{ pub struct CommitBatches<'a> { pub last_committed_l1_batch: &'a L1BatchWithMetadata, pub l1_batches: &'a [L1BatchWithMetadata], - pub pubdata_da: PubdataDA, + pub pubdata_da: PubdataSendingMode, pub mode: L1BatchCommitmentMode, } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 179c04748d3..6438aeb7f55 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -4,7 +4,7 @@ use zksync_types::{ L1BatchWithMetadata, }, ethabi::Token, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, web3::contract::Error as ContractError, ProtocolVersionId, U256, }; @@ -24,14 +24,14 @@ const PUBDATA_SOURCE_CUSTOM: u8 = 2; pub struct CommitBatchInfo<'a> { mode: L1BatchCommitmentMode, l1_batch_with_metadata: &'a L1BatchWithMetadata, - pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, } impl<'a> CommitBatchInfo<'a> { pub fn new( mode: L1BatchCommitmentMode, l1_batch_with_metadata: &'a L1BatchWithMetadata, - pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, ) -> Self { Self { mode, @@ -204,24 +204,24 @@ impl Tokenizable for CommitBatchInfo<'_> { // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. ( L1BatchCommitmentMode::Validium, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, ) => { vec![PUBDATA_SOURCE_CALLDATA] } - (L1BatchCommitmentMode::Validium, PubdataDA::Blobs) => { + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Blobs) => { vec![PUBDATA_SOURCE_BLOBS] } - (L1BatchCommitmentMode::Rollup, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Custom) => { panic!("Custom pubdata DA is incompatible with Rollup mode") } - (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Custom) => { vec![PUBDATA_SOURCE_CUSTOM] } ( L1BatchCommitmentMode::Rollup, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, ) => { // We compute and add the blob commitment to the pubdata payload so that we can verify the proof // even if we are not using blobs. 
@@ -232,7 +232,7 @@ impl Tokenizable for CommitBatchInfo<'_> { .chain(blob_commitment) .collect() } - (L1BatchCommitmentMode::Rollup, PubdataDA::Blobs) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Blobs) => { let pubdata = self.pubdata_input(); let pubdata_commitments = pubdata.chunks(ZK_SYNC_BYTES_PER_BLOB).flat_map(|blob| { diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index c1d95bd30d2..d4ea1d9f269 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -1,6 +1,7 @@ use anyhow::Context as _; use zksync_config::configs::{self}; use zksync_protobuf::{required, ProtoRepr}; +use zksync_types::pubdata_da::PubdataSendingMode; use crate::{proto::eth as proto, read_optional_repr}; @@ -25,23 +26,21 @@ impl proto::ProofSendingMode { } impl proto::PubdataSendingMode { - fn new(x: &configs::eth_sender::PubdataSendingMode) -> Self { - use configs::eth_sender::PubdataSendingMode as From; + fn new(x: &PubdataSendingMode) -> Self { match x { - From::Calldata => Self::Calldata, - From::Blobs => Self::Blobs, - From::Custom => Self::Custom, - From::RelayedL2Calldata => Self::RelayedL2Calldata, + PubdataSendingMode::Calldata => Self::Calldata, + PubdataSendingMode::Blobs => Self::Blobs, + PubdataSendingMode::Custom => Self::Custom, + PubdataSendingMode::RelayedL2Calldata => Self::RelayedL2Calldata, } } - fn parse(&self) -> configs::eth_sender::PubdataSendingMode { - use configs::eth_sender::PubdataSendingMode as To; + fn parse(&self) -> PubdataSendingMode { match self { - Self::Calldata => To::Calldata, - Self::Blobs => To::Blobs, - Self::Custom => To::Custom, - Self::RelayedL2Calldata => To::RelayedL2Calldata, + Self::Calldata => PubdataSendingMode::Calldata, + Self::Blobs => PubdataSendingMode::Blobs, + Self::Custom => PubdataSendingMode::Custom, + Self::RelayedL2Calldata => PubdataSendingMode::RelayedL2Calldata, } } } diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 54c38384a7a..ffa9d219f08 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -11,12 +11,12 @@ keywords.workspace = true categories.workspace = true [dependencies] +# **IMPORTANT.** Please do not add dependency on `zksync_config` etc. This crate has a heavy dependency graph as is. zksync_system_constants.workspace = true zksync_utils.workspace = true zksync_basic_types.workspace = true zksync_contracts.workspace = true zksync_mini_merkle_tree.workspace = true -zksync_config.workspace = true zksync_protobuf.workspace = true zksync_crypto_primitives.workspace = true @@ -39,7 +39,6 @@ itertools.workspace = true tracing.workspace = true # Crypto stuff -secp256k1.workspace = true blake2.workspace = true [dev-dependencies] diff --git a/core/lib/types/src/fee_model.rs b/core/lib/types/src/fee_model.rs index b59aa65b04e..ae346656ea6 100644 --- a/core/lib/types/src/fee_model.rs +++ b/core/lib/types/src/fee_model.rs @@ -1,11 +1,13 @@ +// FIXME: separate crate together with node_fee_model interfaces? + use std::num::NonZeroU64; use bigdecimal::{BigDecimal, ToPrimitive}; use serde::{Deserialize, Serialize}; -use zksync_config::configs::chain::{FeeModelVersion, StateKeeperConfig}; use zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE; +use zksync_utils::ceil_div_u256; -use crate::ProtocolVersionId; +use crate::{ProtocolVersionId, U256}; /// Fee input to be provided into the VM. It contains two options: /// - `L1Pegged`: L1 gas price is provided to the VM, and the pubdata price is derived from it. 
Using this option is required for the @@ -203,6 +205,7 @@ pub struct FeeModelConfigV2 { /// The maximum amount of pubdata that can be used by the batch. Note that if the calldata is used as pubdata, this variable should not exceed 128kb. pub max_pubdata_per_batch: u64, } + impl Default for FeeModelConfig { /// Config with all zeroes is not a valid config (since for instance having 0 max gas per batch may incur division by zero), /// so we implement a sensible default config here. @@ -213,24 +216,6 @@ impl Default for FeeModelConfig { } } -impl FeeModelConfig { - pub fn from_state_keeper_config(state_keeper_config: &StateKeeperConfig) -> Self { - match state_keeper_config.fee_model_version { - FeeModelVersion::V1 => Self::V1(FeeModelConfigV1 { - minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, - }), - FeeModelVersion::V2 => Self::V2(FeeModelConfigV2 { - minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, - compute_overhead_part: state_keeper_config.compute_overhead_part, - pubdata_overhead_part: state_keeper_config.pubdata_overhead_part, - batch_overhead_l1_gas: state_keeper_config.batch_overhead_l1_gas, - max_gas_per_batch: state_keeper_config.max_gas_per_batch, - max_pubdata_per_batch: state_keeper_config.max_pubdata_per_batch, - }), - } - } -} - #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct FeeParamsV1 { pub config: FeeModelConfigV1, @@ -337,4 +322,442 @@ impl FeeParams { l1_gas_price: 1_000_000_000, }) } + + /// Provides scaled [`BatchFeeInput`] based on these parameters. + pub fn scale( + self, + l1_gas_price_scale_factor: f64, + l1_pubdata_price_scale_factor: f64, + ) -> BatchFeeInput { + match self { + Self::V1(params) => BatchFeeInput::L1Pegged(compute_batch_fee_model_input_v1( + params, + l1_gas_price_scale_factor, + )), + Self::V2(params) => BatchFeeInput::PubdataIndependent(clip_batch_fee_model_input_v2( + compute_batch_fee_model_input_v2( + params, + l1_gas_price_scale_factor, + l1_pubdata_price_scale_factor, + ), + )), + } + } +} + +/// Calculates the batch fee input based on the main node parameters. +/// This function uses the `V1` fee model, i.e. where the pubdata price does not include the proving costs. +fn compute_batch_fee_model_input_v1( + params: FeeParamsV1, + l1_gas_price_scale_factor: f64, +) -> L1PeggedBatchFeeModelInput { + let l1_gas_price = (params.l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; + + L1PeggedBatchFeeModelInput { + l1_gas_price, + fair_l2_gas_price: params.config.minimal_l2_gas_price, + } +} + +/// Calculates the batch fee input based on the main node parameters. +/// This function uses the `V2` fee model, i.e. where the pubdata price does not include the proving costs. +fn compute_batch_fee_model_input_v2( + params: FeeParamsV2, + l1_gas_price_scale_factor: f64, + l1_pubdata_price_scale_factor: f64, +) -> PubdataIndependentBatchFeeModelInput { + let config = params.config(); + let l1_gas_price = params.l1_gas_price(); + let l1_pubdata_price = params.l1_pubdata_price(); + + let FeeModelConfigV2 { + minimal_l2_gas_price, + compute_overhead_part, + pubdata_overhead_part, + batch_overhead_l1_gas, + max_gas_per_batch, + max_pubdata_per_batch, + } = config; + + // Firstly, we scale the gas price and pubdata price in case it is needed. 
+ let l1_gas_price = (l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; + let l1_pubdata_price = (l1_pubdata_price as f64 * l1_pubdata_price_scale_factor) as u64; + + // While the final results of the calculations are not expected to have any overflows, the intermediate computations + // might, so we use U256 for them. + let l1_batch_overhead_wei = U256::from(l1_gas_price) * U256::from(batch_overhead_l1_gas); + + let fair_l2_gas_price = { + // Firstly, we calculate which part of the overall overhead each unit of L2 gas should cover. + let l1_batch_overhead_per_gas = + ceil_div_u256(l1_batch_overhead_wei, U256::from(max_gas_per_batch)); + + // Then, we multiply by the `compute_overhead_part` to get the overhead for the computation for each gas. + // Also, this means that if we almost never close batches because of compute, the `compute_overhead_part` should be zero and so + // it is possible that the computation costs include for no overhead. + let gas_overhead_wei = + (l1_batch_overhead_per_gas.as_u64() as f64 * compute_overhead_part) as u64; + + // We sum up the minimal L2 gas price (i.e. the raw prover/compute cost of a single L2 gas) and the overhead for batch being closed. + minimal_l2_gas_price + gas_overhead_wei + }; + + let fair_pubdata_price = { + // Firstly, we calculate which part of the overall overhead each pubdata byte should cover. + let l1_batch_overhead_per_pubdata = + ceil_div_u256(l1_batch_overhead_wei, U256::from(max_pubdata_per_batch)); + + // Then, we multiply by the `pubdata_overhead_part` to get the overhead for each pubdata byte. + // Also, this means that if we almost never close batches because of pubdata, the `pubdata_overhead_part` should be zero and so + // it is possible that the pubdata costs include no overhead. + let pubdata_overhead_wei = + (l1_batch_overhead_per_pubdata.as_u64() as f64 * pubdata_overhead_part) as u64; + + // We sum up the raw L1 pubdata price (i.e. the expected price of publishing a single pubdata byte) and the overhead for batch being closed. + l1_pubdata_price + pubdata_overhead_wei + }; + + PubdataIndependentBatchFeeModelInput { + l1_gas_price, + fair_l2_gas_price, + fair_pubdata_price, + } +} + +/// Bootloader places limitations on fair_l2_gas_price and fair_pubdata_price. +/// (MAX_ALLOWED_FAIR_L2_GAS_PRICE and MAX_ALLOWED_FAIR_PUBDATA_PRICE in bootloader code respectively) +/// Server needs to clip this prices in order to allow chain continues operation at a loss. The alternative +/// would be to stop accepting the transactions until the conditions improve. +/// TODO (PE-153): to be removed when bootloader limitation is removed +fn clip_batch_fee_model_input_v2( + fee_model: PubdataIndependentBatchFeeModelInput, +) -> PubdataIndependentBatchFeeModelInput { + /// MAX_ALLOWED_FAIR_L2_GAS_PRICE + const MAXIMUM_L2_GAS_PRICE: u64 = 10_000_000_000_000; + /// MAX_ALLOWED_FAIR_PUBDATA_PRICE + const MAXIMUM_PUBDATA_PRICE: u64 = 1_000_000_000_000_000; + PubdataIndependentBatchFeeModelInput { + l1_gas_price: fee_model.l1_gas_price, + fair_l2_gas_price: if fee_model.fair_l2_gas_price < MAXIMUM_L2_GAS_PRICE { + fee_model.fair_l2_gas_price + } else { + tracing::warn!( + "Fair l2 gas price {} exceeds maximum. Limitting to {}", + fee_model.fair_l2_gas_price, + MAXIMUM_L2_GAS_PRICE + ); + MAXIMUM_L2_GAS_PRICE + }, + fair_pubdata_price: if fee_model.fair_pubdata_price < MAXIMUM_PUBDATA_PRICE { + fee_model.fair_pubdata_price + } else { + tracing::warn!( + "Fair pubdata price {} exceeds maximum. 
Limitting to {}", + fee_model.fair_pubdata_price, + MAXIMUM_PUBDATA_PRICE + ); + MAXIMUM_PUBDATA_PRICE + }, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // To test that overflow never happens, we'll use giant L1 gas price, i.e. + // almost realistic very large value of 100k gwei. Since it is so large, we'll also + // use it for the L1 pubdata price. + const GWEI: u64 = 1_000_000_000; + const GIANT_L1_GAS_PRICE: u64 = 100_000 * GWEI; + + // As a small L2 gas price we'll use the value of 1 wei. + const SMALL_L1_GAS_PRICE: u64 = 1; + + #[test] + fn test_compute_batch_fee_model_input_v2_giant_numbers() { + let config = FeeModelConfigV2 { + minimal_l2_gas_price: GIANT_L1_GAS_PRICE, + // We generally don't expect those values to be larger than 1. Still, in theory the operator + // may need to set higher values in extreme cases. + compute_overhead_part: 5.0, + pubdata_overhead_part: 5.0, + // The batch overhead would likely never grow beyond that + batch_overhead_l1_gas: 1_000_000, + // Let's imagine that for some reason the limit is relatively small + max_gas_per_batch: 50_000_000, + // The pubdata will likely never go below that + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + GIANT_L1_GAS_PRICE, + GIANT_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + // We'll use scale factor of 3.0 + let input = compute_batch_fee_model_input_v2(params, 3.0, 3.0); + + assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE * 3); + assert_eq!(input.fair_l2_gas_price, 130_000_000_000_000); + assert_eq!(input.fair_pubdata_price, 15_300_000_000_000_000); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_small_numbers() { + // Here we assume that the operator wants to make the lives of users as cheap as possible. + let config = FeeModelConfigV2 { + minimal_l2_gas_price: SMALL_L1_GAS_PRICE, + compute_overhead_part: 0.0, + pubdata_overhead_part: 0.0, + batch_overhead_l1_gas: 0, + max_gas_per_batch: 50_000_000, + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + SMALL_L1_GAS_PRICE, + SMALL_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + let input = + clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); + + assert_eq!(input.l1_gas_price, SMALL_L1_GAS_PRICE); + assert_eq!(input.fair_l2_gas_price, SMALL_L1_GAS_PRICE); + assert_eq!(input.fair_pubdata_price, SMALL_L1_GAS_PRICE); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_only_pubdata_overhead() { + // Here we use sensible config, but when only pubdata is used to close the batch + let config = FeeModelConfigV2 { + minimal_l2_gas_price: 100_000_000_000, + compute_overhead_part: 0.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + GIANT_L1_GAS_PRICE, + GIANT_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + let input = + clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); + assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE); + // The fair L2 gas price is identical to the minimal one. + assert_eq!(input.fair_l2_gas_price, 100_000_000_000); + // The fair pubdata price is the minimal one plus the overhead. 
+ assert_eq!(input.fair_pubdata_price, 800_000_000_000_000); + } + + #[test] + fn test_compute_baxtch_fee_model_input_v2_only_compute_overhead() { + // Here we use sensible config, but when only compute is used to close the batch + let config = FeeModelConfigV2 { + minimal_l2_gas_price: 100_000_000_000, + compute_overhead_part: 1.0, + pubdata_overhead_part: 0.0, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + GIANT_L1_GAS_PRICE, + GIANT_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0); + assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE); + // The fair L2 gas price is identical to the minimal one, plus the overhead + assert_eq!(input.fair_l2_gas_price, 240_000_000_000); + // The fair pubdata price is equal to the original one. + assert_eq!(input.fair_pubdata_price, GIANT_L1_GAS_PRICE); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_param_tweaking() { + // In this test we generally checking that each param behaves as expected + let base_config = FeeModelConfigV2 { + minimal_l2_gas_price: 100_000_000_000, + compute_overhead_part: 0.5, + pubdata_overhead_part: 0.5, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let base_params = FeeParamsV2::new( + base_config, + 1_000_000_000, + 1_000_000_000, + BaseTokenConversionRatio::default(), + ); + + let base_input = compute_batch_fee_model_input_v2(base_params, 1.0, 1.0); + + let base_input_larger_l1_gas_price = compute_batch_fee_model_input_v2( + FeeParamsV2::new( + base_config, + 2_000_000_000, // double the L1 gas price + 1_000_000_000, + BaseTokenConversionRatio::default(), + ), + 1.0, + 1.0, + ); + let base_input_scaled_l1_gas_price = + compute_batch_fee_model_input_v2(base_params, 2.0, 1.0); + assert_eq!( + base_input_larger_l1_gas_price, base_input_scaled_l1_gas_price, + "Scaling has the correct effect for the L1 gas price" + ); + assert!( + base_input.fair_l2_gas_price < base_input_larger_l1_gas_price.fair_l2_gas_price, + "L1 gas price increase raises L2 gas price" + ); + assert!( + base_input.fair_pubdata_price < base_input_larger_l1_gas_price.fair_pubdata_price, + "L1 gas price increase raises pubdata price" + ); + + let base_input_larger_pubdata_price = compute_batch_fee_model_input_v2( + FeeParamsV2::new( + base_config, + 1_000_000_000, + 2_000_000_000, // double the L1 pubdata price + BaseTokenConversionRatio::default(), + ), + 1.0, + 1.0, + ); + let base_input_scaled_pubdata_price = + compute_batch_fee_model_input_v2(base_params, 1.0, 2.0); + assert_eq!( + base_input_larger_pubdata_price, base_input_scaled_pubdata_price, + "Scaling has the correct effect for the pubdata price" + ); + assert_eq!( + base_input.fair_l2_gas_price, base_input_larger_pubdata_price.fair_l2_gas_price, + "L1 pubdata increase has no effect on L2 gas price" + ); + assert!( + base_input.fair_pubdata_price < base_input_larger_pubdata_price.fair_pubdata_price, + "Pubdata price increase raises pubdata price" + ); + + let base_input_larger_max_gas = compute_batch_fee_model_input_v2( + FeeParamsV2::new( + FeeModelConfigV2 { + max_gas_per_batch: base_config.max_gas_per_batch * 2, + ..base_config + }, + base_params.l1_gas_price(), + base_params.l1_pubdata_price(), + BaseTokenConversionRatio::default(), + ), + 1.0, + 1.0, + ); + assert!( + base_input.fair_l2_gas_price > base_input_larger_max_gas.fair_l2_gas_price, + 
"Max gas increase lowers L2 gas price" + ); + assert_eq!( + base_input.fair_pubdata_price, base_input_larger_max_gas.fair_pubdata_price, + "Max gas increase has no effect on pubdata price" + ); + + let base_input_larger_max_pubdata = compute_batch_fee_model_input_v2( + FeeParamsV2::new( + FeeModelConfigV2 { + max_pubdata_per_batch: base_config.max_pubdata_per_batch * 2, + ..base_config + }, + base_params.l1_gas_price(), + base_params.l1_pubdata_price(), + BaseTokenConversionRatio::default(), + ), + 1.0, + 1.0, + ); + assert_eq!( + base_input.fair_l2_gas_price, base_input_larger_max_pubdata.fair_l2_gas_price, + "Max pubdata increase has no effect on L2 gas price" + ); + assert!( + base_input.fair_pubdata_price > base_input_larger_max_pubdata.fair_pubdata_price, + "Max pubdata increase lowers pubdata price" + ); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_l1_gas() { + // In this test we check the gas price limit works as expected + let config = FeeModelConfigV2 { + minimal_l2_gas_price: 100 * GWEI, + compute_overhead_part: 0.5, + pubdata_overhead_part: 0.5, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let l1_gas_price = 1_000_000_000 * GWEI; + let params = FeeParamsV2::new( + config, + l1_gas_price, + GIANT_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + let input = + clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); + assert_eq!(input.l1_gas_price, l1_gas_price); + // The fair L2 gas price is identical to the maximum + assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); + assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_conversion_rate() { + // In this test we check the gas price limit works as expected + let config = FeeModelConfigV2 { + minimal_l2_gas_price: GWEI, + compute_overhead_part: 0.5, + pubdata_overhead_part: 0.5, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + GWEI, + 2 * GWEI, + BaseTokenConversionRatio { + numerator: NonZeroU64::new(3_000_000).unwrap(), + denominator: NonZeroU64::new(1).unwrap(), + }, + ); + + let input = + clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); + assert_eq!(input.l1_gas_price, 3_000_000 * GWEI); + // The fair L2 gas price is identical to the maximum + assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); + assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); + } } diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index a50fc8a655b..69e6e42fd51 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -43,7 +43,6 @@ pub mod l2; pub mod l2_to_l1_log; pub mod priority_op_onchain_data; pub mod protocol_upgrade; -pub mod pubdata_da; pub mod snapshots; pub mod storage; pub mod system_contracts; diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 13e5ecc08ea..6da8e333495 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -10,7 +10,6 @@ use zksync_contracts::{ }; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; -use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ api::state_override::{Bytecode, OverrideAccount, 
OverrideState, StateOverride}, @@ -72,11 +71,7 @@ fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { } fn default_fee() -> Fee { - let fee_input = ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - 1.0, - 1.0, - ); + let fee_input = FeeParams::sensible_v1_default().scale(1.0, 1.0); let (max_fee_per_gas, gas_per_pubdata_limit) = derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::default().into()); Fee { diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index c83279709a3..d8080f1dba5 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -22,7 +22,6 @@ use zksync_multivm::interface::{ TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionMetrics, }; -use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction, @@ -476,11 +475,7 @@ async fn store_events( } fn scaled_sensible_fee_input(scale: f64) -> BatchFeeInput { - ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - scale, - scale, - ) + FeeParams::sensible_v1_default().scale(scale, scale) } #[derive(Debug)] diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index 20ba43a4166..e13e479117c 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -19,7 +19,7 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi, ethabi::Token, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, Address, L1BatchNumber, ProtocolVersionId, H256, U256, }; @@ -224,7 +224,7 @@ impl LocalL1BatchCommitData { .context("cannot detect DA source from reference commitment token")?; // For `PubdataDA::Calldata`, it's required that the pubdata fits into a single blob. 
- if matches!(da, PubdataDA::Calldata) { + if matches!(da, PubdataSendingMode::Calldata) { let pubdata_len = self .l1_batch .header @@ -258,7 +258,7 @@ impl LocalL1BatchCommitData { pub fn detect_da( protocol_version: ProtocolVersionId, reference: &Token, -) -> Result { +) -> Result { /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata const PUBDATA_SOURCE_CALLDATA: u8 = 0; const PUBDATA_SOURCE_BLOBS: u8 = 1; @@ -269,7 +269,7 @@ pub fn detect_da( } if protocol_version.is_pre_1_4_2() { - return Ok(PubdataDA::Calldata); + return Ok(PubdataSendingMode::Calldata); } let reference = match reference { @@ -291,9 +291,9 @@ pub fn detect_da( ))), }; match last_reference_token.first() { - Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataDA::Calldata), - Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataDA::Blobs), - Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataDA::Custom), + Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataSendingMode::Calldata), + Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataSendingMode::Blobs), + Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataSendingMode::Custom), Some(&byte) => Err(parse_error(format!( "unexpected first byte of the last reference token; expected one of [{PUBDATA_SOURCE_CALLDATA}, {PUBDATA_SOURCE_BLOBS}], \ got {byte}" diff --git a/core/node/consistency_checker/src/tests/mod.rs b/core/node/consistency_checker/src/tests/mod.rs index 40c447071cf..b09ef2b2272 100644 --- a/core/node/consistency_checker/src/tests/mod.rs +++ b/core/node/consistency_checker/src/tests/mod.rs @@ -64,7 +64,7 @@ pub(crate) fn build_commit_tx_input_data( let tokens = CommitBatches { last_committed_l1_batch: &batches[0], l1_batches: batches, - pubdata_da: PubdataDA::Calldata, + pubdata_da: PubdataSendingMode::Calldata, mode, } .into_tokens(); @@ -167,7 +167,7 @@ fn build_commit_tx_input_data_is_correct(commitment_mode: L1BatchCommitmentMode) .unwrap(); assert_eq!( commit_data, - CommitBatchInfo::new(commitment_mode, batch, PubdataDA::Calldata).into_token(), + CommitBatchInfo::new(commitment_mode, batch, PubdataSendingMode::Calldata).into_token(), ); } } diff --git a/core/node/eth_sender/src/aggregated_operations.rs b/core/node/eth_sender/src/aggregated_operations.rs index 2dfaf594265..5271d42d3b7 100644 --- a/core/node/eth_sender/src/aggregated_operations.rs +++ b/core/node/eth_sender/src/aggregated_operations.rs @@ -3,13 +3,17 @@ use std::ops; use zksync_l1_contract_interface::i_executor::methods::{ExecuteBatches, ProveBatches}; use zksync_types::{ aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, - pubdata_da::PubdataDA, L1BatchNumber, ProtocolVersionId, + pubdata_da::PubdataSendingMode, L1BatchNumber, ProtocolVersionId, }; #[allow(clippy::large_enum_variant)] #[derive(Debug, Clone)] pub enum AggregatedOperation { - Commit(L1BatchWithMetadata, Vec, PubdataDA), + Commit( + L1BatchWithMetadata, + Vec, + PubdataSendingMode, + ), PublishProofOnchain(ProveBatches), Execute(ExecuteBatches), } diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs index 4045e9ca3d8..432804a21b2 100644 --- a/core/node/eth_sender/src/aggregator.rs +++ b/core/node/eth_sender/src/aggregator.rs @@ -11,7 +11,7 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, helpers::unix_timestamp_ms, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, L1BatchNumber, 
ProtocolVersionId, }; @@ -36,7 +36,7 @@ pub struct Aggregator { /// means no wait is needed: nonces will still provide the correct ordering of /// transactions. operate_4844_mode: bool, - pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, commitment_mode: L1BatchCommitmentMode, } @@ -47,8 +47,7 @@ impl Aggregator { operate_4844_mode: bool, commitment_mode: L1BatchCommitmentMode, ) -> Self { - let pubdata_da = config.pubdata_sending_mode.into(); - + let pubdata_da = config.pubdata_sending_mode; Self { commit_criteria: vec![ Box::from(NumberCriterion { @@ -476,7 +475,7 @@ impl Aggregator { } } - pub fn pubdata_da(&self) -> PubdataDA { + pub fn pubdata_da(&self) -> PubdataSendingMode { self.pubdata_da } diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index a08d16f456a..ac9ed4aaaad 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -19,7 +19,7 @@ use zksync_types::{ ethabi::{Function, Token}, l2_to_l1_log::UserL2ToL1Log, protocol_version::{L1VerifierConfig, PACKED_SEMVER_MINOR_MASK}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, settlement::SettlementMode, web3::{contract::Error as Web3ContractError, BlockNumber}, Address, L2ChainId, ProtocolVersionId, SLChainId, H256, U256, @@ -505,11 +505,12 @@ impl EthTxAggregator { ) }; - let l1_batch_for_sidecar = if PubdataDA::Blobs == self.aggregator.pubdata_da() { - Some(l1_batches[0].clone()) - } else { - None - }; + let l1_batch_for_sidecar = + if PubdataSendingMode::Blobs == self.aggregator.pubdata_da() { + Some(l1_batches[0].clone()) + } else { + None + }; Self::encode_commit_data(encoding_fn, &commit_data, l1_batch_for_sidecar) } diff --git a/core/node/eth_sender/src/publish_criterion.rs b/core/node/eth_sender/src/publish_criterion.rs index 52d861ce0af..30f0820b148 100644 --- a/core/node/eth_sender/src/publish_criterion.rs +++ b/core/node/eth_sender/src/publish_criterion.rs @@ -8,7 +8,7 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, L1BatchNumber, }; @@ -202,7 +202,7 @@ impl L1BatchPublishCriterion for GasCriterion { pub struct DataSizeCriterion { pub op: AggregatedActionType, pub data_limit: usize, - pub pubdata_da: PubdataDA, + pub pubdata_da: PubdataSendingMode, pub commitment_mode: L1BatchCommitmentMode, } diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index 86a8c477f9f..646df1dc1a7 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use zksync_config::{ - configs::eth_sender::{ProofSendingMode, PubdataSendingMode, SenderConfig}, + configs::eth_sender::{ProofSendingMode, SenderConfig}, ContractsConfig, EthConfig, GasAdjusterConfig, }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; @@ -12,7 +12,7 @@ use zksync_node_test_utils::{create_l1_batch, l1_batch_metadata_to_commitment_ar use zksync_object_store::MockObjectStore; use zksync_types::{ aggregated_operations::AggregatedActionType, block::L1BatchHeader, - commitment::L1BatchCommitmentMode, eth_sender::EthTx, pubdata_da::PubdataDA, + commitment::L1BatchCommitmentMode, eth_sender::EthTx, pubdata_da::PubdataSendingMode, settlement::SettlementMode, Address, L1BatchNumber, ProtocolVersion, H256, }; @@ -485,9 +485,9 @@ impl EthSenderTester { pub async fn save_commit_tx(&mut self, 
l1_batch_number: L1BatchNumber) -> EthTx { assert_eq!(l1_batch_number, self.next_l1_batch_number_to_commit); let pubdata_mode = if self.pubdata_sending_mode == PubdataSendingMode::Blobs { - PubdataDA::Blobs + PubdataSendingMode::Blobs } else { - PubdataDA::Calldata + PubdataSendingMode::Calldata }; let operation = AggregatedOperation::Commit( l1_batch_with_metadata( diff --git a/core/node/fee_model/Cargo.toml b/core/node/fee_model/Cargo.toml index 8760b97d9db..a84a7c5c217 100644 --- a/core/node/fee_model/Cargo.toml +++ b/core/node/fee_model/Cargo.toml @@ -16,9 +16,7 @@ zksync_types.workspace = true zksync_dal.workspace = true zksync_config.workspace = true zksync_eth_client.workspace = true -zksync_utils.workspace = true zksync_web3_decl.workspace = true -bigdecimal.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 27cdc7f5d5e..459b8855b96 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -6,9 +6,12 @@ use std::{ }; use tokio::sync::watch; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; +use zksync_config::GasAdjusterConfig; use zksync_eth_client::EthFeeInterface; -use zksync_types::{commitment::L1BatchCommitmentMode, L1_GAS_PER_PUBDATA_BYTE, U256}; +use zksync_types::{ + commitment::L1BatchCommitmentMode, pubdata_da::PubdataSendingMode, L1_GAS_PER_PUBDATA_BYTE, + U256, +}; use zksync_web3_decl::client::{DynClient, L1, L2}; use self::metrics::METRICS; diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs index 47023203de0..ab649e2d7c9 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs @@ -1,9 +1,11 @@ use std::{collections::VecDeque, sync::RwLockReadGuard}; use test_casing::test_casing; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; +use zksync_config::GasAdjusterConfig; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; -use zksync_types::{commitment::L1BatchCommitmentMode, settlement::SettlementMode}; +use zksync_types::{ + commitment::L1BatchCommitmentMode, pubdata_da::PubdataSendingMode, settlement::SettlementMode, +}; use zksync_web3_decl::client::L2; use super::{GasAdjuster, GasStatistics, GasStatisticsInner}; diff --git a/core/node/fee_model/src/lib.rs b/core/node/fee_model/src/lib.rs index fe4f6a27ce2..380a279cccc 100644 --- a/core/node/fee_model/src/lib.rs +++ b/core/node/fee_model/src/lib.rs @@ -3,14 +3,9 @@ use std::{fmt, fmt::Debug, sync::Arc}; use anyhow::Context as _; use async_trait::async_trait; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_types::{ - fee_model::{ - BaseTokenConversionRatio, BatchFeeInput, FeeModelConfig, FeeModelConfigV2, FeeParams, - FeeParamsV1, FeeParamsV2, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput, - }, - U256, +use zksync_types::fee_model::{ + BaseTokenConversionRatio, BatchFeeInput, FeeModelConfig, FeeParams, FeeParamsV1, FeeParamsV2, }; -use zksync_utils::ceil_div_u256; use crate::l1_gas_price::GasAdjuster; @@ -34,13 +29,7 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { l1_pubdata_price_scale_factor: f64, ) -> anyhow::Result { let params = self.get_fee_model_params(); - Ok( - 
::default_batch_fee_input_scaled( - params, - l1_gas_price_scale_factor, - l1_pubdata_price_scale_factor, - ), - ) + Ok(params.scale(l1_gas_price_scale_factor, l1_pubdata_price_scale_factor)) } /// Returns the fee model parameters using the denomination of the base token used (WEI for ETH). @@ -48,27 +37,6 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { } impl dyn BatchFeeModelInputProvider { - /// Provides the default implementation of `get_batch_fee_input_scaled()` given [`FeeParams`]. - pub fn default_batch_fee_input_scaled( - params: FeeParams, - l1_gas_price_scale_factor: f64, - l1_pubdata_price_scale_factor: f64, - ) -> BatchFeeInput { - match params { - FeeParams::V1(params) => BatchFeeInput::L1Pegged(compute_batch_fee_model_input_v1( - params, - l1_gas_price_scale_factor, - )), - FeeParams::V2(params) => BatchFeeInput::PubdataIndependent( - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2( - params, - l1_gas_price_scale_factor, - l1_pubdata_price_scale_factor, - )), - ), - } - } - /// Returns the batch fee input as-is, i.e. without any scaling for the L1 gas and pubdata prices. pub async fn get_batch_fee_input(&self) -> anyhow::Result { self.get_batch_fee_input_scaled(1.0, 1.0).await @@ -168,122 +136,6 @@ impl BatchFeeModelInputProvider for ApiFeeInputProvider { } } -/// Calculates the batch fee input based on the main node parameters. -/// This function uses the `V1` fee model, i.e. where the pubdata price does not include the proving costs. -fn compute_batch_fee_model_input_v1( - params: FeeParamsV1, - l1_gas_price_scale_factor: f64, -) -> L1PeggedBatchFeeModelInput { - let l1_gas_price = (params.l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; - - L1PeggedBatchFeeModelInput { - l1_gas_price, - fair_l2_gas_price: params.config.minimal_l2_gas_price, - } -} - -/// Calculates the batch fee input based on the main node parameters. -/// This function uses the `V2` fee model, i.e. where the pubdata price does not include the proving costs. -fn compute_batch_fee_model_input_v2( - params: FeeParamsV2, - l1_gas_price_scale_factor: f64, - l1_pubdata_price_scale_factor: f64, -) -> PubdataIndependentBatchFeeModelInput { - let config = params.config(); - let l1_gas_price = params.l1_gas_price(); - let l1_pubdata_price = params.l1_pubdata_price(); - - let FeeModelConfigV2 { - minimal_l2_gas_price, - compute_overhead_part, - pubdata_overhead_part, - batch_overhead_l1_gas, - max_gas_per_batch, - max_pubdata_per_batch, - } = config; - - // Firstly, we scale the gas price and pubdata price in case it is needed. - let l1_gas_price = (l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; - let l1_pubdata_price = (l1_pubdata_price as f64 * l1_pubdata_price_scale_factor) as u64; - - // While the final results of the calculations are not expected to have any overflows, the intermediate computations - // might, so we use U256 for them. - let l1_batch_overhead_wei = U256::from(l1_gas_price) * U256::from(batch_overhead_l1_gas); - - let fair_l2_gas_price = { - // Firstly, we calculate which part of the overall overhead each unit of L2 gas should cover. - let l1_batch_overhead_per_gas = - ceil_div_u256(l1_batch_overhead_wei, U256::from(max_gas_per_batch)); - - // Then, we multiply by the `compute_overhead_part` to get the overhead for the computation for each gas. 
- // Also, this means that if we almost never close batches because of compute, the `compute_overhead_part` should be zero and so - // it is possible that the computation costs include for no overhead. - let gas_overhead_wei = - (l1_batch_overhead_per_gas.as_u64() as f64 * compute_overhead_part) as u64; - - // We sum up the minimal L2 gas price (i.e. the raw prover/compute cost of a single L2 gas) and the overhead for batch being closed. - minimal_l2_gas_price + gas_overhead_wei - }; - - let fair_pubdata_price = { - // Firstly, we calculate which part of the overall overhead each pubdata byte should cover. - let l1_batch_overhead_per_pubdata = - ceil_div_u256(l1_batch_overhead_wei, U256::from(max_pubdata_per_batch)); - - // Then, we multiply by the `pubdata_overhead_part` to get the overhead for each pubdata byte. - // Also, this means that if we almost never close batches because of pubdata, the `pubdata_overhead_part` should be zero and so - // it is possible that the pubdata costs include no overhead. - let pubdata_overhead_wei = - (l1_batch_overhead_per_pubdata.as_u64() as f64 * pubdata_overhead_part) as u64; - - // We sum up the raw L1 pubdata price (i.e. the expected price of publishing a single pubdata byte) and the overhead for batch being closed. - l1_pubdata_price + pubdata_overhead_wei - }; - - PubdataIndependentBatchFeeModelInput { - l1_gas_price, - fair_l2_gas_price, - fair_pubdata_price, - } -} - -/// Bootloader places limitations on fair_l2_gas_price and fair_pubdata_price. -/// (MAX_ALLOWED_FAIR_L2_GAS_PRICE and MAX_ALLOWED_FAIR_PUBDATA_PRICE in bootloader code respectively) -/// Server needs to clip this prices in order to allow chain continues operation at a loss. The alternative -/// would be to stop accepting the transactions until the conditions improve. -/// TODO (PE-153): to be removed when bootloader limitation is removed -fn clip_batch_fee_model_input_v2( - fee_model: PubdataIndependentBatchFeeModelInput, -) -> PubdataIndependentBatchFeeModelInput { - /// MAX_ALLOWED_FAIR_L2_GAS_PRICE - const MAXIMUM_L2_GAS_PRICE: u64 = 10_000_000_000_000; - /// MAX_ALLOWED_FAIR_PUBDATA_PRICE - const MAXIMUM_PUBDATA_PRICE: u64 = 1_000_000_000_000_000; - PubdataIndependentBatchFeeModelInput { - l1_gas_price: fee_model.l1_gas_price, - fair_l2_gas_price: if fee_model.fair_l2_gas_price < MAXIMUM_L2_GAS_PRICE { - fee_model.fair_l2_gas_price - } else { - tracing::warn!( - "Fair l2 gas price {} exceeds maximum. Limitting to {}", - fee_model.fair_l2_gas_price, - MAXIMUM_L2_GAS_PRICE - ); - MAXIMUM_L2_GAS_PRICE - }, - fair_pubdata_price: if fee_model.fair_pubdata_price < MAXIMUM_PUBDATA_PRICE { - fee_model.fair_pubdata_price - } else { - tracing::warn!( - "Fair pubdata price {} exceeds maximum. Limitting to {}", - fee_model.fair_pubdata_price, - MAXIMUM_PUBDATA_PRICE - ); - MAXIMUM_PUBDATA_PRICE - }, - } -} - /// Mock [`BatchFeeModelInputProvider`] implementation that returns a constant value. /// Intended to be used in tests only. 
#[derive(Debug)] @@ -307,308 +159,17 @@ mod tests { use std::num::NonZeroU64; use l1_gas_price::GasAdjusterClient; - use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; + use zksync_config::GasAdjusterConfig; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; - use zksync_types::{commitment::L1BatchCommitmentMode, fee_model::BaseTokenConversionRatio}; + use zksync_types::{ + commitment::L1BatchCommitmentMode, + fee_model::{BaseTokenConversionRatio, FeeModelConfigV2}, + pubdata_da::PubdataSendingMode, + U256, + }; use super::*; - // To test that overflow never happens, we'll use giant L1 gas price, i.e. - // almost realistic very large value of 100k gwei. Since it is so large, we'll also - // use it for the L1 pubdata price. - const GWEI: u64 = 1_000_000_000; - const GIANT_L1_GAS_PRICE: u64 = 100_000 * GWEI; - - // As a small L2 gas price we'll use the value of 1 wei. - const SMALL_L1_GAS_PRICE: u64 = 1; - - #[test] - fn test_compute_batch_fee_model_input_v2_giant_numbers() { - let config = FeeModelConfigV2 { - minimal_l2_gas_price: GIANT_L1_GAS_PRICE, - // We generally don't expect those values to be larger than 1. Still, in theory the operator - // may need to set higher values in extreme cases. - compute_overhead_part: 5.0, - pubdata_overhead_part: 5.0, - // The batch overhead would likely never grow beyond that - batch_overhead_l1_gas: 1_000_000, - // Let's imagine that for some reason the limit is relatively small - max_gas_per_batch: 50_000_000, - // The pubdata will likely never go below that - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GIANT_L1_GAS_PRICE, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - // We'll use scale factor of 3.0 - let input = compute_batch_fee_model_input_v2(params, 3.0, 3.0); - - assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE * 3); - assert_eq!(input.fair_l2_gas_price, 130_000_000_000_000); - assert_eq!(input.fair_pubdata_price, 15_300_000_000_000_000); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_small_numbers() { - // Here we assume that the operator wants to make the lives of users as cheap as possible. 
- let config = FeeModelConfigV2 { - minimal_l2_gas_price: SMALL_L1_GAS_PRICE, - compute_overhead_part: 0.0, - pubdata_overhead_part: 0.0, - batch_overhead_l1_gas: 0, - max_gas_per_batch: 50_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - SMALL_L1_GAS_PRICE, - SMALL_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - - assert_eq!(input.l1_gas_price, SMALL_L1_GAS_PRICE); - assert_eq!(input.fair_l2_gas_price, SMALL_L1_GAS_PRICE); - assert_eq!(input.fair_pubdata_price, SMALL_L1_GAS_PRICE); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_only_pubdata_overhead() { - // Here we use sensible config, but when only pubdata is used to close the batch - let config = FeeModelConfigV2 { - minimal_l2_gas_price: 100_000_000_000, - compute_overhead_part: 0.0, - pubdata_overhead_part: 1.0, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GIANT_L1_GAS_PRICE, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE); - // The fair L2 gas price is identical to the minimal one. - assert_eq!(input.fair_l2_gas_price, 100_000_000_000); - // The fair pubdata price is the minimal one plus the overhead. - assert_eq!(input.fair_pubdata_price, 800_000_000_000_000); - } - - #[test] - fn test_compute_baxtch_fee_model_input_v2_only_compute_overhead() { - // Here we use sensible config, but when only compute is used to close the batch - let config = FeeModelConfigV2 { - minimal_l2_gas_price: 100_000_000_000, - compute_overhead_part: 1.0, - pubdata_overhead_part: 0.0, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GIANT_L1_GAS_PRICE, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0); - assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE); - // The fair L2 gas price is identical to the minimal one, plus the overhead - assert_eq!(input.fair_l2_gas_price, 240_000_000_000); - // The fair pubdata price is equal to the original one. 
- assert_eq!(input.fair_pubdata_price, GIANT_L1_GAS_PRICE); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_param_tweaking() { - // In this test we generally checking that each param behaves as expected - let base_config = FeeModelConfigV2 { - minimal_l2_gas_price: 100_000_000_000, - compute_overhead_part: 0.5, - pubdata_overhead_part: 0.5, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let base_params = FeeParamsV2::new( - base_config, - 1_000_000_000, - 1_000_000_000, - BaseTokenConversionRatio::default(), - ); - - let base_input = compute_batch_fee_model_input_v2(base_params, 1.0, 1.0); - - let base_input_larger_l1_gas_price = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - base_config, - 2_000_000_000, // double the L1 gas price - 1_000_000_000, - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - let base_input_scaled_l1_gas_price = - compute_batch_fee_model_input_v2(base_params, 2.0, 1.0); - assert_eq!( - base_input_larger_l1_gas_price, base_input_scaled_l1_gas_price, - "Scaling has the correct effect for the L1 gas price" - ); - assert!( - base_input.fair_l2_gas_price < base_input_larger_l1_gas_price.fair_l2_gas_price, - "L1 gas price increase raises L2 gas price" - ); - assert!( - base_input.fair_pubdata_price < base_input_larger_l1_gas_price.fair_pubdata_price, - "L1 gas price increase raises pubdata price" - ); - - let base_input_larger_pubdata_price = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - base_config, - 1_000_000_000, - 2_000_000_000, // double the L1 pubdata price - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - let base_input_scaled_pubdata_price = - compute_batch_fee_model_input_v2(base_params, 1.0, 2.0); - assert_eq!( - base_input_larger_pubdata_price, base_input_scaled_pubdata_price, - "Scaling has the correct effect for the pubdata price" - ); - assert_eq!( - base_input.fair_l2_gas_price, base_input_larger_pubdata_price.fair_l2_gas_price, - "L1 pubdata increase has no effect on L2 gas price" - ); - assert!( - base_input.fair_pubdata_price < base_input_larger_pubdata_price.fair_pubdata_price, - "Pubdata price increase raises pubdata price" - ); - - let base_input_larger_max_gas = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - FeeModelConfigV2 { - max_gas_per_batch: base_config.max_gas_per_batch * 2, - ..base_config - }, - base_params.l1_gas_price(), - base_params.l1_pubdata_price(), - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - assert!( - base_input.fair_l2_gas_price > base_input_larger_max_gas.fair_l2_gas_price, - "Max gas increase lowers L2 gas price" - ); - assert_eq!( - base_input.fair_pubdata_price, base_input_larger_max_gas.fair_pubdata_price, - "Max gas increase has no effect on pubdata price" - ); - - let base_input_larger_max_pubdata = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - FeeModelConfigV2 { - max_pubdata_per_batch: base_config.max_pubdata_per_batch * 2, - ..base_config - }, - base_params.l1_gas_price(), - base_params.l1_pubdata_price(), - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - assert_eq!( - base_input.fair_l2_gas_price, base_input_larger_max_pubdata.fair_l2_gas_price, - "Max pubdata increase has no effect on L2 gas price" - ); - assert!( - base_input.fair_pubdata_price > base_input_larger_max_pubdata.fair_pubdata_price, - "Max pubdata increase lowers pubdata price" - ); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_l1_gas() { - 
// In this test we check the gas price limit works as expected - let config = FeeModelConfigV2 { - minimal_l2_gas_price: 100 * GWEI, - compute_overhead_part: 0.5, - pubdata_overhead_part: 0.5, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let l1_gas_price = 1_000_000_000 * GWEI; - let params = FeeParamsV2::new( - config, - l1_gas_price, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - assert_eq!(input.l1_gas_price, l1_gas_price); - // The fair L2 gas price is identical to the maximum - assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); - assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_conversion_rate() { - // In this test we check the gas price limit works as expected - let config = FeeModelConfigV2 { - minimal_l2_gas_price: GWEI, - compute_overhead_part: 0.5, - pubdata_overhead_part: 0.5, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GWEI, - 2 * GWEI, - BaseTokenConversionRatio { - numerator: NonZeroU64::new(3_000_000).unwrap(), - denominator: NonZeroU64::new(1).unwrap(), - }, - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - assert_eq!(input.l1_gas_price, 3_000_000 * GWEI); - // The fair L2 gas price is identical to the maximum - assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); - assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); - } - #[derive(Debug, Clone)] struct DummyTokenRatioProvider { ratio: BaseTokenConversionRatio, diff --git a/core/node/node_framework/src/implementations/layers/gas_adjuster.rs b/core/node/node_framework/src/implementations/layers/gas_adjuster.rs index 229700289a7..241c4d829be 100644 --- a/core/node/node_framework/src/implementations/layers/gas_adjuster.rs +++ b/core/node/node_framework/src/implementations/layers/gas_adjuster.rs @@ -1,8 +1,9 @@ use std::sync::Arc; use anyhow::Context; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig, GenesisConfig}; +use zksync_config::{GasAdjusterConfig, GenesisConfig}; use zksync_node_fee_model::l1_gas_price::GasAdjuster; +use zksync_types::pubdata_da::PubdataSendingMode; use crate::{ implementations::resources::{ diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index 35c4bc3fc20..28f81bb4543 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use zksync_config::configs::chain::StateKeeperConfig; +use zksync_config::configs::chain::{FeeModelVersion, StateKeeperConfig}; use zksync_node_fee_model::{ApiFeeInputProvider, MainNodeFeeInputProvider}; -use zksync_types::fee_model::FeeModelConfig; +use zksync_types::fee_model::{FeeModelConfig, FeeModelConfigV1, FeeModelConfigV2}; use crate::{ implementations::resources::{ @@ -20,7 +20,7 @@ use crate::{ /// Adds several resources that depend on L1 gas price. 
#[derive(Debug)] pub struct L1GasLayer { - state_keeper_config: StateKeeperConfig, + fee_model_config: FeeModelConfig, } #[derive(Debug, FromContext)] @@ -42,9 +42,25 @@ pub struct Output { } impl L1GasLayer { - pub fn new(state_keeper_config: StateKeeperConfig) -> Self { + pub fn new(state_keeper_config: &StateKeeperConfig) -> Self { Self { - state_keeper_config, + fee_model_config: Self::map_config(state_keeper_config), + } + } + + fn map_config(state_keeper_config: &StateKeeperConfig) -> FeeModelConfig { + match state_keeper_config.fee_model_version { + FeeModelVersion::V1 => FeeModelConfig::V1(FeeModelConfigV1 { + minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, + }), + FeeModelVersion::V2 => FeeModelConfig::V2(FeeModelConfigV2 { + minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, + compute_overhead_part: state_keeper_config.compute_overhead_part, + pubdata_overhead_part: state_keeper_config.pubdata_overhead_part, + batch_overhead_l1_gas: state_keeper_config.batch_overhead_l1_gas, + max_gas_per_batch: state_keeper_config.max_gas_per_batch, + max_pubdata_per_batch: state_keeper_config.max_pubdata_per_batch, + }), } } } @@ -64,7 +80,7 @@ impl WiringLayer for L1GasLayer { let main_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( input.gas_adjuster.0.clone(), ratio_provider.0, - FeeModelConfig::from_state_keeper_config(&self.state_keeper_config), + self.fee_model_config, )); let replica_pool = input.replica_pool.get().await?; diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 062fc426e8c..ad189831bad 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -4,7 +4,7 @@ use std::{slice, sync::Arc, time::Duration}; use zksync_base_token_adjuster::NoOpRatioProvider; use zksync_config::{ - configs::{chain::StateKeeperConfig, eth_sender::PubdataSendingMode, wallets::Wallets}, + configs::{chain::StateKeeperConfig, wallets::Wallets}, GasAdjusterConfig, }; use zksync_contracts::BaseSystemContracts; @@ -28,6 +28,7 @@ use zksync_types::{ fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV2}, l2::L2Tx, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, + pubdata_da::PubdataSendingMode, system_contracts::get_system_smart_contracts, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, H256, }; diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 1408f2b23cd..dd2df67902f 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8522,7 +8522,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -8530,7 +8529,6 @@ dependencies = [ "thiserror", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", diff --git a/zkstack_cli/Cargo.lock b/zkstack_cli/Cargo.lock index 8750de36c75..63561c02b9d 100644 --- a/zkstack_cli/Cargo.lock +++ b/zkstack_cli/Cargo.lock @@ -6977,7 +6977,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -6985,7 +6984,6 @@ dependencies = [ "thiserror", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", From d6de4f40ddce339c760c95e2bf4b8aceb571af7f Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Mon, 21 Oct 2024 22:17:08 +1100 Subject: [PATCH 097/140] feat(external-node): save protocol version before opening a batch (#3136) 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Moved protocol version fetching a bit earlier in the flow for EN so that it is present by the time we insert the corresponding unsealed batch. ## Why ❔ Persisted unsealed batches now uniformly have protocol version present in them (both main and external nodes). ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/lib/dal/src/protocol_versions_dal.rs | 41 ++++- core/node/node_sync/src/external_io.rs | 211 +++++++++++++++++----- 2 files changed, 201 insertions(+), 51 deletions(-) diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 3382d8c836e..fcc756e3006 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -190,6 +190,43 @@ impl ProtocolVersionsDal<'_, '_> { ProtocolVersionId::try_from(row.id as u16).map_err(|err| sqlx::Error::Decode(err.into())) } + /// Returns base system contracts' hashes. Prefer `load_base_system_contracts_by_version_id` if + /// you also want to load the contracts themselves AND expect the contracts to be in the DB + /// already. + pub async fn get_base_system_contract_hashes_by_version_id( + &mut self, + version_id: u16, + ) -> anyhow::Result> { + let row = sqlx::query!( + r#" + SELECT + bootloader_code_hash, + default_account_code_hash, + evm_emulator_code_hash + FROM + protocol_versions + WHERE + id = $1 + "#, + i32::from(version_id) + ) + .instrument("get_base_system_contract_hashes_by_version_id") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) + .await + .context("cannot fetch system contract hashes")?; + + Ok(if let Some(row) = row { + Some(BaseSystemContractsHashes { + bootloader: H256::from_slice(&row.bootloader_code_hash), + default_aa: H256::from_slice(&row.default_account_code_hash), + evm_emulator: row.evm_emulator_code_hash.as_deref().map(H256::from_slice), + }) + } else { + None + }) + } + pub async fn load_base_system_contracts_by_version_id( &mut self, version_id: u16, @@ -207,7 +244,9 @@ impl ProtocolVersionsDal<'_, '_> { "#, i32::from(version_id) ) - .fetch_optional(self.storage.conn()) + .instrument("load_base_system_contracts_by_version_id") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) .await .context("cannot fetch system contract hashes")?; diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 5e3a5ce9f46..a0be233a002 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -104,6 +104,63 @@ impl ExternalIO { } }) } + + async fn ensure_protocol_version_is_saved( + &self, + protocol_version: ProtocolVersionId, + ) -> anyhow::Result<()> { + let base_system_contract_hashes = self + .pool + .connection_tagged("sync_layer") + .await? + .protocol_versions_dal() + .get_base_system_contract_hashes_by_version_id(protocol_version as u16) + .await?; + if base_system_contract_hashes.is_some() { + return Ok(()); + } + tracing::info!("Fetching protocol version {protocol_version:?} from the main node"); + + let protocol_version = self + .main_node_client + .fetch_protocol_version(protocol_version) + .await + .context("failed to fetch protocol version from the main node")? 
+ .context("protocol version is missing on the main node")?; + let minor = protocol_version + .minor_version() + .context("Missing minor protocol version")?; + let bootloader_code_hash = protocol_version + .bootloader_code_hash() + .context("Missing bootloader code hash")?; + let default_account_code_hash = protocol_version + .default_account_code_hash() + .context("Missing default account code hash")?; + let evm_emulator_code_hash = protocol_version.evm_emulator_code_hash(); + let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); + self.pool + .connection_tagged("sync_layer") + .await? + .protocol_versions_dal() + .save_protocol_version( + ProtocolSemanticVersion { + minor: minor + .try_into() + .context("cannot convert protocol version")?, + patch: VersionPatch(0), + }, + protocol_version.timestamp, + Default::default(), // verification keys are unused for EN + BaseSystemContractsHashes { + bootloader: bootloader_code_hash, + default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, + }, + l2_system_upgrade_tx_hash, + ) + .await?; + Ok(()) + } } impl IoSealCriteria for ExternalIO { @@ -254,6 +311,8 @@ impl StateKeeperIO for ExternalIO { cursor.next_l2_block ); + self.ensure_protocol_version_is_saved(params.protocol_version) + .await?; self.pool .connection_tagged("sync_layer") .await? @@ -261,7 +320,7 @@ impl StateKeeperIO for ExternalIO { .insert_l1_batch(UnsealedL1BatchHeader { number: cursor.l1_batch, timestamp: params.first_l2_block.timestamp, - protocol_version: None, + protocol_version: Some(params.protocol_version), fee_address: params.operator_address, fee_input: params.fee_input, }) @@ -351,63 +410,21 @@ impl StateKeeperIO for ExternalIO { .connection_tagged("sync_layer") .await? .protocol_versions_dal() - .load_base_system_contracts_by_version_id(protocol_version as u16) - .await - .context("failed loading base system contracts")?; - - if let Some(contracts) = base_system_contracts { - return Ok(contracts); - } - tracing::info!("Fetching protocol version {protocol_version:?} from the main node"); - - let protocol_version = self - .main_node_client - .fetch_protocol_version(protocol_version) - .await - .context("failed to fetch protocol version from the main node")? - .context("protocol version is missing on the main node")?; - let minor = protocol_version - .minor_version() - .context("Missing minor protocol version")?; - let bootloader_code_hash = protocol_version - .bootloader_code_hash() - .context("Missing bootloader code hash")?; - let default_account_code_hash = protocol_version - .default_account_code_hash() - .context("Missing default account code hash")?; - let evm_emulator_code_hash = protocol_version.evm_emulator_code_hash(); - let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); - self.pool - .connection_tagged("sync_layer") + .get_base_system_contract_hashes_by_version_id(protocol_version as u16) .await? - .protocol_versions_dal() - .save_protocol_version( - ProtocolSemanticVersion { - minor: minor - .try_into() - .context("cannot convert protocol version")?, - patch: VersionPatch(0), - }, - protocol_version.timestamp, - Default::default(), // verification keys are unused for EN - BaseSystemContractsHashes { - bootloader: bootloader_code_hash, - default_aa: default_account_code_hash, - evm_emulator: evm_emulator_code_hash, - }, - l2_system_upgrade_tx_hash, - ) - .await?; + .with_context(|| { + format!("Cannot load base system contracts' hashes for {protocol_version:?}. 
They should already be present") + })?; let bootloader = self - .get_base_system_contract(bootloader_code_hash, cursor.next_l2_block) + .get_base_system_contract(base_system_contracts.bootloader, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch bootloader code for {protocol_version:?}"))?; let default_aa = self - .get_base_system_contract(default_account_code_hash, cursor.next_l2_block) + .get_base_system_contract(base_system_contracts.default_aa, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch default AA code for {protocol_version:?}"))?; - let evm_emulator = if let Some(hash) = evm_emulator_code_hash { + let evm_emulator = if let Some(hash) = base_system_contracts.evm_emulator { Some( self.get_base_system_contract(hash, cursor.next_l2_block) .await @@ -459,3 +476,97 @@ impl StateKeeperIO for ExternalIO { Ok(hash) } } + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use zksync_dal::{ConnectionPool, CoreDal}; + use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; + use zksync_state_keeper::{io::L1BatchParams, L2BlockParams, StateKeeperIO}; + use zksync_types::{ + api, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, + H256, + }; + + use crate::{sync_action::SyncAction, testonly::MockMainNodeClient, ActionQueue, ExternalIO}; + + #[tokio::test] + async fn insert_batch_with_protocol_version() { + // Whenever ExternalIO inserts an unsealed batch into DB it should populate it with protocol + // version and make sure that it is present in the DB (i.e. fetch it from main node if not). + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + insert_genesis_batch(&mut conn, &GenesisParams::mock()) + .await + .unwrap(); + let (actions_sender, action_queue) = ActionQueue::new(); + let mut client = MockMainNodeClient::default(); + let next_protocol_version = api::ProtocolVersion { + minor_version: Some(ProtocolVersionId::next() as u16), + timestamp: 1, + bootloader_code_hash: Some(H256::repeat_byte(1)), + default_account_code_hash: Some(H256::repeat_byte(1)), + evm_emulator_code_hash: Some(H256::repeat_byte(1)), + ..api::ProtocolVersion::default() + }; + client.insert_protocol_version(next_protocol_version.clone()); + let mut external_io = ExternalIO::new( + pool.clone(), + action_queue, + Box::new(client), + L2ChainId::default(), + ) + .unwrap(); + + let (cursor, _) = external_io.initialize().await.unwrap(); + let params = L1BatchParams { + protocol_version: ProtocolVersionId::next(), + validation_computational_gas_limit: u32::MAX, + operator_address: Default::default(), + fee_input: BatchFeeInput::pubdata_independent(2, 3, 4), + first_l2_block: L2BlockParams { + timestamp: 1, + virtual_blocks: 1, + }, + }; + actions_sender + .push_action_unchecked(SyncAction::OpenBatch { + params: params.clone(), + number: L1BatchNumber(1), + first_l2_block_number: L2BlockNumber(1), + }) + .await + .unwrap(); + let fetched_params = external_io + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .unwrap(); + assert_eq!(fetched_params, params); + + // Verify that the next protocol version is in DB + let fetched_protocol_version = conn + .protocol_versions_dal() + .get_protocol_version_with_latest_patch(ProtocolVersionId::next()) + .await + .unwrap() + .unwrap(); + assert_eq!( + fetched_protocol_version.version.minor as u16, + next_protocol_version.minor_version.unwrap() + ); + + // Verify that the unsealed batch has protocol version + let 
unsealed_batch = conn + .blocks_dal() + .get_unsealed_l1_batch() + .await + .unwrap() + .unwrap(); + assert_eq!( + unsealed_batch.protocol_version, + Some(fetched_protocol_version.version.minor) + ); + } +} From cd160830a0b7ebe5af4ecbd944da1cd51af3528a Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Mon, 21 Oct 2024 15:35:40 +0300 Subject: [PATCH 098/140] fix(mempool): minor mempool improvements (#3113) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixes two mempool issues: 1) `gc` does not actually ensure `size <= capacity` (found by @jcsec-security); fix: purge the accounts with the lowest score from the priority queue. 2) The mempool actor built the L2 tx filter from the current L1 gas prices, which doesn't make sense when there is an open batch; in that case the batch's fee params should be used. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/lib/mempool/src/mempool_store.rs | 55 +++++++++++++++++---- core/lib/mempool/src/tests.rs | 50 +++++++++++++------ core/node/state_keeper/src/mempool_actor.rs | 31 +++++++++--- 3 files changed, 104 insertions(+), 32 deletions(-) diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs index 334a4783a76..f6f9b72f9b6 100644 --- a/core/lib/mempool/src/mempool_store.rs +++ b/core/lib/mempool/src/mempool_store.rs @@ -1,4 +1,4 @@ -use std::collections::{hash_map, BTreeSet, HashMap, HashSet}; +use std::collections::{hash_map, BTreeSet, HashMap}; use zksync_types::{ l1::L1Tx, l2::L2Tx, Address, ExecuteTransactionCommon, Nonce, PriorityOpId, Transaction, @@ -221,22 +221,57 @@ impl MempoolStore { } fn gc(&mut self) -> Vec
{ - if self.size >= self.capacity { - let index: HashSet<_> = self + if self.size > self.capacity { + let mut transactions = std::mem::take(&mut self.l2_transactions_per_account); + let mut possibly_kept: Vec<_> = self .l2_priority_queue .iter() - .map(|pointer| pointer.account) + .rev() + .filter_map(|pointer| { + transactions + .remove(&pointer.account) + .map(|txs| (pointer.account, txs)) + }) .collect(); - let transactions = std::mem::take(&mut self.l2_transactions_per_account); - let (kept, drained) = transactions + + let mut sum = 0; + let mut number_of_accounts_kept = 0; + for (_, txs) in &possibly_kept { + sum += txs.len(); + if sum <= self.capacity as usize { + number_of_accounts_kept += 1; + } else { + break; + } + } + if number_of_accounts_kept == 0 && !possibly_kept.is_empty() { + tracing::warn!("mempool capacity is too low to handle txs from single account, consider increasing capacity"); + // Keep at least one entry, otherwise mempool won't return any new L2 tx to process. + number_of_accounts_kept = 1; + } + let (kept, drained) = { + let mut drained: Vec<_> = transactions.into_keys().collect(); + let also_drained = possibly_kept + .split_off(number_of_accounts_kept) + .into_iter() + .map(|(address, _)| address); + drained.extend(also_drained); + + (possibly_kept, drained) + }; + + let l2_priority_queue = std::mem::take(&mut self.l2_priority_queue); + self.l2_priority_queue = l2_priority_queue .into_iter() - .partition(|(address, _)| index.contains(address)); - self.l2_transactions_per_account = kept; + .rev() + .take(number_of_accounts_kept) + .collect(); + self.l2_transactions_per_account = kept.into_iter().collect(); self.size = self .l2_transactions_per_account .iter() - .fold(0, |agg, (_, tnxs)| agg + tnxs.len() as u64); - return drained.into_keys().collect(); + .fold(0, |agg, (_, txs)| agg + txs.len() as u64); + return drained; } vec![] } diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index 96ef600984f..b84ab7d5765 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -321,32 +321,26 @@ fn stashed_accounts() { #[test] fn mempool_capacity() { - let mut mempool = MempoolStore::new(PriorityOpId(0), 5); + let mut mempool = MempoolStore::new(PriorityOpId(0), 4); let account0 = Address::random(); let account1 = Address::random(); let account2 = Address::random(); + let account3 = Address::random(); let transactions = vec![ gen_l2_tx(account0, Nonce(0)), gen_l2_tx(account0, Nonce(1)), gen_l2_tx(account0, Nonce(2)), - gen_l2_tx(account1, Nonce(1)), - gen_l2_tx(account2, Nonce(1)), + gen_l2_tx_with_timestamp(account1, Nonce(0), unix_timestamp_ms() + 1), + gen_l2_tx_with_timestamp(account2, Nonce(0), unix_timestamp_ms() + 2), + gen_l2_tx(account3, Nonce(1)), ]; mempool.insert(transactions, HashMap::new()); - // the mempool is full. Accounts with non-sequential nonces got stashed + // Mempool is full. Accounts with non-sequential nonces and some accounts with lowest score should be purged. assert_eq!( HashSet::<_>::from_iter(mempool.get_mempool_info().purged_accounts), - HashSet::<_>::from_iter(vec![account1, account2]), - ); - // verify that existing good-to-go transactions and new ones got picked - mempool.insert( - vec![gen_l2_tx_with_timestamp( - account1, - Nonce(0), - unix_timestamp_ms() + 1, - )], - HashMap::new(), + HashSet::from([account2, account3]), ); + // verify that good-to-go transactions are kept. 
for _ in 0..3 { assert_eq!( mempool @@ -363,6 +357,34 @@ fn mempool_capacity() { .initiator_account(), account1 ); + assert!(!mempool.has_next(&L2TxFilter::default())); +} + +#[test] +fn mempool_does_not_purge_all_accounts() { + let mut mempool = MempoolStore::new(PriorityOpId(0), 1); + let account0 = Address::random(); + let account1 = Address::random(); + let transactions = vec![ + gen_l2_tx(account0, Nonce(0)), + gen_l2_tx(account0, Nonce(1)), + gen_l2_tx(account1, Nonce(1)), + ]; + mempool.insert(transactions, HashMap::new()); + // Mempool is full. Account 1 has tx with non-sequential nonce so it should be purged. + // Txs from account 0 have sequential nonces but their number is greater than capacity; they should be kept. + assert_eq!(mempool.get_mempool_info().purged_accounts, vec![account1]); + // verify that good-to-go transactions are kept. + for _ in 0..2 { + assert_eq!( + mempool + .next_transaction(&L2TxFilter::default()) + .unwrap() + .initiator_account(), + account0 + ); + } + assert!(!mempool.has_next(&L2TxFilter::default())); } fn gen_l2_tx(address: Address, nonce: Nonce) -> Transaction { diff --git a/core/node/state_keeper/src/mempool_actor.rs b/core/node/state_keeper/src/mempool_actor.rs index dbe1e4cb977..a17f2670cbb 100644 --- a/core/node/state_keeper/src/mempool_actor.rs +++ b/core/node/state_keeper/src/mempool_actor.rs @@ -89,20 +89,35 @@ impl MempoolFetcher { .await .context("failed getting pending protocol version")?; - let l2_tx_filter = l2_tx_filter( - self.batch_fee_input_provider.as_ref(), - protocol_version.into(), - ) - .await - .context("failed creating L2 transaction filter")?; + let (fee_per_gas, gas_per_pubdata) = if let Some(unsealed_batch) = storage + .blocks_dal() + .get_unsealed_l1_batch() + .await + .context("failed getting unsealed batch")? + { + let (fee_per_gas, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + unsealed_batch.fee_input, + protocol_version.into(), + ); + (fee_per_gas, gas_per_pubdata as u32) + } else { + let filter = l2_tx_filter( + self.batch_fee_input_provider.as_ref(), + protocol_version.into(), + ) + .await + .context("failed creating L2 transaction filter")?; + + (filter.fee_per_gas, filter.gas_per_pubdata) + }; let transactions = storage .transactions_dal() .sync_mempool( &mempool_info.stashed_accounts, &mempool_info.purged_accounts, - l2_tx_filter.gas_per_pubdata, - l2_tx_filter.fee_per_gas, + gas_per_pubdata, + fee_per_gas, self.sync_batch_size, ) .await From 0757ecd56e531888127cd146f8c2745099a6ed93 Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Mon, 21 Oct 2024 23:52:59 +1100 Subject: [PATCH 099/140] chore(zkstack_cli): build sys contract with `--frozen-lockfile` (#3138) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- zkstack_cli/crates/common/src/contracts.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zkstack_cli/crates/common/src/contracts.rs b/zkstack_cli/crates/common/src/contracts.rs index c95849131c1..8f5ae805602 100644 --- a/zkstack_cli/crates/common/src/contracts.rs +++ b/zkstack_cli/crates/common/src/contracts.rs @@ -26,7 +26,8 @@ pub fn build_l2_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result pub fn build_system_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { let _dir_guard = shell.push_dir(link_to_code.join("contracts/system-contracts")); - Cmd::new(cmd!(shell, "yarn install")).run()?; + // Do not update era-contract's lockfile to avoid dirty submodule + Cmd::new(cmd!(shell, "yarn install --frozen-lockfile")).run()?; Cmd::new(cmd!(shell, "yarn preprocess:system-contracts")).run()?; Cmd::new(cmd!( shell, From 7c289649b7b3c418c7193a35b51c264cf4970f3c Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Mon, 21 Oct 2024 16:01:59 +0200 Subject: [PATCH 100/140] feat(prover): Add min_provers and dry_run features. Improve metrics and test. (#3129) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Improve metrics and test. Add min_provers config. Add dry_run config option for Agent. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- .../config/src/configs/prover_autoscaler.rs | 9 + .../src/proto/config/prover_autoscaler.proto | 7 + .../protobuf_config/src/prover_autoscaler.rs | 32 +- prover/Cargo.lock | 22 + prover/Cargo.toml | 1 + .../crates/bin/prover_autoscaler/Cargo.toml | 1 + .../prover_autoscaler/src/global/queuer.rs | 18 +- .../prover_autoscaler/src/global/scaler.rs | 399 +++++++++++++++--- .../prover_autoscaler/src/global/watcher.rs | 21 +- .../bin/prover_autoscaler/src/k8s/scaler.rs | 15 + .../bin/prover_autoscaler/src/k8s/watcher.rs | 7 +- .../crates/bin/prover_autoscaler/src/main.rs | 2 +- .../bin/prover_autoscaler/src/metrics.rs | 10 +- 13 files changed, 467 insertions(+), 77 deletions(-) diff --git a/core/lib/config/src/configs/prover_autoscaler.rs b/core/lib/config/src/configs/prover_autoscaler.rs index 6f83f0d2d18..b24a1a26651 100644 --- a/core/lib/config/src/configs/prover_autoscaler.rs +++ b/core/lib/config/src/configs/prover_autoscaler.rs @@ -30,6 +30,9 @@ pub struct ProverAutoscalerAgentConfig { pub namespaces: Vec, /// Watched cluster name. Also can be set via flag. pub cluster_name: Option, + /// If dry-run enabled don't do any k8s updates, just report success. + #[serde(default = "ProverAutoscalerAgentConfig::default_dry_run")] + pub dry_run: bool, } #[derive(Debug, Clone, PartialEq, Deserialize, Default)] @@ -53,6 +56,8 @@ pub struct ProverAutoscalerScalerConfig { pub prover_speed: HashMap, /// Maximum number of provers which can be run per cluster/GPU. pub max_provers: HashMap>, + /// Minimum number of provers per namespace. + pub min_provers: HashMap, /// Duration after which pending pod considered long pending. 
#[serde(default = "ProverAutoscalerScalerConfig::default_long_pending_duration")] pub long_pending_duration: Duration, @@ -99,6 +104,10 @@ impl ProverAutoscalerAgentConfig { pub fn default_namespaces() -> Vec { vec!["prover-blue".to_string(), "prover-red".to_string()] } + + pub fn default_dry_run() -> bool { + true + } } impl ProverAutoscalerScalerConfig { diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto index 8363b625119..9b7f201e9b7 100644 --- a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto +++ b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto @@ -17,6 +17,7 @@ message ProverAutoscalerAgentConfig { optional uint32 http_port = 2; // required repeated string namespaces = 3; // optional optional string cluster_name = 4; // optional + optional bool dry_run = 5; // optional } message ProtocolVersion { @@ -39,6 +40,11 @@ message MaxProver { optional uint32 max = 2; // required } +message MinProver { + optional string namespace = 1; // required + optional uint32 min = 2; // required +} + message ProverAutoscalerScalerConfig { optional uint32 prometheus_port = 1; // required optional std.Duration scaler_run_interval = 2; // optional @@ -49,4 +55,5 @@ message ProverAutoscalerScalerConfig { repeated ProverSpeed prover_speed = 7; // optional optional uint32 long_pending_duration_s = 8; // optional repeated MaxProver max_provers = 9; // optional + repeated MinProver min_provers = 10; // optional } diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs b/core/lib/protobuf_config/src/prover_autoscaler.rs index e95e4003972..51f1b162d4c 100644 --- a/core/lib/protobuf_config/src/prover_autoscaler.rs +++ b/core/lib/protobuf_config/src/prover_autoscaler.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use anyhow::Context as _; +use anyhow::Context; use time::Duration; use zksync_config::configs::{self, prover_autoscaler::Gpu}; use zksync_protobuf::{read_optional, repr::ProtoRepr, required, ProtoFmt}; @@ -42,6 +42,7 @@ impl ProtoRepr for proto::ProverAutoscalerAgentConfig { .context("http_port")?, namespaces: self.namespaces.to_vec(), cluster_name: Some("".to_string()), + dry_run: self.dry_run.unwrap_or(Self::Type::default_dry_run()), }) } @@ -51,6 +52,7 @@ impl ProtoRepr for proto::ProverAutoscalerAgentConfig { http_port: Some(this.http_port.into()), namespaces: this.namespaces.clone(), cluster_name: this.cluster_name.clone(), + dry_run: Some(this.dry_run), } } } @@ -103,6 +105,13 @@ impl ProtoRepr for proto::ProverAutoscalerScalerConfig { } acc }), + min_provers: self + .min_provers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("min_provers")?, }) } @@ -137,6 +146,11 @@ impl ProtoRepr for proto::ProverAutoscalerScalerConfig { }) }) .collect(), + min_provers: this + .min_provers + .iter() + .map(|(k, v)| proto::MinProver::build(&(k.clone(), *v))) + .collect(), } } } @@ -208,3 +222,19 @@ impl ProtoRepr for proto::MaxProver { } } } + +impl ProtoRepr for proto::MinProver { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.namespace).context("namespace")?.clone(), + *required(&self.min).context("min")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + namespace: Some(this.0.to_string()), + min: Some(this.1), + } + } +} diff --git a/prover/Cargo.lock b/prover/Cargo.lock index dd2df67902f..e5b42f1601b 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -6702,6 +6702,27 
@@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-test" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "557b891436fe0d5e0e363427fc7f217abf9ccd510d5136549847bdcbcd011d68" +dependencies = [ + "tracing-core", + "tracing-subscriber", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" +dependencies = [ + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -8300,6 +8321,7 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", + "tracing-test", "url", "vise", "zksync_config", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 61169dd4363..af022e691c1 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -58,6 +58,7 @@ tokio-util = "0.7.11" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = "0.3" +tracing-test = "0.2.5" url = "2.5.2" vise = "0.2.0" diff --git a/prover/crates/bin/prover_autoscaler/Cargo.toml b/prover/crates/bin/prover_autoscaler/Cargo.toml index 9743b45593e..fbf3ecae909 100644 --- a/prover/crates/bin/prover_autoscaler/Cargo.toml +++ b/prover/crates/bin/prover_autoscaler/Cargo.toml @@ -43,3 +43,4 @@ tracing-subscriber = { workspace = true, features = ["env-filter"] } tracing.workspace = true url.workspace = true vise.workspace = true +tracing-test.workspace = true diff --git a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs index 1ef5d96386b..32610ebf3c3 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs @@ -5,6 +5,10 @@ use reqwest::Method; use zksync_prover_job_monitor::autoscaler_queue_reporter::VersionedQueueReport; use zksync_utils::http_with_retries::send_request_with_retries; +use crate::metrics::{AUTOSCALER_METRICS, DEFAULT_ERROR_CODE}; + +const MAX_RETRIES: usize = 5; + #[derive(Debug)] pub struct Queue { pub queue: HashMap, @@ -24,15 +28,19 @@ impl Queuer { pub async fn get_queue(&self) -> anyhow::Result { let url = &self.prover_job_monitor_url; - let response = send_request_with_retries(url, 5, Method::GET, None, None).await; - let res = response - .map_err(|err| anyhow::anyhow!("Failed fetching queue from url: {url}: {err:?}"))? 
+ let response = send_request_with_retries(url, MAX_RETRIES, Method::GET, None, None).await; + let response = response.map_err(|err| { + AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc(); + anyhow::anyhow!("Failed fetching queue from url: {url}: {err:?}") + })?; + + AUTOSCALER_METRICS.calls[&(url.clone(), response.status().as_u16())].inc(); + let response = response .json::>() .await .context("Failed to read response as json")?; - Ok(Queue { - queue: res + queue: response .iter() .map(|x| (x.version.to_string(), x.report.prover_jobs.queued as u64)) .collect::>(), diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs index dd3f3cf1ad3..f10902f5dd2 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -44,9 +44,9 @@ struct GPUPoolKey { } static PROVER_DEPLOYMENT_RE: Lazy = - Lazy::new(|| Regex::new(r"^prover-gpu-fri-spec-(\d{1,2})?(-(?[ltvpa]\d+))?$").unwrap()); + Lazy::new(|| Regex::new(r"^circuit-prover-gpu(-(?[ltvpa]\d+))?$").unwrap()); static PROVER_POD_RE: Lazy = - Lazy::new(|| Regex::new(r"^prover-gpu-fri-spec-(\d{1,2})?(-(?[ltvpa]\d+))?").unwrap()); + Lazy::new(|| Regex::new(r"^circuit-prover-gpu(-(?[ltvpa]\d+))?").unwrap()); pub struct Scaler { /// namespace to Protocol Version configuration. @@ -56,6 +56,7 @@ pub struct Scaler { /// Which cluster to use first. cluster_priorities: HashMap, + min_provers: HashMap, max_provers: HashMap>, prover_speed: HashMap, long_pending_duration: chrono::Duration, @@ -83,11 +84,19 @@ impl Scaler { queuer: queuer::Queuer, config: ProverAutoscalerScalerConfig, ) -> Self { + config + .protocol_versions + .iter() + .for_each(|(namespace, version)| { + AUTOSCALER_METRICS.prover_protocol_version[&(namespace.clone(), version.clone())] + .set(1); + }); Self { namespaces: config.protocol_versions, watcher, queuer, cluster_priorities: config.cluster_priorities, + min_provers: config.min_provers, max_provers: config.max_provers, prover_speed: config.prover_speed, long_pending_duration: chrono::Duration::seconds( @@ -200,16 +209,23 @@ impl Scaler { self.speed(gpu) * n as u64 } - fn normalize_queue(&self, gpu: Gpu, q: u64) -> u64 { + fn normalize_queue(&self, gpu: Gpu, queue: u64) -> u64 { let speed = self.speed(gpu); // Divide and round up if there's any remainder. - (q + speed - 1) / speed * speed + (queue + speed - 1) / speed * speed } - fn run(&self, namespace: &String, q: u64, clusters: &Clusters) -> HashMap { + fn run(&self, namespace: &String, queue: u64, clusters: &Clusters) -> HashMap { let sc = self.sorted_clusters(namespace, clusters); tracing::debug!("Sorted clusters for namespace {}: {:?}", namespace, &sc); + // Increase queue size, if it's too small, to make sure that required min_provers are + // running. + let queue: u64 = self.min_provers.get(namespace).map_or(queue, |min| { + self.normalize_queue(Gpu::L4, queue) + .max(self.provers_to_speed(Gpu::L4, *min)) + }); + let mut total: i64 = 0; let mut provers: HashMap = HashMap::new(); for c in &sc { @@ -228,9 +244,9 @@ impl Scaler { } // Remove unneeded pods. 
- if (total as u64) > self.normalize_queue(Gpu::L4, q) { + if (total as u64) > self.normalize_queue(Gpu::L4, queue) { for c in sc.iter().rev() { - let mut excess_queue = total as u64 - self.normalize_queue(c.gpu, q); + let mut excess_queue = total as u64 - self.normalize_queue(c.gpu, queue); let mut excess_provers = (excess_queue / self.speed(c.gpu)) as u32; let p = provers.entry(c.to_key()).or_default(); if *p < excess_provers { @@ -255,11 +271,11 @@ impl Scaler { } } - tracing::debug!("Queue coverd with provers: {}", total); + tracing::debug!("Queue covered with provers: {}", total); // Add required provers. - if (total as u64) < q { + if (total as u64) < queue { for c in &sc { - let mut required_queue = q - total as u64; + let mut required_queue = queue - total as u64; let mut required_provers = (self.normalize_queue(c.gpu, required_queue) / self.speed(c.gpu)) as u32; let p = provers.entry(c.to_key()).or_default(); @@ -306,6 +322,7 @@ impl Task for Scaler { let guard = self.watcher.data.lock().await; if let Err(err) = watcher::check_is_ready(&guard.is_ready) { + AUTOSCALER_METRICS.clusters_not_ready.inc(); tracing::warn!("Skipping Scaler run: {}", err); return Ok(()); } @@ -329,70 +346,342 @@ impl Task for Scaler { #[cfg(test)] mod tests { - use std::sync::Arc; - - use tokio::sync::Mutex; - use super::*; use crate::{ cluster_types::{Deployment, Namespace, Pod}, global::{queuer, watcher}, }; + #[tracing_test::traced_test] #[test] fn test_run() { - let watcher = watcher::Watcher { - cluster_agents: vec![], - data: Arc::new(Mutex::new(watcher::WatchedData::default())), - }; - let queuer = queuer::Queuer { - prover_job_monitor_url: "".to_string(), - }; let scaler = Scaler::new( - watcher, - queuer, + watcher::Watcher::default(), + queuer::Queuer::default(), ProverAutoscalerScalerConfig { - max_provers: HashMap::from([("foo".to_string(), HashMap::from([(Gpu::L4, 100)]))]), + cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), + min_provers: [("prover-other".into(), 2)].into(), + max_provers: [ + ("foo".into(), [(Gpu::L4, 100)].into()), + ("bar".into(), [(Gpu::L4, 100)].into()), + ] + .into(), ..Default::default() }, ); - let got = scaler.run( - &"prover".to_string(), - 1499, - &Clusters { - clusters: HashMap::from([( - "foo".to_string(), - Cluster { - name: "foo".to_string(), - namespaces: HashMap::from([( - "prover".to_string(), - Namespace { - deployments: HashMap::from([( - "prover-gpu-fri-spec-1".to_string(), - Deployment { + + assert_eq!( + scaler.run( + &"prover".into(), + 1499, + &Clusters { + clusters: [( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), + pods: [( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + )] + .into(), + }, + )] + .into(), + }, + )] + .into(), + }, + ), + [( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 3, + )] + .into(), + "3 new provers" + ); + assert_eq!( + scaler.run( + &"prover".into(), + 499, + &Clusters { + clusters: [ + ( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), ..Default::default() }, - )]), - pods: HashMap::from([( - "prover-gpu-fri-spec-1-c47644679-x9xqp".to_string(), - Pod { - status: "Running".to_string(), - ..Default::default() + )] + .into(), + }, + ), + ( + 
"bar".into(), + Cluster { + name: "bar".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment { + running: 1, + desired: 1, + }, + )] + .into(), + pods: [( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + )] + .into(), }, - )]), + )] + .into(), }, - )]), + ) + ] + .into(), + }, + ), + [ + ( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, }, - )]), - }, + 0, + ), + ( + GPUPoolKey { + cluster: "bar".into(), + gpu: Gpu::L4, + }, + 1, + ) + ] + .into(), + "Preserve running" ); - let want = HashMap::from([( - GPUPoolKey { - cluster: "foo".to_string(), - gpu: Gpu::L4, + } + + #[tracing_test::traced_test] + #[test] + fn test_run_min_provers() { + let scaler = Scaler::new( + watcher::Watcher::default(), + queuer::Queuer::default(), + ProverAutoscalerScalerConfig { + cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), + min_provers: [("prover".into(), 2)].into(), + max_provers: [ + ("foo".into(), [(Gpu::L4, 100)].into()), + ("bar".into(), [(Gpu::L4, 100)].into()), + ] + .into(), + ..Default::default() }, - 3, - )]); - assert_eq!(got, want); + ); + + assert_eq!( + scaler.run( + &"prover".into(), + 10, + &Clusters { + clusters: [ + ( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), + ..Default::default() + }, + )] + .into(), + }, + ), + ( + "bar".into(), + Cluster { + name: "bar".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), + ..Default::default() + }, + )] + .into(), + }, + ) + ] + .into(), + }, + ), + [ + ( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 2, + ), + ( + GPUPoolKey { + cluster: "bar".into(), + gpu: Gpu::L4, + }, + 0, + ) + ] + .into(), + "Min 2 provers, non running" + ); + assert_eq!( + scaler.run( + &"prover".into(), + 0, + &Clusters { + clusters: [ + ( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment { + running: 3, + desired: 3, + }, + )] + .into(), + pods: [ + ( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc2".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc3".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ) + ] + .into(), + }, + )] + .into(), + }, + ), + ( + "bar".into(), + Cluster { + name: "bar".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment { + running: 2, + desired: 2, + }, + )] + .into(), + pods: [ + ( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc2".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ) + ] + .into(), + }, + )] + .into(), + }, + ) + ] + .into(), + }, + ), + [ + ( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 2, + ), + ( + GPUPoolKey { + cluster: "bar".into(), + gpu: Gpu::L4, + }, + 0, + ) + ] + .into(), + "Min 2 provers, 5 running" + ); } } diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs 
b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs index 01fa68c60f8..646b320e12d 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs @@ -9,9 +9,12 @@ use zksync_utils::http_with_retries::send_request_with_retries; use crate::{ cluster_types::{Cluster, Clusters}, + metrics::{AUTOSCALER_METRICS, DEFAULT_ERROR_CODE}, task_wiring::Task, }; +const MAX_RETRIES: usize = 5; + #[derive(Default)] pub struct WatchedData { pub clusters: Clusters, @@ -27,7 +30,7 @@ pub fn check_is_ready(v: &Vec) -> Result<()> { Ok(()) } -#[derive(Clone)] +#[derive(Default, Clone)] pub struct Watcher { /// List of base URLs of all agents. pub cluster_agents: Vec>, @@ -74,15 +77,19 @@ impl Task for Watcher { .context("Failed to join URL with /cluster")? .to_string(); let response = - send_request_with_retries(&url, 5, Method::GET, None, None).await; - let res = response - .map_err(|err| { - anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}") - })? + send_request_with_retries(&url, MAX_RETRIES, Method::GET, None, None).await; + + let response = response.map_err(|err| { + // TODO: refactor send_request_with_retries to return status. + AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc(); + anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}") + })?; + AUTOSCALER_METRICS.calls[&(url, response.status().as_u16())].inc(); + let response = response .json::() .await .context("Failed to read response as json"); - Ok((i, res)) + Ok((i, response)) }) }) .collect(); diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs index 170b0b10650..5e6f56aacc9 100644 --- a/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs @@ -4,9 +4,14 @@ use kube::api::{Api, Patch, PatchParams}; #[derive(Clone)] pub struct Scaler { pub client: kube::Client, + dry_run: bool, } impl Scaler { + pub fn new(client: kube::Client, dry_run: bool) -> Self { + Self { client, dry_run } + } + pub async fn scale(&self, namespace: &str, name: &str, size: i32) -> anyhow::Result<()> { let deployments: Api = Api::namespaced(self.client.clone(), namespace); @@ -18,6 +23,16 @@ impl Scaler { "replicas": size } }); + + if self.dry_run { + tracing::info!( + "Dry run of scaled deployment/{} to {} replica(s).", + name, + size + ); + return Ok(()); + } + let pp = PatchParams::default(); deployments.patch(name, &pp, &Patch::Merge(patch)).await?; tracing::info!("Scaled deployment/{} to {} replica(s).", name, size); diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs index 8746d17663b..f94dfc3704f 100644 --- a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs @@ -9,10 +9,7 @@ use kube::{ }; use tokio::sync::Mutex; -use crate::{ - cluster_types::{Cluster, Deployment, Namespace, Pod}, - metrics::AUTOSCALER_METRICS, -}; +use crate::cluster_types::{Cluster, Deployment, Namespace, Pod}; #[derive(Clone)] pub struct Watcher { @@ -38,8 +35,6 @@ impl Watcher { pub async fn run(self) -> anyhow::Result<()> { // TODO: add actual metrics - AUTOSCALER_METRICS.protocol_version.set(1); - AUTOSCALER_METRICS.calls.inc_by(1); // TODO: watch for a list of namespaces, get: // - deployments (name, running, desired) [done] diff --git a/prover/crates/bin/prover_autoscaler/src/main.rs 
b/prover/crates/bin/prover_autoscaler/src/main.rs index e3aec1fbd39..45e476079a5 100644 --- a/prover/crates/bin/prover_autoscaler/src/main.rs +++ b/prover/crates/bin/prover_autoscaler/src/main.rs @@ -95,7 +95,7 @@ async fn main() -> anyhow::Result<()> { // TODO: maybe get cluster name from curl -H "Metadata-Flavor: Google" // http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name let watcher = Watcher::new(client.clone(), cluster, agent_config.namespaces); - let scaler = Scaler { client }; + let scaler = Scaler::new(client, agent_config.dry_run); tasks.push(tokio::spawn(watcher.clone().run())); tasks.push(tokio::spawn(agent::run_server( agent_config.http_port, diff --git a/prover/crates/bin/prover_autoscaler/src/metrics.rs b/prover/crates/bin/prover_autoscaler/src/metrics.rs index 09cbaa6ba00..d94ac8b97e9 100644 --- a/prover/crates/bin/prover_autoscaler/src/metrics.rs +++ b/prover/crates/bin/prover_autoscaler/src/metrics.rs @@ -1,13 +1,19 @@ use vise::{Counter, Gauge, LabeledFamily, Metrics}; use zksync_config::configs::prover_autoscaler::Gpu; +pub const DEFAULT_ERROR_CODE: u16 = 500; + #[derive(Debug, Metrics)] #[metrics(prefix = "autoscaler")] pub(crate) struct AutoscalerMetrics { - pub protocol_version: Gauge, - pub calls: Counter, + #[metrics(labels = ["target_namespace", "protocol_version"])] + pub prover_protocol_version: LabeledFamily<(String, String), Gauge, 2>, #[metrics(labels = ["target_cluster", "target_namespace", "gpu"])] pub provers: LabeledFamily<(String, String, Gpu), Gauge, 3>, + pub clusters_not_ready: Counter, + #[metrics(labels = ["target", "status"])] + pub calls: LabeledFamily<(String, u16), Counter, 2>, + // TODO: count of command send succes/fail } #[vise::register] From 7241ae139b2b6bf9a9966eaa2f22203583a3786f Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Tue, 22 Oct 2024 09:29:51 +0200 Subject: [PATCH 101/140] fix(tee_prover): add zstd compression (#3144) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add zstd compression for the HTTP connection between `proof_data_handler` and `zksync_tee_prover`. ## Why ❔ This enables faster intercloud communication. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
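
Roughly, the change wires two `tower-http` layers into the proof data handler's axum router (see the diff below) and enables the `zstd` feature of `reqwest` on the TEE prover side. A minimal sketch, assuming the `compression-zstd` / `decompression-zstd` features of `tower-http` are enabled as in this PR's `Cargo.toml` change; the route path and handler below are placeholders, not the real proof-data-handler endpoints:

```rust
use axum::{routing::post, Router};
use tower_http::{compression::CompressionLayer, decompression::RequestDecompressionLayer};

fn router_with_zstd() -> Router {
    Router::new()
        // Placeholder route/handler; the real router is built in `create_proof_processing_router`.
        .route("/submit", post(|| async { "ok" }))
        // Compress responses (zstd support comes from the `compression-zstd` feature).
        .layer(CompressionLayer::new())
        // Transparently decompress zstd-encoded request bodies.
        .layer(RequestDecompressionLayer::new().zstd(true))
}
```

On the client side, enabling the `zstd` feature of `reqwest` (as done in `zksync_tee_prover`'s `Cargo.toml`) makes the client advertise zstd in `Accept-Encoding` and decode compressed responses transparently, so no further prover-side code changes are needed.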
--------- Signed-off-by: Harald Hoyer --- Cargo.lock | 37 +++++++++++++++++++++++++ core/bin/zksync_tee_prover/Cargo.toml | 2 +- core/node/proof_data_handler/Cargo.toml | 1 + core/node/proof_data_handler/src/lib.rs | 2 ++ 4 files changed, 41 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 5da4cc8c143..05c26a74834 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -292,6 +292,20 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-compression" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" +dependencies = [ + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "zstd", + "zstd-safe", +] + [[package]] name = "async-executor" version = "1.13.1" @@ -5886,6 +5900,7 @@ version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ + "async-compression", "base64 0.22.1", "bytes", "encoding_rs", @@ -8309,13 +8324,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ + "async-compression", "bitflags 2.6.0", "bytes", + "futures-core", "http 1.1.0", "http-body 1.0.1", "http-body-util", "pin-project-lite", "tokio", + "tokio-util", "tower-layer", "tower-service", ] @@ -10866,6 +10884,7 @@ dependencies = [ "serde_json", "tokio", "tower 0.4.13", + "tower-http", "tracing", "vise", "zksync_basic_types", @@ -11414,6 +11433,24 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zstd" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +dependencies = [ + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.13+zstd.1.5.6" diff --git a/core/bin/zksync_tee_prover/Cargo.toml b/core/bin/zksync_tee_prover/Cargo.toml index 85908eebeaa..b853da348ee 100644 --- a/core/bin/zksync_tee_prover/Cargo.toml +++ b/core/bin/zksync_tee_prover/Cargo.toml @@ -15,7 +15,7 @@ publish = false anyhow.workspace = true async-trait.workspace = true envy.workspace = true -reqwest.workspace = true +reqwest = { workspace = true, features = ["zstd"] } secp256k1 = { workspace = true, features = ["serde"] } serde = { workspace = true, features = ["derive"] } thiserror.workspace = true diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 76dc89eda04..e2ddc972a2f 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -22,6 +22,7 @@ zksync_utils.workspace = true anyhow.workspace = true axum.workspace = true tokio.workspace = true +tower-http = { workspace = true, features = ["compression-zstd", "decompression-zstd"] } tracing.workspace = true [dev-dependencies] diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index a482a7bc07b..661c76d2000 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -139,4 +139,6 @@ fn create_proof_processing_router( } router + 
.layer(tower_http::compression::CompressionLayer::new()) + .layer(tower_http::decompression::RequestDecompressionLayer::new().zstd(true)) } From abeee8190d3c3a5e577d71024bdfb30ff516ad03 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 22 Oct 2024 16:19:30 +0300 Subject: [PATCH 102/140] fix(en): Return `SyncState` health check (#3142) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Returns `SyncState`-based health check, which was removed when transitioning to the node framework. ## Why ❔ This health check looks useful and is used at least in some internal automations. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/bin/external_node/src/tests/mod.rs | 14 +++++++++++--- .../layers/state_keeper/external_io.rs | 6 ++++++ .../implementations/layers/sync_state_updater.rs | 8 ++++++++ core/node/node_sync/src/sync_state.rs | 1 + 4 files changed, 26 insertions(+), 3 deletions(-) diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index b21dbd0db9a..c5dd88748e5 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -22,10 +22,18 @@ const POLL_INTERVAL: Duration = Duration::from_millis(100); #[tracing::instrument] // Add args to the test logs async fn external_node_basics(components_str: &'static str) { let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging - let (env, env_handles) = utils::TestEnvironment::with_genesis_block(components_str).await; - let expected_health_components = utils::expected_health_components(&env.components); + let mut expected_health_components = utils::expected_health_components(&env.components); + let expected_shutdown_components = expected_health_components.clone(); + let has_core_or_api = env.components.0.iter().any(|component| { + [Component::Core, Component::HttpApi, Component::WsApi].contains(component) + }); + if has_core_or_api { + // The `sync_state` component doesn't signal its shutdown, but should be present in the list of components + expected_health_components.push("sync_state"); + } + let l2_client = utils::mock_l2_client(&env); let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address()); @@ -84,7 +92,7 @@ async fn external_node_basics(components_str: &'static str) { let health_data = app_health.check_health().await; tracing::info!(?health_data, "final health data"); assert_matches!(health_data.inner().status(), HealthStatus::ShutDown); - for name in expected_health_components { + for name in expected_shutdown_components { let component_health = &health_data.components()[name]; assert_matches!(component_health.status(), HealthStatus::ShutDown); } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs index 31b76550767..2c23f5aa9a1 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs @@ -8,6 +8,7 @@ use zksync_types::L2ChainId; use crate::{ implementations::resources::{ action_queue::ActionQueueSenderResource, + healthcheck::AppHealthCheckResource, 
main_node_client::MainNodeClientResource, pools::{MasterPool, PoolResource}, state_keeper::{ConditionalSealerResource, StateKeeperIOResource}, @@ -26,6 +27,7 @@ pub struct ExternalIOLayer { #[derive(Debug, FromContext)] #[context(crate = crate)] pub struct Input { + pub app_health: AppHealthCheckResource, pub pool: PoolResource, pub main_node_client: MainNodeClientResource, } @@ -57,6 +59,10 @@ impl WiringLayer for ExternalIOLayer { async fn wire(self, input: Self::Input) -> Result { // Create `SyncState` resource. let sync_state = SyncState::default(); + let app_health = &input.app_health.0; + app_health + .insert_custom_component(Arc::new(sync_state.clone())) + .map_err(WiringError::internal)?; // Create `ActionQueueSender` resource. let (action_queue_sender, action_queue) = ActionQueue::new(); diff --git a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs index 1f86b43f7a5..dd2652dfddb 100644 --- a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs +++ b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs @@ -1,9 +1,12 @@ +use std::sync::Arc; + use zksync_dal::{ConnectionPool, Core}; use zksync_node_sync::SyncState; use zksync_web3_decl::client::{DynClient, L2}; use crate::{ implementations::resources::{ + healthcheck::AppHealthCheckResource, main_node_client::MainNodeClientResource, pools::{MasterPool, PoolResource}, sync_state::SyncStateResource, @@ -24,6 +27,7 @@ pub struct SyncStateUpdaterLayer; pub struct Input { /// Fetched to check whether the `SyncState` was already provided by another layer. pub sync_state: Option, + pub app_health: AppHealthCheckResource, pub master_pool: PoolResource, pub main_node_client: MainNodeClientResource, } @@ -62,6 +66,10 @@ impl WiringLayer for SyncStateUpdaterLayer { let MainNodeClientResource(main_node_client) = input.main_node_client; let sync_state = SyncState::default(); + let app_health = &input.app_health.0; + app_health + .insert_custom_component(Arc::new(sync_state.clone())) + .map_err(WiringError::internal)?; Ok(Output { sync_state: Some(sync_state.clone().into()), diff --git a/core/node/node_sync/src/sync_state.rs b/core/node/node_sync/src/sync_state.rs index e061ff7da01..f8a2fe00ec0 100644 --- a/core/node/node_sync/src/sync_state.rs +++ b/core/node/node_sync/src/sync_state.rs @@ -173,6 +173,7 @@ impl CheckHealth for SyncState { Health::from(&*self.0.borrow()) } } + impl SyncStateInner { fn is_synced(&self) -> (bool, Option) { if let (Some(main_node_block), Some(local_block)) = (self.main_node_block, self.local_block) From b29be7d9a8c664beac5d8384548db54de0ba882f Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Tue, 22 Oct 2024 16:45:10 +0200 Subject: [PATCH 103/140] feat(zkstack_cli): Autocompletion support (#3097) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add autocomplete feature to zkstack: ```bash ❯ zkstack chain dev -- Chain related commands consensus -- Update ZKsync containers -- Run containers for local development contract-verifier -- Run contract verifier ecosystem -- Ecosystem related commands explorer -- Run block-explorer external-node -- External Node related commands help -- Print this message or the help of the given subcommand(s) markdown update -- portal -- Run dapp-portal prover -- Prover related commands server -- Run server ``` ```bash ❯ zkstack ecosystem build-transactions -- Create transactions to build ecosystem 
contracts change-default-chain -- Change the default chain create -- Create a new ecosystem and chain, setting necessary configurations for later initialization help -- Print this message or the help of the given subcommand(s) init -- Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations setup-observability -- Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo ``` ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --------- Co-authored-by: Danil --- .github/workflows/ci-core-lint-reusable.yml | 1 + zkstack_cli/Cargo.lock | 12 +- zkstack_cli/Cargo.toml | 3 +- zkstack_cli/crates/zkstack/Cargo.toml | 6 +- zkstack_cli/crates/zkstack/build.rs | 121 +- .../crates/zkstack/completion/_zkstack.zsh | 4997 ++++++++++++ .../crates/zkstack/completion/zkstack.fish | 701 ++ .../crates/zkstack/completion/zkstack.sh | 6998 +++++++++++++++++ .../zkstack/src/commands/args/autocomplete.rs | 13 + .../crates/zkstack/src/commands/args/mod.rs | 2 + .../zkstack/src/commands/autocomplete.rs | 52 + .../zkstack/src/commands/chain/args/create.rs | 4 +- .../src/commands/chain/args/init/configs.rs | 2 +- .../src/commands/chain/args/init/mod.rs | 2 +- .../zkstack/src/commands/dev/commands/lint.rs | 67 +- .../src/commands/dev/commands/lint_utils.rs | 1 + .../commands/dev/commands/test/args/fees.rs | 2 +- .../dev/commands/test/args/recovery.rs | 2 +- .../commands/dev/commands/test/args/revert.rs | 2 +- .../src/commands/ecosystem/args/create.rs | 4 +- .../src/commands/ecosystem/args/init.rs | 2 +- .../crates/zkstack/src/commands/mod.rs | 1 + zkstack_cli/crates/zkstack/src/main.rs | 78 +- zkstack_cli/crates/zkstack/src/messages.rs | 7 + 24 files changed, 13021 insertions(+), 59 deletions(-) create mode 100644 zkstack_cli/crates/zkstack/completion/_zkstack.zsh create mode 100644 zkstack_cli/crates/zkstack/completion/zkstack.fish create mode 100644 zkstack_cli/crates/zkstack/completion/zkstack.sh create mode 100644 zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/autocomplete.rs diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 53b25835ff5..0babbd1c9db 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -49,6 +49,7 @@ jobs: ci_run zkstack dev lint -t js --check ci_run zkstack dev lint -t ts --check ci_run zkstack dev lint -t rs --check + ci_run zkstack dev lint -t autocompletion --check - name: Check Database run: | diff --git a/zkstack_cli/Cargo.lock b/zkstack_cli/Cargo.lock index 63561c02b9d..7770d06a197 100644 --- a/zkstack_cli/Cargo.lock +++ b/zkstack_cli/Cargo.lock @@ -609,6 +609,15 @@ dependencies = [ "terminal_size", ] +[[package]] +name = "clap_complete" +version = "4.5.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9646e2e245bf62f45d39a0f3f36f1171ad1ea0d6967fd114bca72cb02a8fcdfb" +dependencies = [ + "clap", +] + [[package]] name = "clap_derive" version = "4.5.18" @@ -6709,11 +6718,12 @@ dependencies = [ "chrono", "clap", "clap-markdown", + "clap_complete", "cliclack", "common", "config", + "dirs", "ethers", - "eyre", "futures", "human-panic", "lazy_static", diff 
--git a/zkstack_cli/Cargo.toml b/zkstack_cli/Cargo.toml index a805cf85d51..1f493f9c3e4 100644 --- a/zkstack_cli/Cargo.toml +++ b/zkstack_cli/Cargo.toml @@ -40,11 +40,12 @@ zksync_protobuf_build = "=0.5.0" # External dependencies anyhow = "1.0.82" clap = { version = "4.4", features = ["derive", "wrap_help", "string"] } +clap_complete = "4.5.33" +dirs = "5.0.1" slugify-rs = "0.0.3" cliclack = "0.2.5" console = "0.15.8" chrono = "0.4.38" -eyre = "0.6.12" ethers = "2.0" futures = "0.3.30" human-panic = "2.0" diff --git a/zkstack_cli/crates/zkstack/Cargo.toml b/zkstack_cli/crates/zkstack/Cargo.toml index a9fcecaf79b..93a78c751b1 100644 --- a/zkstack_cli/crates/zkstack/Cargo.toml +++ b/zkstack_cli/crates/zkstack/Cargo.toml @@ -14,10 +14,12 @@ keywords.workspace = true anyhow.workspace = true chrono.workspace = true clap.workspace = true +clap_complete.workspace = true clap-markdown.workspace = true cliclack.workspace = true common.workspace = true config.workspace = true +dirs.workspace = true ethers.workspace = true futures.workspace = true human-panic.workspace = true @@ -49,6 +51,8 @@ rand.workspace = true zksync_consensus_utils.workspace = true [build-dependencies] -eyre.workspace = true +anyhow.workspace = true +clap_complete.workspace = true +dirs.workspace = true ethers.workspace = true zksync_protobuf_build.workspace = true diff --git a/zkstack_cli/crates/zkstack/build.rs b/zkstack_cli/crates/zkstack/build.rs index 92f34a542b7..bccf5bae89f 100644 --- a/zkstack_cli/crates/zkstack/build.rs +++ b/zkstack_cli/crates/zkstack/build.rs @@ -1,12 +1,23 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; +use anyhow::{anyhow, Context}; use ethers::contract::Abigen; -fn main() -> eyre::Result<()> { +const COMPLETION_DIR: &str = "completion"; + +fn main() -> anyhow::Result<()> { let outdir = PathBuf::from(std::env::var("OUT_DIR")?).canonicalize()?; - Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json")? - .generate()? - .write_to_file(outdir.join("consensus_registry_abi.rs"))?; + Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json") + .map_err(|_| anyhow!("Failed ABI deserialization"))? + .generate() + .map_err(|_| anyhow!("Failed ABI generation"))? + .write_to_file(outdir.join("consensus_registry_abi.rs")) + .context("Failed to write ABI to file")?; + + if let Err(e) = configure_shell_autocompletion() { + println!("cargo:warning=It was not possible to install autocomplete scripts. Please generate them manually with `zkstack autocomplete`"); + println!("cargo:error={}", e); + }; zksync_protobuf_build::Config { input_root: "src/commands/consensus/proto".into(), @@ -19,3 +30,103 @@ fn main() -> eyre::Result<()> { .unwrap(); Ok(()) } + +fn configure_shell_autocompletion() -> anyhow::Result<()> { + // Array of supported shells + let shells = [ + clap_complete::Shell::Bash, + clap_complete::Shell::Fish, + clap_complete::Shell::Zsh, + ]; + + for shell in shells { + std::fs::create_dir_all(&shell.autocomplete_folder()?) + .context("it was impossible to create the configuration directory")?; + + let src = Path::new(COMPLETION_DIR).join(shell.autocomplete_file_name()?); + let dst = shell + .autocomplete_folder()? 
+ .join(shell.autocomplete_file_name()?); + + std::fs::copy(src, dst)?; + + shell + .configure_autocomplete() + .context("failed to run extra configuration requirements")?; + } + + Ok(()) +} + +pub trait ShellAutocomplete { + fn autocomplete_folder(&self) -> anyhow::Result; + fn autocomplete_file_name(&self) -> anyhow::Result; + /// Extra steps required for shells enable command autocomplete. + fn configure_autocomplete(&self) -> anyhow::Result<()>; +} + +impl ShellAutocomplete for clap_complete::Shell { + fn autocomplete_folder(&self) -> anyhow::Result { + let home_dir = dirs::home_dir().context("missing home folder")?; + + match self { + clap_complete::Shell::Bash => Ok(home_dir.join(".bash_completion.d")), + clap_complete::Shell::Fish => Ok(home_dir.join(".config/fish/completions")), + clap_complete::Shell::Zsh => Ok(home_dir.join(".zsh/completion")), + _ => anyhow::bail!("unsupported shell"), + } + } + + fn autocomplete_file_name(&self) -> anyhow::Result { + let crate_name = env!("CARGO_PKG_NAME"); + + match self { + clap_complete::Shell::Bash => Ok(format!("{}.sh", crate_name)), + clap_complete::Shell::Fish => Ok(format!("{}.fish", crate_name)), + clap_complete::Shell::Zsh => Ok(format!("_{}.zsh", crate_name)), + _ => anyhow::bail!("unsupported shell"), + } + } + + fn configure_autocomplete(&self) -> anyhow::Result<()> { + match self { + clap_complete::Shell::Bash | clap_complete::Shell::Zsh => { + let shell = &self.to_string().to_lowercase(); + let completion_file = self + .autocomplete_folder()? + .join(self.autocomplete_file_name()?); + + // Source the completion file inside .{shell}rc + let shell_rc = dirs::home_dir() + .context("missing home directory")? + .join(format!(".{}rc", shell)); + + if shell_rc.exists() { + let shell_rc_content = std::fs::read_to_string(&shell_rc) + .context(format!("could not read .{}rc", shell))?; + + if !shell_rc_content.contains("# zkstack completion") { + std::fs::write( + shell_rc, + format!( + "{}\n# zkstack completion\nsource \"{}\"\n", + shell_rc_content, + completion_file.to_str().unwrap() + ), + ) + .context(format!("could not write .{}rc", shell))?; + } + } else { + println!( + "cargo:warning=Please add the following line to your .{}rc:", + shell + ); + println!("cargo:warning=source {}", completion_file.to_str().unwrap()); + } + } + _ => (), + } + + Ok(()) + } +} diff --git a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh new file mode 100644 index 00000000000..a8a60a6130a --- /dev/null +++ b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh @@ -0,0 +1,4997 @@ +#compdef zkstack + +autoload -U is-at-least + +_zkstack() { + typeset -A opt_args + typeset -a _arguments_options + local ret=1 + + if is-at-least 5.2; then + _arguments_options=(-s -S -C) + else + _arguments_options=(-s -C) + fi + + local context curcontext="$curcontext" state line + _arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +'-V[Print version]' \ +'--version[Print version]' \ +":: :_zkstack_commands" \ +"*::: :->zkstack" \ +&& ret=0 + case $state in + (zkstack) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-command-$line[1]:" + case $line[1] in + (autocomplete) +_arguments "${_arguments_options[@]}" : \ +'--generate=[The shell to generate the autocomplete script for]:GENERATOR:(bash elvish 
fish powershell zsh)' \ +'-o+[The out directory to write the autocomplete script to]:OUT:_files' \ +'--out=[The out directory to write the autocomplete script to]:OUT:_files' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(ecosystem) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__ecosystem_commands" \ +"*::: :->ecosystem" \ +&& ret=0 + + case $state in + (ecosystem) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-ecosystem-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--ecosystem-name=[]:ECOSYSTEM_NAME: ' \ +'--l1-network=[L1 Network]:L1_NETWORK:(localhost sepolia holesky mainnet)' \ +'--link-to-code=[Code link]:LINK_TO_CODE:_files -/' \ +'--chain-name=[]:CHAIN_NAME: ' \ +'--chain-id=[Chain ID]:CHAIN_ID: ' \ +'--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ +'--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" +random\:"Generate random wallets" +empty\:"Generate placeholder wallets" +in-file\:"Specify file with wallets"))' \ +'--wallet-path=[Wallet path]:WALLET_PATH:_files' \ +'--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS: ' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ +'--set-as-default=[Set as default chain]' \ +'--start-containers=[Start reth and postgres containers after creation]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--legacy-bridge[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +'--sender=[Address of the transaction sender]:SENDER: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'-o+[Output directory for the generated files]:OUT:_files' \ +'--out=[Output directory for the generated files]:OUT:_files' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--deploy-erc20=[Deploy ERC20 contracts]' \ +'--deploy-ecosystem=[Deploy ecosystem contracts]' \ +'--ecosystem-contracts-path=[Path 
to ecosystem contracts]:ECOSYSTEM_CONTRACTS_PATH:_files' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--deploy-paymaster=[Deploy Paymaster contract]' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'-o+[Enable Grafana]' \ +'--observability=[Enable Grafana]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-u[Use default database urls and names]' \ +'--use-default[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'--ecosystem-only[Initialize ecosystem only and skip chain initialization (chain can be initialized later with \`chain init\` subcommand)]' \ +'--dev[Deploy ecosystem using all defaults. Suitable for local development]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +'::name:' \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__ecosystem__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-ecosystem-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(chain) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__chain_commands" \ +"*::: :->chain" \ +&& ret=0 + + case $state in + (chain) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--chain-name=[]:CHAIN_NAME: ' \ +'--chain-id=[Chain ID]:CHAIN_ID: ' \ +'--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ +'--wallet-creation=[Wallet 
options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" +random\:"Generate random wallets" +empty\:"Generate placeholder wallets" +in-file\:"Specify file with wallets"))' \ +'--wallet-path=[Wallet path]:WALLET_PATH:_files' \ +'--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS: ' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ +'--set-as-default=[Set as default chain]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--legacy-bridge[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +'-o+[Output directory for the generated files]:OUT:_files' \ +'--out=[Output directory for the generated files]:OUT:_files' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--deploy-paymaster=[]' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-u[Use default database urls and names]' \ +'--use-default[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +":: :_zkstack__chain__init_commands" \ +"*::: :->init" \ +&& ret=0 + + case $state in + (init) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-init-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server 
database name]:SERVER_DB_NAME: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-u[Use default database urls and names]' \ +'--use-default[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__init__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-init-help-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(genesis) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-u[Use default database urls and names]' \ +'--use-default[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__chain__genesis_commands" \ +"*::: :->genesis" \ +&& ret=0 + + case $state in + (genesis) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-genesis-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-u[Use default database urls and names]' \ +'--use-default[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__genesis__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-genesis-help-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(register-chain) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed 
through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-l2-contracts) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(accept-chain-ownership) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(initialize-bridges) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-consensus-registry) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with 
'\''--help'\'')]' \ +&& ret=0 +;; +(deploy-multicall3) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-upgrader) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-paymaster) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(update-token-multiplier-setter) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + 
curcontext="${curcontext%:*:*}:zkstack-chain-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__help__init_commands" \ +"*::: :->init" \ +&& ret=0 + + case $state in + (init) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-help-init-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(genesis) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__help__genesis_commands" \ +"*::: :->genesis" \ +&& ret=0 + + case $state in + (genesis) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-help-genesis-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(register-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-l2-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(accept-chain-ownership) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(initialize-bridges) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-consensus-registry) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-multicall3) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-upgrader) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-paymaster) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(update-token-multiplier-setter) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(dev) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev_commands" \ +"*::: :->dev" \ +&& ret=0 + + case $state in + (dev) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-command-$line[1]:" + case $line[1] in + (database) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__database_commands" \ +"*::: :->database" \ +&& ret=0 + + case $state in + (database) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-database-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. 
If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +'--database=[Database to create new migration for]:DATABASE:(prover core)' \ +'--name=[Migration name]:NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. 
If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__database__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-database-help-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(test) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__test_commands" \ +"*::: :->test" \ +&& ret=0 + + case $state in + (test) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-test-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +'-t+[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN: ' \ +'--test-pattern=[Run just the tests matching a pattern. 
Same as the -t flag on jest.]:TEST_PATTERN: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-e[Run tests for external node]' \ +'--external-node[Run tests for external node]' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'--no-kill[The test will not kill all the nodes during execution]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'--enable-consensus[Enable consensus]' \ +'-e[Run tests for external node]' \ +'--external-node[Run tests for external node]' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'--no-kill[The test will not kill all the nodes during execution]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-s[Run recovery from a snapshot instead of genesis]' \ +'--snapshot[Run recovery from a snapshot instead of genesis]' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'--no-kill[The test will not kill all the nodes during execution]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +'--options=[Cargo test flags]:OPTIONS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ 
+'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__test__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-test-help-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(clean) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__clean_commands" \ +"*::: :->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__clean__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-clean-help-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(snapshot) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ 
+'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__snapshot__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-snapshot-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +'*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ +'*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-c[]' \ +'--check[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-c[]' \ +'--check[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +'*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ +'*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__fmt__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-fmt-help-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(prover) +_arguments 
"${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +'--number=[]:NUMBER: ' \ +'--version=[]:VERSION: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--default[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +'--version=[]:VERSION: ' \ +'--snark-wrapper=[]:SNARK_WRAPPER: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--default[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__prover__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-prover-help-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(contracts) +_arguments "${_arguments_options[@]}" : \ +'--l1-contracts=[Build L1 contracts]' \ +'--l2-contracts=[Build L2 contracts]' \ +'--system-contracts=[Build system contracts]' \ +'--test-contracts=[Build test contracts]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +'-p+[Path to the config file to override]:PATH: ' \ +'--path=[Path to the config file to override]:PATH: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +'--file=[]:FILE:_files' \ +'--private-key=[]:PRIVATE_KEY: ' \ +'--l1-rpc-url=[]:L1_RPC_URL: ' \ +'--confirmations=[]:CONFIRMATIONS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(status) +_arguments "${_arguments_options[@]}" : \ +'-u+[URL of the health check endpoint]:URL: ' \ +'--url=[URL of the health check endpoint]:URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' 
\ +'--help[Print help]' \ +":: :_zkstack__dev__status_commands" \ +"*::: :->status" \ +&& ret=0 + + case $state in + (status) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-status-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__status__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-status-help-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(generate-genesis) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-command-$line[1]:" + case $line[1] in + (database) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__database_commands" \ +"*::: :->database" \ +&& ret=0 + + case $state in + (database) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-database-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(test) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__test_commands" \ +"*::: :->test" \ +&& ret=0 + + case $state in + (test) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-test-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(clean) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__clean_commands" \ +"*::: 
:->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(snapshot) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(status) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__status_commands" \ +"*::: :->status" \ +&& ret=0 + + case $state in + (status) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-status-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(generate-genesis) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-prover-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +'--proof-store-dir=[]:PROOF_STORE_DIR: ' \ +'--bucket-base-url=[]:BUCKET_BASE_URL: ' \ +'--credentials-file=[]:CREDENTIALS_FILE: ' \ +'--bucket-name=[]:BUCKET_NAME: ' \ +'--location=[]:LOCATION: ' \ +'--project-id=[]:PROJECT_ID: ' \ 
+'--shall-save-to-public-bucket=[]:SHALL_SAVE_TO_PUBLIC_BUCKET:(true false)' \ +'--public-store-dir=[]:PUBLIC_STORE_DIR: ' \ +'--public-bucket-base-url=[]:PUBLIC_BUCKET_BASE_URL: ' \ +'--public-credentials-file=[]:PUBLIC_CREDENTIALS_FILE: ' \ +'--public-bucket-name=[]:PUBLIC_BUCKET_NAME: ' \ +'--public-location=[]:PUBLIC_LOCATION: ' \ +'--public-project-id=[]:PUBLIC_PROJECT_ID: ' \ +'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR: ' \ +'--bellman-cuda=[]' \ +'--setup-compressor-key=[]' \ +'--path=[]:PATH: ' \ +'--region=[]:REGION:(us europe asia)' \ +'--mode=[]:MODE:(download generate)' \ +'--setup-keys=[]' \ +'--setup-database=[]:SETUP_DATABASE:(true false)' \ +'--prover-db-url=[Prover database url without database name]:PROVER_DB_URL: ' \ +'--prover-db-name=[Prover database name]:PROVER_DB_NAME: ' \ +'-u+[Use default database urls and names]:USE_DEFAULT:(true false)' \ +'--use-default=[Use default database urls and names]:USE_DEFAULT:(true false)' \ +'-d+[]:DONT_DROP:(true false)' \ +'--dont-drop=[]:DONT_DROP:(true false)' \ +'--cloud-type=[]:CLOUD_TYPE:(gcp local)' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--dev[]' \ +'(--bellman-cuda-dir)--clone[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(setup-keys) +_arguments "${_arguments_options[@]}" : \ +'--region=[]:REGION:(us europe asia)' \ +'--mode=[]:MODE:(download generate)' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'--component=[]:COMPONENT:(gateway witness-generator witness-vector-generator prover circuit-prover compressor prover-job-monitor)' \ +'--round=[]:ROUND:(all-rounds basic-circuits leaf-aggregation node-aggregation recursion-tip scheduler)' \ +'--threads=[]:THREADS: ' \ +'--max-allocation=[Memory allocation limit in bytes (for prover component)]:MAX_ALLOCATION: ' \ +'--witness-vector-generator-count=[]:WITNESS_VECTOR_GENERATOR_COUNT: ' \ +'--max-allocation=[]:MAX_ALLOCATION: ' \ +'--docker=[]:DOCKER:(true false)' \ +'--tag=[]:TAG: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(init-bellman-cuda) +_arguments "${_arguments_options[@]}" : \ +'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'(--bellman-cuda-dir)--clone[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(compressor-keys) +_arguments "${_arguments_options[@]}" : \ +'--path=[]:PATH: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__prover__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-prover-help-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) 
+_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init-bellman-cuda) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(compressor-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(server) +_arguments "${_arguments_options[@]}" : \ +'*--components=[Components of server to run]:COMPONENTS: ' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--genesis[Run server in genesis mode]' \ +'--build[Build server but don'\''t run it]' \ +'--uring[Enables uring support for RocksDB]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(external-node) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__external-node_commands" \ +"*::: :->external-node" \ +&& ret=0 + + case $state in + (external-node) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-external-node-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +'--db-url=[]:DB_URL: ' \ +'--db-name=[]:DB_NAME: ' \ +'--l1-rpc-url=[]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-u[Use default database urls and names]' \ +'--use-default[Use default database urls and names]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'*--components=[Components of server to run]:COMPONENTS: ' \ +'--enable-consensus=[Enable consensus]' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--reinit[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__external-node__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-external-node-help-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +'-o+[Enable Grafana]' \ +'--observability=[Enable Grafana]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ 
+'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(contract-verifier) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__contract-verifier_commands" \ +"*::: :->contract-verifier" \ +&& ret=0 + + case $state in + (contract-verifier) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-contract-verifier-command-$line[1]:" + case $line[1] in + (run) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--zksolc-version=[Version of zksolc to install]:ZKSOLC_VERSION: ' \ +'--zkvyper-version=[Version of zkvyper to install]:ZKVYPER_VERSION: ' \ +'--solc-version=[Version of solc to install]:SOLC_VERSION: ' \ +'--era-vm-solc-version=[Version of era vm solc to install]:ERA_VM_SOLC_VERSION: ' \ +'--vyper-version=[Version of vyper to install]:VYPER_VERSION: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--only[Install only provided compilers]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__contract-verifier__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-contract-verifier-help-command-$line[1]:" + case $line[1] in + (run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(portal) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(explorer) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__explorer_commands" \ +"*::: :->explorer" \ +&& ret=0 + + case $state in + (explorer) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-explorer-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run-backend) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores 
prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__explorer__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-explorer-help-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run-backend) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(consensus) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__consensus_commands" \ +"*::: :->consensus" \ +&& ret=0 + + case $state in + (consensus) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-consensus-command-$line[1]:" + case $line[1] in + (set-attester-committee) +_arguments "${_arguments_options[@]}" : \ +'--from-file=[Sets the attester committee in the consensus registry contract to the committee in the yaml file. File format is defined in \`commands/consensus/proto/mod.proto\`]:FROM_FILE:_files' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--from-genesis[Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(get-attester-committee) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__consensus__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-consensus-help-command-$line[1]:" + case $line[1] in + (set-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(get-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(update) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-c[Update only the config files]' \ +'--only-config[Update only the config files]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(markdown) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-command-$line[1]:" + case $line[1] in + (autocomplete) +_arguments
"${_arguments_options[@]}" : \ +&& ret=0 +;; +(ecosystem) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__ecosystem_commands" \ +"*::: :->ecosystem" \ +&& ret=0 + + case $state in + (ecosystem) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-ecosystem-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(chain) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__chain_commands" \ +"*::: :->chain" \ +&& ret=0 + + case $state in + (chain) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-chain-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__chain__init_commands" \ +"*::: :->init" \ +&& ret=0 + + case $state in + (init) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-chain-init-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(genesis) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__chain__genesis_commands" \ +"*::: :->genesis" \ +&& ret=0 + + case $state in + (genesis) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-chain-genesis-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(register-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-l2-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(accept-chain-ownership) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(initialize-bridges) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-consensus-registry) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-multicall3) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-upgrader) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-paymaster) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(update-token-multiplier-setter) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(dev) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev_commands" \ +"*::: :->dev" \ +&& ret=0 + + case $state in + (dev) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-command-$line[1]:" + case $line[1] in + (database) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__database_commands" \ +"*::: :->database" \ +&& ret=0 + + case $state in + (database) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-database-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +&& 
ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(test) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__test_commands" \ +"*::: :->test" \ +&& ret=0 + + case $state in + (test) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-test-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(clean) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__clean_commands" \ +"*::: :->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(snapshot) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(contracts) +_arguments 
"${_arguments_options[@]}" : \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(status) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__status_commands" \ +"*::: :->status" \ +&& ret=0 + + case $state in + (status) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-status-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(generate-genesis) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-prover-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init-bellman-cuda) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(compressor-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(external-node) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__external-node_commands" \ +"*::: :->external-node" \ +&& ret=0 + + case $state in + (external-node) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-external-node-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract-verifier) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__contract-verifier_commands" \ +"*::: :->contract-verifier" \ +&& ret=0 + + case $state in + (contract-verifier) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-contract-verifier-command-$line[1]:" + case $line[1] in + (run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(portal) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(explorer) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__explorer_commands" \ +"*::: :->explorer" \ +&& ret=0 + + case $state in + (explorer) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-explorer-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run-backend) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(consensus) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__consensus_commands" \ +"*::: :->consensus" \ +&& ret=0 + + case $state in + (consensus) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-consensus-command-$line[1]:" + case $line[1] in + (set-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& 
ret=0 +;; +(get-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(update) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(markdown) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +} + +(( $+functions[_zkstack_commands] )) || +_zkstack_commands() { + local commands; commands=( +'autocomplete:Create shell autocompletion files' \ +'ecosystem:Ecosystem related commands' \ +'chain:Chain related commands' \ +'dev:Supervisor related commands' \ +'prover:Prover related commands' \ +'server:Run server' \ +'external-node:External Node related commands' \ +'containers:Run containers for local development' \ +'contract-verifier:Run contract verifier' \ +'portal:Run dapp-portal' \ +'explorer:Run block-explorer' \ +'consensus:Consensus utilities' \ +'update:Update ZKsync' \ +'markdown:Print markdown help' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack commands' commands "$@" +} +(( $+functions[_zkstack__autocomplete_commands] )) || +_zkstack__autocomplete_commands() { + local commands; commands=() + _describe -t commands 'zkstack autocomplete commands' commands "$@" +} +(( $+functions[_zkstack__chain_commands] )) || +_zkstack__chain_commands() { + local commands; commands=( +'create:Create a new chain, setting the necessary configurations for later initialization' \ +'build-transactions:Create unsigned transactions for chain deployment' \ +'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \ +'genesis:Run server genesis' \ +'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \ +'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \ +'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \ +'initialize-bridges:Initialize bridges on L2' \ +'deploy-consensus-registry:Deploy L2 consensus registry' \ +'deploy-multicall3:Deploy L2 multicall3' \ +'deploy-upgrader:Deploy Default Upgrader' \ +'deploy-paymaster:Deploy paymaster smart contract' \ +'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__accept-chain-ownership_commands] )) || +_zkstack__chain__accept-chain-ownership_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain accept-chain-ownership commands' commands "$@" +} +(( $+functions[_zkstack__chain__build-transactions_commands] )) || +_zkstack__chain__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__chain__create_commands] )) || +_zkstack__chain__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain create commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-consensus-registry_commands] )) || +_zkstack__chain__deploy-consensus-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-consensus-registry commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-l2-contracts_commands] )) || +_zkstack__chain__deploy-l2-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-l2-contracts commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-multicall3_commands] )) || +_zkstack__chain__deploy-multicall3_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-multicall3 commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-paymaster_commands] )) || +_zkstack__chain__deploy-paymaster_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-paymaster commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-upgrader_commands] )) || +_zkstack__chain__deploy-upgrader_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-upgrader commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis_commands] )) || +_zkstack__chain__genesis_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain genesis commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help_commands] )) || +_zkstack__chain__genesis__help_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain genesis help commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help__help_commands] )) || +_zkstack__chain__genesis__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help help commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help__init-database_commands] )) || +_zkstack__chain__genesis__help__init-database_commands() { + local commands; commands=() + _describe -t 
commands 'zkstack chain genesis help init-database commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help__server_commands] )) || +_zkstack__chain__genesis__help__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help server commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__init-database_commands] )) || +_zkstack__chain__genesis__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis init-database commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__server_commands] )) || +_zkstack__chain__genesis__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis server commands' commands "$@" +} +(( $+functions[_zkstack__chain__help_commands] )) || +_zkstack__chain__help_commands() { + local commands; commands=( +'create:Create a new chain, setting the necessary configurations for later initialization' \ +'build-transactions:Create unsigned transactions for chain deployment' \ +'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \ +'genesis:Run server genesis' \ +'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \ +'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \ +'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \ +'initialize-bridges:Initialize bridges on L2' \ +'deploy-consensus-registry:Deploy L2 consensus registry' \ +'deploy-multicall3:Deploy L2 multicall3' \ +'deploy-upgrader:Deploy Default Upgrader' \ +'deploy-paymaster:Deploy paymaster smart contract' \ +'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain help commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__accept-chain-ownership_commands] )) || +_zkstack__chain__help__accept-chain-ownership_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help accept-chain-ownership commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__build-transactions_commands] )) || +_zkstack__chain__help__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__create_commands] )) || +_zkstack__chain__help__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help create commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-consensus-registry_commands] )) || +_zkstack__chain__help__deploy-consensus-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-consensus-registry commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-l2-contracts_commands] )) || +_zkstack__chain__help__deploy-l2-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-l2-contracts commands' commands "$@" +} +(( 
$+functions[_zkstack__chain__help__deploy-multicall3_commands] )) || +_zkstack__chain__help__deploy-multicall3_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-multicall3 commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-paymaster_commands] )) || +_zkstack__chain__help__deploy-paymaster_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-paymaster commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-upgrader_commands] )) || +_zkstack__chain__help__deploy-upgrader_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-upgrader commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__genesis_commands] )) || +_zkstack__chain__help__genesis_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ + ) + _describe -t commands 'zkstack chain help genesis commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__genesis__init-database_commands] )) || +_zkstack__chain__help__genesis__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help genesis init-database commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__genesis__server_commands] )) || +_zkstack__chain__help__genesis__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help genesis server commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__help_commands] )) || +_zkstack__chain__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help help commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__init_commands] )) || +_zkstack__chain__help__init_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ + ) + _describe -t commands 'zkstack chain help init commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__init__configs_commands] )) || +_zkstack__chain__help__init__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help init configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__initialize-bridges_commands] )) || +_zkstack__chain__help__initialize-bridges_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help initialize-bridges commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__register-chain_commands] )) || +_zkstack__chain__help__register-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help register-chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__update-token-multiplier-setter_commands] )) || +_zkstack__chain__help__update-token-multiplier-setter_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help update-token-multiplier-setter commands' commands "$@" +} +(( $+functions[_zkstack__chain__init_commands] )) || +_zkstack__chain__init_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain init commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__configs_commands] )) || +_zkstack__chain__init__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init configs commands' commands "$@" +} +(( 
$+functions[_zkstack__chain__init__help_commands] )) || +_zkstack__chain__init__help_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain init help commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help__configs_commands] )) || +_zkstack__chain__init__help__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init help configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help__help_commands] )) || +_zkstack__chain__init__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init help help commands' commands "$@" +} +(( $+functions[_zkstack__chain__initialize-bridges_commands] )) || +_zkstack__chain__initialize-bridges_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain initialize-bridges commands' commands "$@" +} +(( $+functions[_zkstack__chain__register-chain_commands] )) || +_zkstack__chain__register-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain register-chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__update-token-multiplier-setter_commands] )) || +_zkstack__chain__update-token-multiplier-setter_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain update-token-multiplier-setter commands' commands "$@" +} +(( $+functions[_zkstack__consensus_commands] )) || +_zkstack__consensus_commands() { + local commands; commands=( +'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \ +'get-attester-committee:Fetches the attester committee from the consensus registry contract' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack consensus commands' commands "$@" +} +(( $+functions[_zkstack__consensus__get-attester-committee_commands] )) || +_zkstack__consensus__get-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus get-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help_commands] )) || +_zkstack__consensus__help_commands() { + local commands; commands=( +'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \ +'get-attester-committee:Fetches the attester committee from the consensus registry contract' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack consensus help commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__get-attester-committee_commands] )) || +_zkstack__consensus__help__get-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help get-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__help_commands] )) || +_zkstack__consensus__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help help commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__set-attester-committee_commands] )) || +_zkstack__consensus__help__set-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help set-attester-committee commands' commands "$@" +} +(( 
$+functions[_zkstack__consensus__set-attester-committee_commands] )) || +_zkstack__consensus__set-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus set-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__containers_commands] )) || +_zkstack__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack containers commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier_commands] )) || +_zkstack__contract-verifier_commands() { + local commands; commands=( +'run:Run contract verifier' \ +'init:Download required binaries for contract verifier' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack contract-verifier commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help_commands] )) || +_zkstack__contract-verifier__help_commands() { + local commands; commands=( +'run:Run contract verifier' \ +'init:Download required binaries for contract verifier' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack contract-verifier help commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__help_commands] )) || +_zkstack__contract-verifier__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help help commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__init_commands] )) || +_zkstack__contract-verifier__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help init commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__run_commands] )) || +_zkstack__contract-verifier__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help run commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__init_commands] )) || +_zkstack__contract-verifier__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier init commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__run_commands] )) || +_zkstack__contract-verifier__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier run commands' commands "$@" +} +(( $+functions[_zkstack__dev_commands] )) || +_zkstack__dev_commands() { + local commands; commands=( +'database:Database related commands' \ +'test:Run tests' \ +'clean:Clean artifacts' \ +'snapshot:Snapshots creator' \ +'lint:Lint code' \ +'fmt:Format code' \ +'prover:Protocol version used by provers' \ +'contracts:Build contracts' \ +'config-writer:Overwrite general config' \ +'send-transactions:Send transactions from file' \ +'status:Get status of the server' \ +'generate-genesis:Generate new genesis file based on current contracts' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean_commands] )) || +_zkstack__dev__clean_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev clean commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__all_commands] )) || 
+_zkstack__dev__clean__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean all commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__containers_commands] )) || +_zkstack__dev__clean__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__contracts-cache_commands] )) || +_zkstack__dev__clean__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help_commands] )) || +_zkstack__dev__clean__help_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev clean help commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__all_commands] )) || +_zkstack__dev__clean__help__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help all commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__containers_commands] )) || +_zkstack__dev__clean__help__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__contracts-cache_commands] )) || +_zkstack__dev__clean__help__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__help_commands] )) || +_zkstack__dev__clean__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__config-writer_commands] )) || +_zkstack__dev__config-writer_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev config-writer commands' commands "$@" +} +(( $+functions[_zkstack__dev__contracts_commands] )) || +_zkstack__dev__contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__database_commands] )) || +_zkstack__dev__database_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' 
\ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev database commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__check-sqlx-data_commands] )) || +_zkstack__dev__database__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__drop_commands] )) || +_zkstack__dev__database__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help_commands] )) || +_zkstack__dev__database__help_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev database help commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__check-sqlx-data_commands] )) || +_zkstack__dev__database__help__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__drop_commands] )) || +_zkstack__dev__database__help__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__help_commands] )) || +_zkstack__dev__database__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__migrate_commands] )) || +_zkstack__dev__database__help__migrate_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help migrate commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__new-migration_commands] )) || +_zkstack__dev__database__help__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help new-migration commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__prepare_commands] )) || +_zkstack__dev__database__help__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help prepare commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__reset_commands] )) || +_zkstack__dev__database__help__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help reset commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__setup_commands] )) || +_zkstack__dev__database__help__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help setup commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__migrate_commands] )) || +_zkstack__dev__database__migrate_commands() { + 
local commands; commands=() + _describe -t commands 'zkstack dev database migrate commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__new-migration_commands] )) || +_zkstack__dev__database__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database new-migration commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__prepare_commands] )) || +_zkstack__dev__database__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database prepare commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__reset_commands] )) || +_zkstack__dev__database__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database reset commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__setup_commands] )) || +_zkstack__dev__database__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database setup commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt_commands] )) || +_zkstack__dev__fmt_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev fmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__contract_commands] )) || +_zkstack__dev__fmt__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt contract commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help_commands] )) || +_zkstack__dev__fmt__help_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev fmt help commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__contract_commands] )) || +_zkstack__dev__fmt__help__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help contract commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__help_commands] )) || +_zkstack__dev__fmt__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__prettier_commands] )) || +_zkstack__dev__fmt__help__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help prettier commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__rustfmt_commands] )) || +_zkstack__dev__fmt__help__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__prettier_commands] )) || +_zkstack__dev__fmt__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt prettier commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__rustfmt_commands] )) || +_zkstack__dev__fmt__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__generate-genesis_commands] )) || +_zkstack__dev__generate-genesis_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev generate-genesis commands' commands "$@" +} +(( $+functions[_zkstack__dev__help_commands] )) || +_zkstack__dev__help_commands() { + local commands; commands=( +'database:Database related commands' \ +'test:Run tests' \ +'clean:Clean 
artifacts' \ +'snapshot:Snapshots creator' \ +'lint:Lint code' \ +'fmt:Format code' \ +'prover:Protocol version used by provers' \ +'contracts:Build contracts' \ +'config-writer:Overwrite general config' \ +'send-transactions:Send transactions from file' \ +'status:Get status of the server' \ +'generate-genesis:Generate new genesis file based on current contracts' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev help commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean_commands] )) || +_zkstack__dev__help__clean_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ + ) + _describe -t commands 'zkstack dev help clean commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean__all_commands] )) || +_zkstack__dev__help__clean__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help clean all commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean__containers_commands] )) || +_zkstack__dev__help__clean__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help clean containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean__contracts-cache_commands] )) || +_zkstack__dev__help__clean__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help clean contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__config-writer_commands] )) || +_zkstack__dev__help__config-writer_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help config-writer commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__contracts_commands] )) || +_zkstack__dev__help__contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database_commands] )) || +_zkstack__dev__help__database_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' 
\ + ) + _describe -t commands 'zkstack dev help database commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__check-sqlx-data_commands] )) || +_zkstack__dev__help__database__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__drop_commands] )) || +_zkstack__dev__help__database__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__migrate_commands] )) || +_zkstack__dev__help__database__migrate_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database migrate commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__new-migration_commands] )) || +_zkstack__dev__help__database__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database new-migration commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__prepare_commands] )) || +_zkstack__dev__help__database__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database prepare commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__reset_commands] )) || +_zkstack__dev__help__database__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database reset commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__setup_commands] )) || +_zkstack__dev__help__database__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database setup commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt_commands] )) || +_zkstack__dev__help__fmt_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ + ) + _describe -t commands 'zkstack dev help fmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt__contract_commands] )) || +_zkstack__dev__help__fmt__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help fmt contract commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt__prettier_commands] )) || +_zkstack__dev__help__fmt__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help fmt prettier commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt__rustfmt_commands] )) || +_zkstack__dev__help__fmt__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help fmt rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__generate-genesis_commands] )) || +_zkstack__dev__help__generate-genesis_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help generate-genesis commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__help_commands] )) || +_zkstack__dev__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__lint_commands] )) || +_zkstack__dev__help__lint_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help lint commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover_commands] )) || +_zkstack__dev__help__prover_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ + ) + _describe -t 
commands 'zkstack dev help prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover__info_commands] )) || +_zkstack__dev__help__prover__info_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help prover info commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover__insert-batch_commands] )) || +_zkstack__dev__help__prover__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help prover insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover__insert-version_commands] )) || +_zkstack__dev__help__prover__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help prover insert-version commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__send-transactions_commands] )) || +_zkstack__dev__help__send-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help send-transactions commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__snapshot_commands] )) || +_zkstack__dev__help__snapshot_commands() { + local commands; commands=( +'create:' \ + ) + _describe -t commands 'zkstack dev help snapshot commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__snapshot__create_commands] )) || +_zkstack__dev__help__snapshot__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help snapshot create commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__status_commands] )) || +_zkstack__dev__help__status_commands() { + local commands; commands=( +'ports:Show used ports' \ + ) + _describe -t commands 'zkstack dev help status commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__status__ports_commands] )) || +_zkstack__dev__help__status__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help status ports commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test_commands] )) || +_zkstack__dev__help__test_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ + ) + _describe -t commands 'zkstack dev help test commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__build_commands] )) || +_zkstack__dev__help__test__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test build commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__fees_commands] )) || +_zkstack__dev__help__test__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test fees commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__integration_commands] )) || +_zkstack__dev__help__test__integration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test integration commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__l1-contracts_commands] )) || +_zkstack__dev__help__test__l1-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test l1-contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__loadtest_commands] )) 
|| +_zkstack__dev__help__test__loadtest_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test loadtest commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__prover_commands] )) || +_zkstack__dev__help__test__prover_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__recovery_commands] )) || +_zkstack__dev__help__test__recovery_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test recovery commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__revert_commands] )) || +_zkstack__dev__help__test__revert_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test revert commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__rust_commands] )) || +_zkstack__dev__help__test__rust_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test rust commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__upgrade_commands] )) || +_zkstack__dev__help__test__upgrade_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test upgrade commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__wallet_commands] )) || +_zkstack__dev__help__test__wallet_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test wallet commands' commands "$@" +} +(( $+functions[_zkstack__dev__lint_commands] )) || +_zkstack__dev__lint_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev lint commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover_commands] )) || +_zkstack__dev__prover_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help_commands] )) || +_zkstack__dev__prover__help_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev prover help commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__help_commands] )) || +_zkstack__dev__prover__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__info_commands] )) || +_zkstack__dev__prover__help__info_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help info commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__insert-batch_commands] )) || +_zkstack__dev__prover__help__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__insert-version_commands] )) || +_zkstack__dev__prover__help__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help insert-version commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__info_commands] )) || +_zkstack__dev__prover__info_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover info commands' commands "$@" +} +(( 
$+functions[_zkstack__dev__prover__insert-batch_commands] )) || +_zkstack__dev__prover__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__insert-version_commands] )) || +_zkstack__dev__prover__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover insert-version commands' commands "$@" +} +(( $+functions[_zkstack__dev__send-transactions_commands] )) || +_zkstack__dev__send-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev send-transactions commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot_commands] )) || +_zkstack__dev__snapshot_commands() { + local commands; commands=( +'create:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev snapshot commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__create_commands] )) || +_zkstack__dev__snapshot__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev snapshot create commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__help_commands] )) || +_zkstack__dev__snapshot__help_commands() { + local commands; commands=( +'create:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev snapshot help commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__help__create_commands] )) || +_zkstack__dev__snapshot__help__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev snapshot help create commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__help__help_commands] )) || +_zkstack__dev__snapshot__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev snapshot help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__status_commands] )) || +_zkstack__dev__status_commands() { + local commands; commands=( +'ports:Show used ports' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev status commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__help_commands] )) || +_zkstack__dev__status__help_commands() { + local commands; commands=( +'ports:Show used ports' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev status help commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__help__help_commands] )) || +_zkstack__dev__status__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev status help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__help__ports_commands] )) || +_zkstack__dev__status__help__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev status help ports commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__ports_commands] )) || +_zkstack__dev__status__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev status ports commands' commands "$@" +} +(( $+functions[_zkstack__dev__test_commands] )) || +_zkstack__dev__test_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, 
accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev test commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__build_commands] )) || +_zkstack__dev__test__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test build commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__fees_commands] )) || +_zkstack__dev__test__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test fees commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help_commands] )) || +_zkstack__dev__test__help_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev test help commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__build_commands] )) || +_zkstack__dev__test__help__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help build commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__fees_commands] )) || +_zkstack__dev__test__help__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help fees commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__help_commands] )) || +_zkstack__dev__test__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__integration_commands] )) || +_zkstack__dev__test__help__integration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help integration commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__l1-contracts_commands] )) || +_zkstack__dev__test__help__l1-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help l1-contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__loadtest_commands] )) || +_zkstack__dev__test__help__loadtest_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help loadtest commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__prover_commands] )) || +_zkstack__dev__test__help__prover_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__recovery_commands] )) || +_zkstack__dev__test__help__recovery_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help recovery commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__revert_commands] )) || +_zkstack__dev__test__help__revert_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help revert commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__rust_commands] )) || 
+_zkstack__dev__test__help__rust_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help rust commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__upgrade_commands] )) || +_zkstack__dev__test__help__upgrade_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help upgrade commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__wallet_commands] )) || +_zkstack__dev__test__help__wallet_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help wallet commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__integration_commands] )) || +_zkstack__dev__test__integration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test integration commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__l1-contracts_commands] )) || +_zkstack__dev__test__l1-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test l1-contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__loadtest_commands] )) || +_zkstack__dev__test__loadtest_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test loadtest commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__prover_commands] )) || +_zkstack__dev__test__prover_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__recovery_commands] )) || +_zkstack__dev__test__recovery_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test recovery commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__revert_commands] )) || +_zkstack__dev__test__revert_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test revert commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__rust_commands] )) || +_zkstack__dev__test__rust_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test rust commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__upgrade_commands] )) || +_zkstack__dev__test__upgrade_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test upgrade commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__wallet_commands] )) || +_zkstack__dev__test__wallet_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test wallet commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem_commands] )) || +_zkstack__ecosystem_commands() { + local commands; commands=( +'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \ +'build-transactions:Create transactions to build ecosystem contracts' \ +'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \ +'change-default-chain:Change the default chain' \ +'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack ecosystem commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__build-transactions_commands] )) || +_zkstack__ecosystem__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__change-default-chain_commands] 
)) || +_zkstack__ecosystem__change-default-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem change-default-chain commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__create_commands] )) || +_zkstack__ecosystem__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem create commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help_commands] )) || +_zkstack__ecosystem__help_commands() { + local commands; commands=( +'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \ +'build-transactions:Create transactions to build ecosystem contracts' \ +'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \ +'change-default-chain:Change the default chain' \ +'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack ecosystem help commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__build-transactions_commands] )) || +_zkstack__ecosystem__help__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__change-default-chain_commands] )) || +_zkstack__ecosystem__help__change-default-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help change-default-chain commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__create_commands] )) || +_zkstack__ecosystem__help__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help create commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__help_commands] )) || +_zkstack__ecosystem__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help help commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__init_commands] )) || +_zkstack__ecosystem__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help init commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__setup-observability_commands] )) || +_zkstack__ecosystem__help__setup-observability_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help setup-observability commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__init_commands] )) || +_zkstack__ecosystem__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem init commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__setup-observability_commands] )) || +_zkstack__ecosystem__setup-observability_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem setup-observability commands' commands "$@" +} +(( $+functions[_zkstack__explorer_commands] )) || +_zkstack__explorer_commands() { + local commands; commands=( +'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' \ +'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. 
Uses default chain, unless --chain is passed' \ +'run:Run explorer app' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack explorer commands' commands "$@" +} +(( $+functions[_zkstack__explorer__help_commands] )) || +_zkstack__explorer__help_commands() { + local commands; commands=( +'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' \ +'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' \ +'run:Run explorer app' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack explorer help commands' commands "$@" +} +(( $+functions[_zkstack__explorer__help__help_commands] )) || +_zkstack__explorer__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer help help commands' commands "$@" +} +(( $+functions[_zkstack__explorer__help__init_commands] )) || +_zkstack__explorer__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer help init commands' commands "$@" +} +(( $+functions[_zkstack__explorer__help__run_commands] )) || +_zkstack__explorer__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer help run commands' commands "$@" +} +(( $+functions[_zkstack__explorer__help__run-backend_commands] )) || +_zkstack__explorer__help__run-backend_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer help run-backend commands' commands "$@" +} +(( $+functions[_zkstack__explorer__init_commands] )) || +_zkstack__explorer__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer init commands' commands "$@" +} +(( $+functions[_zkstack__explorer__run_commands] )) || +_zkstack__explorer__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer run commands' commands "$@" +} +(( $+functions[_zkstack__explorer__run-backend_commands] )) || +_zkstack__explorer__run-backend_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer run-backend commands' commands "$@" +} +(( $+functions[_zkstack__external-node_commands] )) || +_zkstack__external-node_commands() { + local commands; commands=( +'configs:Prepare configs for EN' \ +'init:Init databases' \ +'run:Run external node' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack external-node commands' commands "$@" +} +(( $+functions[_zkstack__external-node__configs_commands] )) || +_zkstack__external-node__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node configs commands' commands "$@" +} +(( $+functions[_zkstack__external-node__help_commands] )) || +_zkstack__external-node__help_commands() { + local commands; commands=( +'configs:Prepare configs for EN' \ +'init:Init databases' \ +'run:Run external node' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack external-node help commands' commands "$@" +} +(( $+functions[_zkstack__external-node__help__configs_commands] )) || +_zkstack__external-node__help__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node help configs commands' commands "$@" +} +(( 
$+functions[_zkstack__external-node__help__help_commands] )) || +_zkstack__external-node__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node help help commands' commands "$@" +} +(( $+functions[_zkstack__external-node__help__init_commands] )) || +_zkstack__external-node__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node help init commands' commands "$@" +} +(( $+functions[_zkstack__external-node__help__run_commands] )) || +_zkstack__external-node__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node help run commands' commands "$@" +} +(( $+functions[_zkstack__external-node__init_commands] )) || +_zkstack__external-node__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node init commands' commands "$@" +} +(( $+functions[_zkstack__external-node__run_commands] )) || +_zkstack__external-node__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node run commands' commands "$@" +} +(( $+functions[_zkstack__help_commands] )) || +_zkstack__help_commands() { + local commands; commands=( +'autocomplete:Create shell autocompletion files' \ +'ecosystem:Ecosystem related commands' \ +'chain:Chain related commands' \ +'dev:Supervisor related commands' \ +'prover:Prover related commands' \ +'server:Run server' \ +'external-node:External Node related commands' \ +'containers:Run containers for local development' \ +'contract-verifier:Run contract verifier' \ +'portal:Run dapp-portal' \ +'explorer:Run block-explorer' \ +'consensus:Consensus utilities' \ +'update:Update ZKsync' \ +'markdown:Print markdown help' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack help commands' commands "$@" +} +(( $+functions[_zkstack__help__autocomplete_commands] )) || +_zkstack__help__autocomplete_commands() { + local commands; commands=() + _describe -t commands 'zkstack help autocomplete commands' commands "$@" +} +(( $+functions[_zkstack__help__chain_commands] )) || +_zkstack__help__chain_commands() { + local commands; commands=( +'create:Create a new chain, setting the necessary configurations for later initialization' \ +'build-transactions:Create unsigned transactions for chain deployment' \ +'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \ +'genesis:Run server genesis' \ +'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \ +'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \ +'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \ +'initialize-bridges:Initialize bridges on L2' \ +'deploy-consensus-registry:Deploy L2 consensus registry' \ +'deploy-multicall3:Deploy L2 multicall3' \ +'deploy-upgrader:Deploy Default Upgrader' \ +'deploy-paymaster:Deploy paymaster smart contract' \ +'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \ + ) + _describe -t commands 'zkstack help chain commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__accept-chain-ownership_commands] )) || +_zkstack__help__chain__accept-chain-ownership_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain accept-chain-ownership commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__build-transactions_commands] )) || +_zkstack__help__chain__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__create_commands] )) || +_zkstack__help__chain__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain create commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__deploy-consensus-registry_commands] )) || +_zkstack__help__chain__deploy-consensus-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain deploy-consensus-registry commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__deploy-l2-contracts_commands] )) || +_zkstack__help__chain__deploy-l2-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain deploy-l2-contracts commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__deploy-multicall3_commands] )) || +_zkstack__help__chain__deploy-multicall3_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain deploy-multicall3 commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__deploy-paymaster_commands] )) || +_zkstack__help__chain__deploy-paymaster_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain deploy-paymaster commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__deploy-upgrader_commands] )) || +_zkstack__help__chain__deploy-upgrader_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain deploy-upgrader commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__genesis_commands] )) || +_zkstack__help__chain__genesis_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ + ) + _describe -t commands 'zkstack help chain genesis commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__genesis__init-database_commands] )) || +_zkstack__help__chain__genesis__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain genesis init-database commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__genesis__server_commands] )) || +_zkstack__help__chain__genesis__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain genesis server commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__init_commands] )) || +_zkstack__help__chain__init_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ + ) + _describe -t commands 'zkstack help chain init commands' commands "$@" +} +(( 
$+functions[_zkstack__help__chain__init__configs_commands] )) || +_zkstack__help__chain__init__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain init configs commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__initialize-bridges_commands] )) || +_zkstack__help__chain__initialize-bridges_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain initialize-bridges commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__register-chain_commands] )) || +_zkstack__help__chain__register-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain register-chain commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__update-token-multiplier-setter_commands] )) || +_zkstack__help__chain__update-token-multiplier-setter_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain update-token-multiplier-setter commands' commands "$@" +} +(( $+functions[_zkstack__help__consensus_commands] )) || +_zkstack__help__consensus_commands() { + local commands; commands=( +'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \ +'get-attester-committee:Fetches the attester committee from the consensus registry contract' \ + ) + _describe -t commands 'zkstack help consensus commands' commands "$@" +} +(( $+functions[_zkstack__help__consensus__get-attester-committee_commands] )) || +_zkstack__help__consensus__get-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack help consensus get-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__help__consensus__set-attester-committee_commands] )) || +_zkstack__help__consensus__set-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack help consensus set-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__help__containers_commands] )) || +_zkstack__help__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack help containers commands' commands "$@" +} +(( $+functions[_zkstack__help__contract-verifier_commands] )) || +_zkstack__help__contract-verifier_commands() { + local commands; commands=( +'run:Run contract verifier' \ +'init:Download required binaries for contract verifier' \ + ) + _describe -t commands 'zkstack help contract-verifier commands' commands "$@" +} +(( $+functions[_zkstack__help__contract-verifier__init_commands] )) || +_zkstack__help__contract-verifier__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack help contract-verifier init commands' commands "$@" +} +(( $+functions[_zkstack__help__contract-verifier__run_commands] )) || +_zkstack__help__contract-verifier__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack help contract-verifier run commands' commands "$@" +} +(( $+functions[_zkstack__help__dev_commands] )) || +_zkstack__help__dev_commands() { + local commands; commands=( +'database:Database related commands' \ +'test:Run tests' \ +'clean:Clean artifacts' \ +'snapshot:Snapshots creator' \ +'lint:Lint code' \ +'fmt:Format code' \ +'prover:Protocol version used by provers' \ +'contracts:Build contracts' \ +'config-writer:Overwrite general config' \ +'send-transactions:Send transactions from file' \ +'status:Get status of the server' \ +'generate-genesis:Generate new genesis file based on 
current contracts' \ + ) + _describe -t commands 'zkstack help dev commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__clean_commands] )) || +_zkstack__help__dev__clean_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ + ) + _describe -t commands 'zkstack help dev clean commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__clean__all_commands] )) || +_zkstack__help__dev__clean__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev clean all commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__clean__containers_commands] )) || +_zkstack__help__dev__clean__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev clean containers commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__clean__contracts-cache_commands] )) || +_zkstack__help__dev__clean__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev clean contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__config-writer_commands] )) || +_zkstack__help__dev__config-writer_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev config-writer commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__contracts_commands] )) || +_zkstack__help__dev__contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev contracts commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database_commands] )) || +_zkstack__help__dev__database_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' 
\ + ) + _describe -t commands 'zkstack help dev database commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__check-sqlx-data_commands] )) || +_zkstack__help__dev__database__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__drop_commands] )) || +_zkstack__help__dev__database__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database drop commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__migrate_commands] )) || +_zkstack__help__dev__database__migrate_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database migrate commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__new-migration_commands] )) || +_zkstack__help__dev__database__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database new-migration commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__prepare_commands] )) || +_zkstack__help__dev__database__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database prepare commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__reset_commands] )) || +_zkstack__help__dev__database__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database reset commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__setup_commands] )) || +_zkstack__help__dev__database__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database setup commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__fmt_commands] )) || +_zkstack__help__dev__fmt_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ + ) + _describe -t commands 'zkstack help dev fmt commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__fmt__contract_commands] )) || +_zkstack__help__dev__fmt__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev fmt contract commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__fmt__prettier_commands] )) || +_zkstack__help__dev__fmt__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev fmt prettier commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__fmt__rustfmt_commands] )) || +_zkstack__help__dev__fmt__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev fmt rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__generate-genesis_commands] )) || +_zkstack__help__dev__generate-genesis_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev generate-genesis commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__lint_commands] )) || +_zkstack__help__dev__lint_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev lint commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__prover_commands] )) || +_zkstack__help__dev__prover_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ + ) + _describe -t commands 'zkstack help dev prover commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__prover__info_commands] )) || +_zkstack__help__dev__prover__info_commands() { + local commands; 
commands=() + _describe -t commands 'zkstack help dev prover info commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__prover__insert-batch_commands] )) || +_zkstack__help__dev__prover__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev prover insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__prover__insert-version_commands] )) || +_zkstack__help__dev__prover__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev prover insert-version commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__send-transactions_commands] )) || +_zkstack__help__dev__send-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev send-transactions commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__snapshot_commands] )) || +_zkstack__help__dev__snapshot_commands() { + local commands; commands=( +'create:' \ + ) + _describe -t commands 'zkstack help dev snapshot commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__snapshot__create_commands] )) || +_zkstack__help__dev__snapshot__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev snapshot create commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__status_commands] )) || +_zkstack__help__dev__status_commands() { + local commands; commands=( +'ports:Show used ports' \ + ) + _describe -t commands 'zkstack help dev status commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__status__ports_commands] )) || +_zkstack__help__dev__status__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev status ports commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test_commands] )) || +_zkstack__help__dev__test_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ + ) + _describe -t commands 'zkstack help dev test commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__build_commands] )) || +_zkstack__help__dev__test__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test build commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__fees_commands] )) || +_zkstack__help__dev__test__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test fees commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__integration_commands] )) || +_zkstack__help__dev__test__integration_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test integration commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__l1-contracts_commands] )) || +_zkstack__help__dev__test__l1-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test l1-contracts commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__loadtest_commands] )) || +_zkstack__help__dev__test__loadtest_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test loadtest commands' commands "$@" +} +(( 
$+functions[_zkstack__help__dev__test__prover_commands] )) || +_zkstack__help__dev__test__prover_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test prover commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__recovery_commands] )) || +_zkstack__help__dev__test__recovery_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test recovery commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__revert_commands] )) || +_zkstack__help__dev__test__revert_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test revert commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__rust_commands] )) || +_zkstack__help__dev__test__rust_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test rust commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__upgrade_commands] )) || +_zkstack__help__dev__test__upgrade_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test upgrade commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__wallet_commands] )) || +_zkstack__help__dev__test__wallet_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test wallet commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem_commands] )) || +_zkstack__help__ecosystem_commands() { + local commands; commands=( +'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \ +'build-transactions:Create transactions to build ecosystem contracts' \ +'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \ +'change-default-chain:Change the default chain' \ +'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \ + ) + _describe -t commands 'zkstack help ecosystem commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem__build-transactions_commands] )) || +_zkstack__help__ecosystem__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack help ecosystem build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem__change-default-chain_commands] )) || +_zkstack__help__ecosystem__change-default-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack help ecosystem change-default-chain commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem__create_commands] )) || +_zkstack__help__ecosystem__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack help ecosystem create commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem__init_commands] )) || +_zkstack__help__ecosystem__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack help ecosystem init commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem__setup-observability_commands] )) || +_zkstack__help__ecosystem__setup-observability_commands() { + local commands; commands=() + _describe -t commands 'zkstack help ecosystem setup-observability commands' commands "$@" +} +(( $+functions[_zkstack__help__explorer_commands] )) || +_zkstack__help__explorer_commands() { + local commands; commands=( +'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). 
Runs for all chains, unless --chain is passed' \ +'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' \ +'run:Run explorer app' \ + ) + _describe -t commands 'zkstack help explorer commands' commands "$@" +} +(( $+functions[_zkstack__help__explorer__init_commands] )) || +_zkstack__help__explorer__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack help explorer init commands' commands "$@" +} +(( $+functions[_zkstack__help__explorer__run_commands] )) || +_zkstack__help__explorer__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack help explorer run commands' commands "$@" +} +(( $+functions[_zkstack__help__explorer__run-backend_commands] )) || +_zkstack__help__explorer__run-backend_commands() { + local commands; commands=() + _describe -t commands 'zkstack help explorer run-backend commands' commands "$@" +} +(( $+functions[_zkstack__help__external-node_commands] )) || +_zkstack__help__external-node_commands() { + local commands; commands=( +'configs:Prepare configs for EN' \ +'init:Init databases' \ +'run:Run external node' \ + ) + _describe -t commands 'zkstack help external-node commands' commands "$@" +} +(( $+functions[_zkstack__help__external-node__configs_commands] )) || +_zkstack__help__external-node__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack help external-node configs commands' commands "$@" +} +(( $+functions[_zkstack__help__external-node__init_commands] )) || +_zkstack__help__external-node__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack help external-node init commands' commands "$@" +} +(( $+functions[_zkstack__help__external-node__run_commands] )) || +_zkstack__help__external-node__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack help external-node run commands' commands "$@" +} +(( $+functions[_zkstack__help__help_commands] )) || +_zkstack__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack help help commands' commands "$@" +} +(( $+functions[_zkstack__help__markdown_commands] )) || +_zkstack__help__markdown_commands() { + local commands; commands=() + _describe -t commands 'zkstack help markdown commands' commands "$@" +} +(( $+functions[_zkstack__help__portal_commands] )) || +_zkstack__help__portal_commands() { + local commands; commands=() + _describe -t commands 'zkstack help portal commands' commands "$@" +} +(( $+functions[_zkstack__help__prover_commands] )) || +_zkstack__help__prover_commands() { + local commands; commands=( +'init:Initialize prover' \ +'setup-keys:Generate setup keys' \ +'run:Run prover' \ +'init-bellman-cuda:Initialize bellman-cuda' \ +'compressor-keys:Download compressor keys' \ + ) + _describe -t commands 'zkstack help prover commands' commands "$@" +} +(( $+functions[_zkstack__help__prover__compressor-keys_commands] )) || +_zkstack__help__prover__compressor-keys_commands() { + local commands; commands=() + _describe -t commands 'zkstack help prover compressor-keys commands' commands "$@" +} +(( $+functions[_zkstack__help__prover__init_commands] )) || +_zkstack__help__prover__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack help prover init commands' commands "$@" +} +(( $+functions[_zkstack__help__prover__init-bellman-cuda_commands] )) || +_zkstack__help__prover__init-bellman-cuda_commands() { + local commands; commands=() + _describe 
-t commands 'zkstack help prover init-bellman-cuda commands' commands "$@" +} +(( $+functions[_zkstack__help__prover__run_commands] )) || +_zkstack__help__prover__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack help prover run commands' commands "$@" +} +(( $+functions[_zkstack__help__prover__setup-keys_commands] )) || +_zkstack__help__prover__setup-keys_commands() { + local commands; commands=() + _describe -t commands 'zkstack help prover setup-keys commands' commands "$@" +} +(( $+functions[_zkstack__help__server_commands] )) || +_zkstack__help__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack help server commands' commands "$@" +} +(( $+functions[_zkstack__help__update_commands] )) || +_zkstack__help__update_commands() { + local commands; commands=() + _describe -t commands 'zkstack help update commands' commands "$@" +} +(( $+functions[_zkstack__markdown_commands] )) || +_zkstack__markdown_commands() { + local commands; commands=() + _describe -t commands 'zkstack markdown commands' commands "$@" +} +(( $+functions[_zkstack__portal_commands] )) || +_zkstack__portal_commands() { + local commands; commands=() + _describe -t commands 'zkstack portal commands' commands "$@" +} +(( $+functions[_zkstack__prover_commands] )) || +_zkstack__prover_commands() { + local commands; commands=( +'init:Initialize prover' \ +'setup-keys:Generate setup keys' \ +'run:Run prover' \ +'init-bellman-cuda:Initialize bellman-cuda' \ +'compressor-keys:Download compressor keys' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack prover commands' commands "$@" +} +(( $+functions[_zkstack__prover__compressor-keys_commands] )) || +_zkstack__prover__compressor-keys_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover compressor-keys commands' commands "$@" +} +(( $+functions[_zkstack__prover__help_commands] )) || +_zkstack__prover__help_commands() { + local commands; commands=( +'init:Initialize prover' \ +'setup-keys:Generate setup keys' \ +'run:Run prover' \ +'init-bellman-cuda:Initialize bellman-cuda' \ +'compressor-keys:Download compressor keys' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack prover help commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__compressor-keys_commands] )) || +_zkstack__prover__help__compressor-keys_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help compressor-keys commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__help_commands] )) || +_zkstack__prover__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help help commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__init_commands] )) || +_zkstack__prover__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help init commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__init-bellman-cuda_commands] )) || +_zkstack__prover__help__init-bellman-cuda_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help init-bellman-cuda commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__run_commands] )) || +_zkstack__prover__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help run commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__setup-keys_commands] )) || 
+_zkstack__prover__help__setup-keys_commands() {
+ local commands; commands=()
+ _describe -t commands 'zkstack prover help setup-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__init_commands] )) ||
+_zkstack__prover__init_commands() {
+ local commands; commands=()
+ _describe -t commands 'zkstack prover init commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__init-bellman-cuda_commands] )) ||
+_zkstack__prover__init-bellman-cuda_commands() {
+ local commands; commands=()
+ _describe -t commands 'zkstack prover init-bellman-cuda commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__run_commands] )) ||
+_zkstack__prover__run_commands() {
+ local commands; commands=()
+ _describe -t commands 'zkstack prover run commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__setup-keys_commands] )) ||
+_zkstack__prover__setup-keys_commands() {
+ local commands; commands=()
+ _describe -t commands 'zkstack prover setup-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__server_commands] )) ||
+_zkstack__server_commands() {
+ local commands; commands=()
+ _describe -t commands 'zkstack server commands' commands "$@"
+}
+(( $+functions[_zkstack__update_commands] )) ||
+_zkstack__update_commands() {
+ local commands; commands=()
+ _describe -t commands 'zkstack update commands' commands "$@"
+}
+
+if [ "$funcstack[1]" = "_zkstack" ]; then
+ _zkstack "$@"
+else
+ compdef _zkstack zkstack
+fi
diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.fish b/zkstack_cli/crates/zkstack/completion/zkstack.fish
new file mode 100644
index 00000000000..d490085e615
--- /dev/null
+++ b/zkstack_cli/crates/zkstack/completion/zkstack.fish
@@ -0,0 +1,701 @@
+# Print an optspec for argparse to handle cmd's options that are independent of any subcommand.
+function __fish_zkstack_global_optspecs
+ string join \n v/verbose chain= ignore-prerequisites h/help V/version
+end
+
+function __fish_zkstack_needs_command
+ # Figure out if the current invocation already has a command.
+ set -l cmd (commandline -opc)
+ set -e cmd[1]
+ argparse -s (__fish_zkstack_global_optspecs) -- $cmd 2>/dev/null
+ or return
+ if set -q argv[1]
+ # Also print the command, so this can be used to figure out what it is.
+ echo $argv[1] + return 1 + end + return 0 +end + +function __fish_zkstack_using_subcommand + set -l cmd (__fish_zkstack_needs_command) + test -z "$cmd" + and return 1 + contains -- $cmd[1] $argv +end + +complete -c zkstack -n "__fish_zkstack_needs_command" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_needs_command" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_needs_command" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_needs_command" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_needs_command" -s V -l version -d 'Print version' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "autocomplete" -d 'Create shell autocompletion files' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "ecosystem" -d 'Ecosystem related commands' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "chain" -d 'Chain related commands' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "dev" -d 'Supervisor related commands' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "prover" -d 'Prover related commands' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "server" -d 'Run server' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "external-node" -d 'External Node related commands' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "containers" -d 'Run containers for local development' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "contract-verifier" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "portal" -d 'Run dapp-portal' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "explorer" -d 'Run block-explorer' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "consensus" -d 'Consensus utilities' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "update" -d 'Update ZKsync' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "markdown" -d 'Print markdown help' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l generate -d 'The shell to generate the autocomplete script for' -r -f -a "{bash\t'',elvish\t'',fish\t'',powershell\t'',zsh\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s o -l out -d 'The out directory to write the autocomplete script to' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init 
change-default-chain setup-observability help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "change-default-chain" -d 'Change the default chain' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l ecosystem-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l l1-network -d 'L1 Network' -r -f -a "{localhost\t'',sepolia\t'',holesky\t'',mainnet\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l link-to-code -d 'Code link' -r -f -a "(__fish_complete_directories)" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain-id -d 'Chain ID' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l prover-mode -d 'Prover options' -r -f -a "{no-proofs\t'',gpu\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l wallet-creation -d 'Wallet options' -r -f -a "{localhost\t'Load wallets from localhost mnemonic, they are funded for localhost env',random\t'Generate random wallets',empty\t'Generate placeholder wallets',in-file\t'Specify file with wallets'}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l wallet-path -d 'Wallet path' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l 
l1-batch-commit-data-generator-mode -d 'Commit data generation mode' -r -f -a "{rollup\t'',validium\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-address -d 'Base token address' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l start-containers -d 'Start reth and postgres containers after creation' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l legacy-bridge +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l sender -d 'Address of the transaction sender' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s o -l out -d 'Output directory for the generated files' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l resume +complete -c zkstack -n 
"__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-erc20 -d 'Deploy ERC20 contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-ecosystem -d 'Deploy ecosystem contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ecosystem-contracts-path -d 'Path to ecosystem contracts' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-paymaster -d 'Deploy Paymaster contract' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l server-db-url -d 'Server database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l server-db-name -d 'Server database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s o -l observability -d 'Enable Grafana' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s u -l use-default -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s d -l dont-drop +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ecosystem-only -d 'Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` 
subcommand)' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l dev -d 'Deploy ecosystem using all defaults. Suitable for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l no-port-reallocation -d 'Do not reallocate ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "change-default-chain" -d 'Change the default chain' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry 
deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. 
Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain 
deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain-id -d 'Chain ID' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l prover-mode -d 'Prover options' -r -f -a "{no-proofs\t'',gpu\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l wallet-creation -d 'Wallet options' -r -f -a "{localhost\t'Load wallets from localhost mnemonic, they are funded for localhost env',random\t'Generate random wallets',empty\t'Generate placeholder wallets',in-file\t'Specify file with wallets'}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l wallet-path -d 'Wallet path' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l l1-batch-commit-data-generator-mode -d 'Commit data generation mode' -r -f -a "{rollup\t'',validium\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-address -d 'Base token address' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l legacy-bridge +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s o -l out -d 'Output directory for the generated files' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n 
"__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l server-db-url -d 'Server database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l server-db-name -d 'Server database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l deploy-paymaster -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s u -l use-default -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s d -l dont-drop +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l no-port-reallocation -d 'Do not reallocate ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s v -l 
verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -f -a "configs" -d 'Initialize chain configs' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l server-db-url -d 'Server database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l server-db-name -d 'Server database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s u -l use-default -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s d -l dont-drop +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "init-database" -d 'Initialize databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "server" -d 'Runs server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l resume +complete -c 
zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and 
__fish_seen_subcommand_from accept-chain-ownership" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from 
deploy-consensus-registry" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand 
chain; and __fish_seen_subcommand_from deploy-upgrader" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and 
__fish_seen_subcommand_from update-token-multiplier-setter" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "clean" -d 'Clean artifacts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "lint" -d 'Lint code' 
+complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "check-sqlx-data" -d 'Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "drop" -d 'Drop databases. If no databases are selected, all databases will be dropped.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "migrate" -d 'Migrate databases. If no databases are selected, all databases will be migrated.' 
+complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "new-migration" -d 'Create new migration' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "prepare" -d 'Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "reset" -d 'Reset databases. If no databases are selected, all databases will be reset.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "setup" -d 'Setup databases. If no databases are selected, all databases will be setup.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "integration" -d 'Run integration tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "fees" -d 'Run fees test' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "revert" -d 'Run revert tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "recovery" -d 'Run recovery tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "upgrade" -d 'Run upgrade tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "build" -d 'Build all test dependencies' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "rust" -d 'Run unit-tests, accepts optional cargo test flags' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "l1-contracts" -d 'Run L1 contracts tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "prover" -d 'Run prover tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "wallet" -d 'Print test wallets information' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "loadtest" -d 'Run loadtest' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n 
"__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "all" -d 'Remove containers and contracts cache' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "containers" -d 'Remove containers and docker volumes' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "contracts-cache" -d 'Remove contracts caches' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -f -a "create" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s t -l targets -r -f -a "{md\t'',sol\t'',js\t'',ts\t'',rs\t'',contracts\t'',autocompletion\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s c -l check +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s c -l check +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "rustfmt" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "contract" +complete -c zkstack 
-n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "prettier" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "info" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "insert-batch" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "insert-version" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l l1-contracts -d 'Build L1 contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l l2-contracts -d 'Build L2 contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l system-contracts -d 'Build system contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l test-contracts -d 'Build test contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s p -l path -d 'Path to the config file to override' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l file -r -F +complete -c 
zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l private-key -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l l1-rpc-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l confirmations -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s u -l url -d 'URL of the health check endpoint' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -f -a "ports" -d 'Show used ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "clean" -d 'Clean artifacts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "prover" -d 'Protocol 
version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l proof-store-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bucket-base-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l credentials-file -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bucket-name -r +complete -c zkstack -n 
"__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l location -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l project-id -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l shall-save-to-public-bucket -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-store-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-bucket-base-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-credentials-file -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-bucket-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-location -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-project-id -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bellman-cuda-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bellman-cuda -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-compressor-key -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l path -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l region -r -f -a "{us\t'',europe\t'',asia\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l mode -r -f -a "{download\t'',generate\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-keys -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-database -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l prover-db-url -d 'Prover database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l prover-db-name -d 'Prover database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s u -l use-default -d 'Use default database urls and names' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s d -l dont-drop -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l cloud-type -r -f -a "{gcp\t'',local\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l dev +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l clone +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and 
__fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l region -r -f -a "{us\t'',europe\t'',asia\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l mode -r -f -a "{download\t'',generate\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l component -r -f -a "{gateway\t'',witness-generator\t'',witness-vector-generator\t'',prover\t'',circuit-prover\t'',compressor\t'',prover-job-monitor\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l round -r -f -a "{all-rounds\t'',basic-circuits\t'',leaf-aggregation\t'',node-aggregation\t'',recursion-tip\t'',scheduler\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l threads -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l max-allocation -d 'Memory allocation limit in bytes (for prover component)' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l witness-vector-generator-count -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l max-allocation -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l docker -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l tag -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l bellman-cuda-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l clone +complete -c 
zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l path -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l components -d 'Components of server to run' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -s a -l additional-args -d 'Additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l genesis -d 'Run server in genesis mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l build -d 'Build server but don\'t run it' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l uring -d 'Enables uring support for RocksDB' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs 
init run help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l db-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l db-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l l1-rpc-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s u -l use-default -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l components -d 'Components of server to run' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l enable-consensus -d 'Enable consensus' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s a -l additional-args -d 'Additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l reinit +complete -c zkstack -n "__fish_zkstack_using_subcommand 
external-node; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s o -l observability -d 'Enable Grafana' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l zksolc-version -d 
'Version of zksolc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l zkvyper-version -d 'Version of zkvyper to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l solc-version -d 'Version of solc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l era-vm-solc-version -d 'Version of era vm solc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l vyper-version -d 'Version of vyper to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l only -d 'Install only provided compilers' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). 
Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. 
Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l from-file -d 'Sets the attester committee in the consensus registry contract to the committee in the yaml file. 
File format is definied in `commands/consensus/proto/mod.proto`' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l from-genesis -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s c -l only-config -d 'Update only the config files' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "autocomplete" -d 'Create shell autocompletion files' +complete -c zkstack -n 
"__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "ecosystem" -d 'Ecosystem related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "chain" -d 'Chain related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "dev" -d 'Supervisor related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "prover" -d 'Prover related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "server" -d 'Run server' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "external-node" -d 'External Node related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "containers" -d 'Run containers for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "contract-verifier" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "portal" -d 'Run dapp-portal' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "explorer" -d 'Run block-explorer' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "consensus" -d 'Consensus utilities' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "update" -d 'Update ZKsync' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal 
explorer consensus update markdown help" -f -a "markdown" -d 'Print markdown help' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "change-default-chain" -d 'Change the default chain' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "clean" -d 'Clean artifacts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "compressor-keys" -d 
'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from contract-verifier" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from contract-verifier" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from consensus" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from consensus" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.sh b/zkstack_cli/crates/zkstack/completion/zkstack.sh new file mode 100644 index 00000000000..27639acd50b --- /dev/null +++ b/zkstack_cli/crates/zkstack/completion/zkstack.sh @@ -0,0 +1,6998 @@ +_zkstack() { + local i cur prev opts cmd + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + prev="${COMP_WORDS[COMP_CWORD-1]}" + cmd="" + opts="" + + for i in ${COMP_WORDS[@]} + do + case "${cmd},${i}" in + ",$1") + cmd="zkstack" + ;; + zkstack,autocomplete) + cmd="zkstack__autocomplete" + ;; + zkstack,chain) + cmd="zkstack__chain" + ;; + zkstack,consensus) + cmd="zkstack__consensus" + ;; + zkstack,containers) + cmd="zkstack__containers" + ;; + zkstack,contract-verifier) + cmd="zkstack__contract__verifier" + ;; + zkstack,dev) + cmd="zkstack__dev" + ;; + zkstack,ecosystem) + cmd="zkstack__ecosystem" + ;; + zkstack,explorer) + cmd="zkstack__explorer" + ;; + zkstack,external-node) + cmd="zkstack__external__node" + ;; + zkstack,help) + cmd="zkstack__help" + ;; + zkstack,markdown) + cmd="zkstack__markdown" + ;; + zkstack,portal) + cmd="zkstack__portal" + ;; + zkstack,prover) + cmd="zkstack__prover" + ;; + zkstack,server) + cmd="zkstack__server" + ;; + zkstack,update) + cmd="zkstack__update" + ;; + zkstack__chain,accept-chain-ownership) + cmd="zkstack__chain__accept__chain__ownership" + ;; + zkstack__chain,build-transactions) + cmd="zkstack__chain__build__transactions" + ;; + zkstack__chain,create) + cmd="zkstack__chain__create" + ;; + zkstack__chain,deploy-consensus-registry) + cmd="zkstack__chain__deploy__consensus__registry" + ;; 
+ zkstack__chain,deploy-l2-contracts) + cmd="zkstack__chain__deploy__l2__contracts" + ;; + zkstack__chain,deploy-multicall3) + cmd="zkstack__chain__deploy__multicall3" + ;; + zkstack__chain,deploy-paymaster) + cmd="zkstack__chain__deploy__paymaster" + ;; + zkstack__chain,deploy-upgrader) + cmd="zkstack__chain__deploy__upgrader" + ;; + zkstack__chain,genesis) + cmd="zkstack__chain__genesis" + ;; + zkstack__chain,help) + cmd="zkstack__chain__help" + ;; + zkstack__chain,init) + cmd="zkstack__chain__init" + ;; + zkstack__chain,initialize-bridges) + cmd="zkstack__chain__initialize__bridges" + ;; + zkstack__chain,register-chain) + cmd="zkstack__chain__register__chain" + ;; + zkstack__chain,update-token-multiplier-setter) + cmd="zkstack__chain__update__token__multiplier__setter" + ;; + zkstack__chain__genesis,help) + cmd="zkstack__chain__genesis__help" + ;; + zkstack__chain__genesis,init-database) + cmd="zkstack__chain__genesis__init__database" + ;; + zkstack__chain__genesis,server) + cmd="zkstack__chain__genesis__server" + ;; + zkstack__chain__genesis__help,help) + cmd="zkstack__chain__genesis__help__help" + ;; + zkstack__chain__genesis__help,init-database) + cmd="zkstack__chain__genesis__help__init__database" + ;; + zkstack__chain__genesis__help,server) + cmd="zkstack__chain__genesis__help__server" + ;; + zkstack__chain__help,accept-chain-ownership) + cmd="zkstack__chain__help__accept__chain__ownership" + ;; + zkstack__chain__help,build-transactions) + cmd="zkstack__chain__help__build__transactions" + ;; + zkstack__chain__help,create) + cmd="zkstack__chain__help__create" + ;; + zkstack__chain__help,deploy-consensus-registry) + cmd="zkstack__chain__help__deploy__consensus__registry" + ;; + zkstack__chain__help,deploy-l2-contracts) + cmd="zkstack__chain__help__deploy__l2__contracts" + ;; + zkstack__chain__help,deploy-multicall3) + cmd="zkstack__chain__help__deploy__multicall3" + ;; + zkstack__chain__help,deploy-paymaster) + cmd="zkstack__chain__help__deploy__paymaster" + ;; + zkstack__chain__help,deploy-upgrader) + cmd="zkstack__chain__help__deploy__upgrader" + ;; + zkstack__chain__help,genesis) + cmd="zkstack__chain__help__genesis" + ;; + zkstack__chain__help,help) + cmd="zkstack__chain__help__help" + ;; + zkstack__chain__help,init) + cmd="zkstack__chain__help__init" + ;; + zkstack__chain__help,initialize-bridges) + cmd="zkstack__chain__help__initialize__bridges" + ;; + zkstack__chain__help,register-chain) + cmd="zkstack__chain__help__register__chain" + ;; + zkstack__chain__help,update-token-multiplier-setter) + cmd="zkstack__chain__help__update__token__multiplier__setter" + ;; + zkstack__chain__help__genesis,init-database) + cmd="zkstack__chain__help__genesis__init__database" + ;; + zkstack__chain__help__genesis,server) + cmd="zkstack__chain__help__genesis__server" + ;; + zkstack__chain__help__init,configs) + cmd="zkstack__chain__help__init__configs" + ;; + zkstack__chain__init,configs) + cmd="zkstack__chain__init__configs" + ;; + zkstack__chain__init,help) + cmd="zkstack__chain__init__help" + ;; + zkstack__chain__init__help,configs) + cmd="zkstack__chain__init__help__configs" + ;; + zkstack__chain__init__help,help) + cmd="zkstack__chain__init__help__help" + ;; + zkstack__consensus,get-attester-committee) + cmd="zkstack__consensus__get__attester__committee" + ;; + zkstack__consensus,help) + cmd="zkstack__consensus__help" + ;; + zkstack__consensus,set-attester-committee) + cmd="zkstack__consensus__set__attester__committee" + ;; + zkstack__consensus__help,get-attester-committee) + 
cmd="zkstack__consensus__help__get__attester__committee" + ;; + zkstack__consensus__help,help) + cmd="zkstack__consensus__help__help" + ;; + zkstack__consensus__help,set-attester-committee) + cmd="zkstack__consensus__help__set__attester__committee" + ;; + zkstack__contract__verifier,help) + cmd="zkstack__contract__verifier__help" + ;; + zkstack__contract__verifier,init) + cmd="zkstack__contract__verifier__init" + ;; + zkstack__contract__verifier,run) + cmd="zkstack__contract__verifier__run" + ;; + zkstack__contract__verifier__help,help) + cmd="zkstack__contract__verifier__help__help" + ;; + zkstack__contract__verifier__help,init) + cmd="zkstack__contract__verifier__help__init" + ;; + zkstack__contract__verifier__help,run) + cmd="zkstack__contract__verifier__help__run" + ;; + zkstack__dev,clean) + cmd="zkstack__dev__clean" + ;; + zkstack__dev,config-writer) + cmd="zkstack__dev__config__writer" + ;; + zkstack__dev,contracts) + cmd="zkstack__dev__contracts" + ;; + zkstack__dev,database) + cmd="zkstack__dev__database" + ;; + zkstack__dev,fmt) + cmd="zkstack__dev__fmt" + ;; + zkstack__dev,generate-genesis) + cmd="zkstack__dev__generate__genesis" + ;; + zkstack__dev,help) + cmd="zkstack__dev__help" + ;; + zkstack__dev,lint) + cmd="zkstack__dev__lint" + ;; + zkstack__dev,prover) + cmd="zkstack__dev__prover" + ;; + zkstack__dev,send-transactions) + cmd="zkstack__dev__send__transactions" + ;; + zkstack__dev,snapshot) + cmd="zkstack__dev__snapshot" + ;; + zkstack__dev,status) + cmd="zkstack__dev__status" + ;; + zkstack__dev,test) + cmd="zkstack__dev__test" + ;; + zkstack__dev__clean,all) + cmd="zkstack__dev__clean__all" + ;; + zkstack__dev__clean,containers) + cmd="zkstack__dev__clean__containers" + ;; + zkstack__dev__clean,contracts-cache) + cmd="zkstack__dev__clean__contracts__cache" + ;; + zkstack__dev__clean,help) + cmd="zkstack__dev__clean__help" + ;; + zkstack__dev__clean__help,all) + cmd="zkstack__dev__clean__help__all" + ;; + zkstack__dev__clean__help,containers) + cmd="zkstack__dev__clean__help__containers" + ;; + zkstack__dev__clean__help,contracts-cache) + cmd="zkstack__dev__clean__help__contracts__cache" + ;; + zkstack__dev__clean__help,help) + cmd="zkstack__dev__clean__help__help" + ;; + zkstack__dev__database,check-sqlx-data) + cmd="zkstack__dev__database__check__sqlx__data" + ;; + zkstack__dev__database,drop) + cmd="zkstack__dev__database__drop" + ;; + zkstack__dev__database,help) + cmd="zkstack__dev__database__help" + ;; + zkstack__dev__database,migrate) + cmd="zkstack__dev__database__migrate" + ;; + zkstack__dev__database,new-migration) + cmd="zkstack__dev__database__new__migration" + ;; + zkstack__dev__database,prepare) + cmd="zkstack__dev__database__prepare" + ;; + zkstack__dev__database,reset) + cmd="zkstack__dev__database__reset" + ;; + zkstack__dev__database,setup) + cmd="zkstack__dev__database__setup" + ;; + zkstack__dev__database__help,check-sqlx-data) + cmd="zkstack__dev__database__help__check__sqlx__data" + ;; + zkstack__dev__database__help,drop) + cmd="zkstack__dev__database__help__drop" + ;; + zkstack__dev__database__help,help) + cmd="zkstack__dev__database__help__help" + ;; + zkstack__dev__database__help,migrate) + cmd="zkstack__dev__database__help__migrate" + ;; + zkstack__dev__database__help,new-migration) + cmd="zkstack__dev__database__help__new__migration" + ;; + zkstack__dev__database__help,prepare) + cmd="zkstack__dev__database__help__prepare" + ;; + zkstack__dev__database__help,reset) + cmd="zkstack__dev__database__help__reset" + ;; + 
zkstack__dev__database__help,setup) + cmd="zkstack__dev__database__help__setup" + ;; + zkstack__dev__fmt,contract) + cmd="zkstack__dev__fmt__contract" + ;; + zkstack__dev__fmt,help) + cmd="zkstack__dev__fmt__help" + ;; + zkstack__dev__fmt,prettier) + cmd="zkstack__dev__fmt__prettier" + ;; + zkstack__dev__fmt,rustfmt) + cmd="zkstack__dev__fmt__rustfmt" + ;; + zkstack__dev__fmt__help,contract) + cmd="zkstack__dev__fmt__help__contract" + ;; + zkstack__dev__fmt__help,help) + cmd="zkstack__dev__fmt__help__help" + ;; + zkstack__dev__fmt__help,prettier) + cmd="zkstack__dev__fmt__help__prettier" + ;; + zkstack__dev__fmt__help,rustfmt) + cmd="zkstack__dev__fmt__help__rustfmt" + ;; + zkstack__dev__help,clean) + cmd="zkstack__dev__help__clean" + ;; + zkstack__dev__help,config-writer) + cmd="zkstack__dev__help__config__writer" + ;; + zkstack__dev__help,contracts) + cmd="zkstack__dev__help__contracts" + ;; + zkstack__dev__help,database) + cmd="zkstack__dev__help__database" + ;; + zkstack__dev__help,fmt) + cmd="zkstack__dev__help__fmt" + ;; + zkstack__dev__help,generate-genesis) + cmd="zkstack__dev__help__generate__genesis" + ;; + zkstack__dev__help,help) + cmd="zkstack__dev__help__help" + ;; + zkstack__dev__help,lint) + cmd="zkstack__dev__help__lint" + ;; + zkstack__dev__help,prover) + cmd="zkstack__dev__help__prover" + ;; + zkstack__dev__help,send-transactions) + cmd="zkstack__dev__help__send__transactions" + ;; + zkstack__dev__help,snapshot) + cmd="zkstack__dev__help__snapshot" + ;; + zkstack__dev__help,status) + cmd="zkstack__dev__help__status" + ;; + zkstack__dev__help,test) + cmd="zkstack__dev__help__test" + ;; + zkstack__dev__help__clean,all) + cmd="zkstack__dev__help__clean__all" + ;; + zkstack__dev__help__clean,containers) + cmd="zkstack__dev__help__clean__containers" + ;; + zkstack__dev__help__clean,contracts-cache) + cmd="zkstack__dev__help__clean__contracts__cache" + ;; + zkstack__dev__help__database,check-sqlx-data) + cmd="zkstack__dev__help__database__check__sqlx__data" + ;; + zkstack__dev__help__database,drop) + cmd="zkstack__dev__help__database__drop" + ;; + zkstack__dev__help__database,migrate) + cmd="zkstack__dev__help__database__migrate" + ;; + zkstack__dev__help__database,new-migration) + cmd="zkstack__dev__help__database__new__migration" + ;; + zkstack__dev__help__database,prepare) + cmd="zkstack__dev__help__database__prepare" + ;; + zkstack__dev__help__database,reset) + cmd="zkstack__dev__help__database__reset" + ;; + zkstack__dev__help__database,setup) + cmd="zkstack__dev__help__database__setup" + ;; + zkstack__dev__help__fmt,contract) + cmd="zkstack__dev__help__fmt__contract" + ;; + zkstack__dev__help__fmt,prettier) + cmd="zkstack__dev__help__fmt__prettier" + ;; + zkstack__dev__help__fmt,rustfmt) + cmd="zkstack__dev__help__fmt__rustfmt" + ;; + zkstack__dev__help__prover,info) + cmd="zkstack__dev__help__prover__info" + ;; + zkstack__dev__help__prover,insert-batch) + cmd="zkstack__dev__help__prover__insert__batch" + ;; + zkstack__dev__help__prover,insert-version) + cmd="zkstack__dev__help__prover__insert__version" + ;; + zkstack__dev__help__snapshot,create) + cmd="zkstack__dev__help__snapshot__create" + ;; + zkstack__dev__help__status,ports) + cmd="zkstack__dev__help__status__ports" + ;; + zkstack__dev__help__test,build) + cmd="zkstack__dev__help__test__build" + ;; + zkstack__dev__help__test,fees) + cmd="zkstack__dev__help__test__fees" + ;; + zkstack__dev__help__test,integration) + cmd="zkstack__dev__help__test__integration" + ;; + zkstack__dev__help__test,l1-contracts) + 
cmd="zkstack__dev__help__test__l1__contracts" + ;; + zkstack__dev__help__test,loadtest) + cmd="zkstack__dev__help__test__loadtest" + ;; + zkstack__dev__help__test,prover) + cmd="zkstack__dev__help__test__prover" + ;; + zkstack__dev__help__test,recovery) + cmd="zkstack__dev__help__test__recovery" + ;; + zkstack__dev__help__test,revert) + cmd="zkstack__dev__help__test__revert" + ;; + zkstack__dev__help__test,rust) + cmd="zkstack__dev__help__test__rust" + ;; + zkstack__dev__help__test,upgrade) + cmd="zkstack__dev__help__test__upgrade" + ;; + zkstack__dev__help__test,wallet) + cmd="zkstack__dev__help__test__wallet" + ;; + zkstack__dev__prover,help) + cmd="zkstack__dev__prover__help" + ;; + zkstack__dev__prover,info) + cmd="zkstack__dev__prover__info" + ;; + zkstack__dev__prover,insert-batch) + cmd="zkstack__dev__prover__insert__batch" + ;; + zkstack__dev__prover,insert-version) + cmd="zkstack__dev__prover__insert__version" + ;; + zkstack__dev__prover__help,help) + cmd="zkstack__dev__prover__help__help" + ;; + zkstack__dev__prover__help,info) + cmd="zkstack__dev__prover__help__info" + ;; + zkstack__dev__prover__help,insert-batch) + cmd="zkstack__dev__prover__help__insert__batch" + ;; + zkstack__dev__prover__help,insert-version) + cmd="zkstack__dev__prover__help__insert__version" + ;; + zkstack__dev__snapshot,create) + cmd="zkstack__dev__snapshot__create" + ;; + zkstack__dev__snapshot,help) + cmd="zkstack__dev__snapshot__help" + ;; + zkstack__dev__snapshot__help,create) + cmd="zkstack__dev__snapshot__help__create" + ;; + zkstack__dev__snapshot__help,help) + cmd="zkstack__dev__snapshot__help__help" + ;; + zkstack__dev__status,help) + cmd="zkstack__dev__status__help" + ;; + zkstack__dev__status,ports) + cmd="zkstack__dev__status__ports" + ;; + zkstack__dev__status__help,help) + cmd="zkstack__dev__status__help__help" + ;; + zkstack__dev__status__help,ports) + cmd="zkstack__dev__status__help__ports" + ;; + zkstack__dev__test,build) + cmd="zkstack__dev__test__build" + ;; + zkstack__dev__test,fees) + cmd="zkstack__dev__test__fees" + ;; + zkstack__dev__test,help) + cmd="zkstack__dev__test__help" + ;; + zkstack__dev__test,integration) + cmd="zkstack__dev__test__integration" + ;; + zkstack__dev__test,l1-contracts) + cmd="zkstack__dev__test__l1__contracts" + ;; + zkstack__dev__test,loadtest) + cmd="zkstack__dev__test__loadtest" + ;; + zkstack__dev__test,prover) + cmd="zkstack__dev__test__prover" + ;; + zkstack__dev__test,recovery) + cmd="zkstack__dev__test__recovery" + ;; + zkstack__dev__test,revert) + cmd="zkstack__dev__test__revert" + ;; + zkstack__dev__test,rust) + cmd="zkstack__dev__test__rust" + ;; + zkstack__dev__test,upgrade) + cmd="zkstack__dev__test__upgrade" + ;; + zkstack__dev__test,wallet) + cmd="zkstack__dev__test__wallet" + ;; + zkstack__dev__test__help,build) + cmd="zkstack__dev__test__help__build" + ;; + zkstack__dev__test__help,fees) + cmd="zkstack__dev__test__help__fees" + ;; + zkstack__dev__test__help,help) + cmd="zkstack__dev__test__help__help" + ;; + zkstack__dev__test__help,integration) + cmd="zkstack__dev__test__help__integration" + ;; + zkstack__dev__test__help,l1-contracts) + cmd="zkstack__dev__test__help__l1__contracts" + ;; + zkstack__dev__test__help,loadtest) + cmd="zkstack__dev__test__help__loadtest" + ;; + zkstack__dev__test__help,prover) + cmd="zkstack__dev__test__help__prover" + ;; + zkstack__dev__test__help,recovery) + cmd="zkstack__dev__test__help__recovery" + ;; + zkstack__dev__test__help,revert) + cmd="zkstack__dev__test__help__revert" + ;; + 
zkstack__dev__test__help,rust) + cmd="zkstack__dev__test__help__rust" + ;; + zkstack__dev__test__help,upgrade) + cmd="zkstack__dev__test__help__upgrade" + ;; + zkstack__dev__test__help,wallet) + cmd="zkstack__dev__test__help__wallet" + ;; + zkstack__ecosystem,build-transactions) + cmd="zkstack__ecosystem__build__transactions" + ;; + zkstack__ecosystem,change-default-chain) + cmd="zkstack__ecosystem__change__default__chain" + ;; + zkstack__ecosystem,create) + cmd="zkstack__ecosystem__create" + ;; + zkstack__ecosystem,help) + cmd="zkstack__ecosystem__help" + ;; + zkstack__ecosystem,init) + cmd="zkstack__ecosystem__init" + ;; + zkstack__ecosystem,setup-observability) + cmd="zkstack__ecosystem__setup__observability" + ;; + zkstack__ecosystem__help,build-transactions) + cmd="zkstack__ecosystem__help__build__transactions" + ;; + zkstack__ecosystem__help,change-default-chain) + cmd="zkstack__ecosystem__help__change__default__chain" + ;; + zkstack__ecosystem__help,create) + cmd="zkstack__ecosystem__help__create" + ;; + zkstack__ecosystem__help,help) + cmd="zkstack__ecosystem__help__help" + ;; + zkstack__ecosystem__help,init) + cmd="zkstack__ecosystem__help__init" + ;; + zkstack__ecosystem__help,setup-observability) + cmd="zkstack__ecosystem__help__setup__observability" + ;; + zkstack__explorer,help) + cmd="zkstack__explorer__help" + ;; + zkstack__explorer,init) + cmd="zkstack__explorer__init" + ;; + zkstack__explorer,run) + cmd="zkstack__explorer__run" + ;; + zkstack__explorer,run-backend) + cmd="zkstack__explorer__run__backend" + ;; + zkstack__explorer__help,help) + cmd="zkstack__explorer__help__help" + ;; + zkstack__explorer__help,init) + cmd="zkstack__explorer__help__init" + ;; + zkstack__explorer__help,run) + cmd="zkstack__explorer__help__run" + ;; + zkstack__explorer__help,run-backend) + cmd="zkstack__explorer__help__run__backend" + ;; + zkstack__external__node,configs) + cmd="zkstack__external__node__configs" + ;; + zkstack__external__node,help) + cmd="zkstack__external__node__help" + ;; + zkstack__external__node,init) + cmd="zkstack__external__node__init" + ;; + zkstack__external__node,run) + cmd="zkstack__external__node__run" + ;; + zkstack__external__node__help,configs) + cmd="zkstack__external__node__help__configs" + ;; + zkstack__external__node__help,help) + cmd="zkstack__external__node__help__help" + ;; + zkstack__external__node__help,init) + cmd="zkstack__external__node__help__init" + ;; + zkstack__external__node__help,run) + cmd="zkstack__external__node__help__run" + ;; + zkstack__help,autocomplete) + cmd="zkstack__help__autocomplete" + ;; + zkstack__help,chain) + cmd="zkstack__help__chain" + ;; + zkstack__help,consensus) + cmd="zkstack__help__consensus" + ;; + zkstack__help,containers) + cmd="zkstack__help__containers" + ;; + zkstack__help,contract-verifier) + cmd="zkstack__help__contract__verifier" + ;; + zkstack__help,dev) + cmd="zkstack__help__dev" + ;; + zkstack__help,ecosystem) + cmd="zkstack__help__ecosystem" + ;; + zkstack__help,explorer) + cmd="zkstack__help__explorer" + ;; + zkstack__help,external-node) + cmd="zkstack__help__external__node" + ;; + zkstack__help,help) + cmd="zkstack__help__help" + ;; + zkstack__help,markdown) + cmd="zkstack__help__markdown" + ;; + zkstack__help,portal) + cmd="zkstack__help__portal" + ;; + zkstack__help,prover) + cmd="zkstack__help__prover" + ;; + zkstack__help,server) + cmd="zkstack__help__server" + ;; + zkstack__help,update) + cmd="zkstack__help__update" + ;; + zkstack__help__chain,accept-chain-ownership) + 
cmd="zkstack__help__chain__accept__chain__ownership" + ;; + zkstack__help__chain,build-transactions) + cmd="zkstack__help__chain__build__transactions" + ;; + zkstack__help__chain,create) + cmd="zkstack__help__chain__create" + ;; + zkstack__help__chain,deploy-consensus-registry) + cmd="zkstack__help__chain__deploy__consensus__registry" + ;; + zkstack__help__chain,deploy-l2-contracts) + cmd="zkstack__help__chain__deploy__l2__contracts" + ;; + zkstack__help__chain,deploy-multicall3) + cmd="zkstack__help__chain__deploy__multicall3" + ;; + zkstack__help__chain,deploy-paymaster) + cmd="zkstack__help__chain__deploy__paymaster" + ;; + zkstack__help__chain,deploy-upgrader) + cmd="zkstack__help__chain__deploy__upgrader" + ;; + zkstack__help__chain,genesis) + cmd="zkstack__help__chain__genesis" + ;; + zkstack__help__chain,init) + cmd="zkstack__help__chain__init" + ;; + zkstack__help__chain,initialize-bridges) + cmd="zkstack__help__chain__initialize__bridges" + ;; + zkstack__help__chain,register-chain) + cmd="zkstack__help__chain__register__chain" + ;; + zkstack__help__chain,update-token-multiplier-setter) + cmd="zkstack__help__chain__update__token__multiplier__setter" + ;; + zkstack__help__chain__genesis,init-database) + cmd="zkstack__help__chain__genesis__init__database" + ;; + zkstack__help__chain__genesis,server) + cmd="zkstack__help__chain__genesis__server" + ;; + zkstack__help__chain__init,configs) + cmd="zkstack__help__chain__init__configs" + ;; + zkstack__help__consensus,get-attester-committee) + cmd="zkstack__help__consensus__get__attester__committee" + ;; + zkstack__help__consensus,set-attester-committee) + cmd="zkstack__help__consensus__set__attester__committee" + ;; + zkstack__help__contract__verifier,init) + cmd="zkstack__help__contract__verifier__init" + ;; + zkstack__help__contract__verifier,run) + cmd="zkstack__help__contract__verifier__run" + ;; + zkstack__help__dev,clean) + cmd="zkstack__help__dev__clean" + ;; + zkstack__help__dev,config-writer) + cmd="zkstack__help__dev__config__writer" + ;; + zkstack__help__dev,contracts) + cmd="zkstack__help__dev__contracts" + ;; + zkstack__help__dev,database) + cmd="zkstack__help__dev__database" + ;; + zkstack__help__dev,fmt) + cmd="zkstack__help__dev__fmt" + ;; + zkstack__help__dev,generate-genesis) + cmd="zkstack__help__dev__generate__genesis" + ;; + zkstack__help__dev,lint) + cmd="zkstack__help__dev__lint" + ;; + zkstack__help__dev,prover) + cmd="zkstack__help__dev__prover" + ;; + zkstack__help__dev,send-transactions) + cmd="zkstack__help__dev__send__transactions" + ;; + zkstack__help__dev,snapshot) + cmd="zkstack__help__dev__snapshot" + ;; + zkstack__help__dev,status) + cmd="zkstack__help__dev__status" + ;; + zkstack__help__dev,test) + cmd="zkstack__help__dev__test" + ;; + zkstack__help__dev__clean,all) + cmd="zkstack__help__dev__clean__all" + ;; + zkstack__help__dev__clean,containers) + cmd="zkstack__help__dev__clean__containers" + ;; + zkstack__help__dev__clean,contracts-cache) + cmd="zkstack__help__dev__clean__contracts__cache" + ;; + zkstack__help__dev__database,check-sqlx-data) + cmd="zkstack__help__dev__database__check__sqlx__data" + ;; + zkstack__help__dev__database,drop) + cmd="zkstack__help__dev__database__drop" + ;; + zkstack__help__dev__database,migrate) + cmd="zkstack__help__dev__database__migrate" + ;; + zkstack__help__dev__database,new-migration) + cmd="zkstack__help__dev__database__new__migration" + ;; + zkstack__help__dev__database,prepare) + cmd="zkstack__help__dev__database__prepare" + ;; + 
zkstack__help__dev__database,reset) + cmd="zkstack__help__dev__database__reset" + ;; + zkstack__help__dev__database,setup) + cmd="zkstack__help__dev__database__setup" + ;; + zkstack__help__dev__fmt,contract) + cmd="zkstack__help__dev__fmt__contract" + ;; + zkstack__help__dev__fmt,prettier) + cmd="zkstack__help__dev__fmt__prettier" + ;; + zkstack__help__dev__fmt,rustfmt) + cmd="zkstack__help__dev__fmt__rustfmt" + ;; + zkstack__help__dev__prover,info) + cmd="zkstack__help__dev__prover__info" + ;; + zkstack__help__dev__prover,insert-batch) + cmd="zkstack__help__dev__prover__insert__batch" + ;; + zkstack__help__dev__prover,insert-version) + cmd="zkstack__help__dev__prover__insert__version" + ;; + zkstack__help__dev__snapshot,create) + cmd="zkstack__help__dev__snapshot__create" + ;; + zkstack__help__dev__status,ports) + cmd="zkstack__help__dev__status__ports" + ;; + zkstack__help__dev__test,build) + cmd="zkstack__help__dev__test__build" + ;; + zkstack__help__dev__test,fees) + cmd="zkstack__help__dev__test__fees" + ;; + zkstack__help__dev__test,integration) + cmd="zkstack__help__dev__test__integration" + ;; + zkstack__help__dev__test,l1-contracts) + cmd="zkstack__help__dev__test__l1__contracts" + ;; + zkstack__help__dev__test,loadtest) + cmd="zkstack__help__dev__test__loadtest" + ;; + zkstack__help__dev__test,prover) + cmd="zkstack__help__dev__test__prover" + ;; + zkstack__help__dev__test,recovery) + cmd="zkstack__help__dev__test__recovery" + ;; + zkstack__help__dev__test,revert) + cmd="zkstack__help__dev__test__revert" + ;; + zkstack__help__dev__test,rust) + cmd="zkstack__help__dev__test__rust" + ;; + zkstack__help__dev__test,upgrade) + cmd="zkstack__help__dev__test__upgrade" + ;; + zkstack__help__dev__test,wallet) + cmd="zkstack__help__dev__test__wallet" + ;; + zkstack__help__ecosystem,build-transactions) + cmd="zkstack__help__ecosystem__build__transactions" + ;; + zkstack__help__ecosystem,change-default-chain) + cmd="zkstack__help__ecosystem__change__default__chain" + ;; + zkstack__help__ecosystem,create) + cmd="zkstack__help__ecosystem__create" + ;; + zkstack__help__ecosystem,init) + cmd="zkstack__help__ecosystem__init" + ;; + zkstack__help__ecosystem,setup-observability) + cmd="zkstack__help__ecosystem__setup__observability" + ;; + zkstack__help__explorer,init) + cmd="zkstack__help__explorer__init" + ;; + zkstack__help__explorer,run) + cmd="zkstack__help__explorer__run" + ;; + zkstack__help__explorer,run-backend) + cmd="zkstack__help__explorer__run__backend" + ;; + zkstack__help__external__node,configs) + cmd="zkstack__help__external__node__configs" + ;; + zkstack__help__external__node,init) + cmd="zkstack__help__external__node__init" + ;; + zkstack__help__external__node,run) + cmd="zkstack__help__external__node__run" + ;; + zkstack__help__prover,compressor-keys) + cmd="zkstack__help__prover__compressor__keys" + ;; + zkstack__help__prover,init) + cmd="zkstack__help__prover__init" + ;; + zkstack__help__prover,init-bellman-cuda) + cmd="zkstack__help__prover__init__bellman__cuda" + ;; + zkstack__help__prover,run) + cmd="zkstack__help__prover__run" + ;; + zkstack__help__prover,setup-keys) + cmd="zkstack__help__prover__setup__keys" + ;; + zkstack__prover,compressor-keys) + cmd="zkstack__prover__compressor__keys" + ;; + zkstack__prover,help) + cmd="zkstack__prover__help" + ;; + zkstack__prover,init) + cmd="zkstack__prover__init" + ;; + zkstack__prover,init-bellman-cuda) + cmd="zkstack__prover__init__bellman__cuda" + ;; + zkstack__prover,run) + cmd="zkstack__prover__run" + ;; + 
zkstack__prover,setup-keys) + cmd="zkstack__prover__setup__keys" + ;; + zkstack__prover__help,compressor-keys) + cmd="zkstack__prover__help__compressor__keys" + ;; + zkstack__prover__help,help) + cmd="zkstack__prover__help__help" + ;; + zkstack__prover__help,init) + cmd="zkstack__prover__help__init" + ;; + zkstack__prover__help,init-bellman-cuda) + cmd="zkstack__prover__help__init__bellman__cuda" + ;; + zkstack__prover__help,run) + cmd="zkstack__prover__help__run" + ;; + zkstack__prover__help,setup-keys) + cmd="zkstack__prover__help__setup__keys" + ;; + *) + ;; + esac + done + + case "${cmd}" in + zkstack) + opts="-v -h -V --verbose --chain --ignore-prerequisites --help --version autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__autocomplete) + opts="-o -v -h --generate --out --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --generate) + COMPREPLY=($(compgen -W "bash elvish fish powershell zsh" -- "${cur}")) + return 0 + ;; + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain) + opts="-v -h --verbose --chain --ignore-prerequisites --help create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__accept__chain__ownership) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__build__transactions) + opts="-o -a -v -h --out --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --l1-rpc-url --verbose --chain --ignore-prerequisites 
--help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__create) + opts="-v -h --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-mode) + COMPREPLY=($(compgen -W "no-proofs gpu" -- "${cur}")) + return 0 + ;; + --wallet-creation) + COMPREPLY=($(compgen -W "localhost random empty in-file" -- "${cur}")) + return 0 + ;; + --wallet-path) + local oldifs + if [ -n "${IFS+x}" ]; then + oldifs="$IFS" + fi + IFS=$'\n' + COMPREPLY=($(compgen -f "${cur}")) + if [ -n "${oldifs+x}" ]; then + IFS="$oldifs" + fi + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o filenames + fi + return 0 + ;; + --l1-batch-commit-data-generator-mode) + COMPREPLY=($(compgen -W "rollup validium" -- "${cur}")) + return 0 + ;; + --base-token-address) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-nominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-denominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --set-as-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__consensus__registry) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__l2__contracts) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__multicall3) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__paymaster) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__upgrader) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + 
--verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis) + opts="-u -d -v -h --server-db-url --server-db-name --use-default --dont-drop --verbose --chain --ignore-prerequisites --help init-database server help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help) + opts="init-database server help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__init__database) + opts="-u -d -v -h --server-db-url --server-db-name --use-default --dont-drop --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__server) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help) + opts="create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( 
$(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__accept__chain__ownership) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__consensus__registry) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__l2__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__multicall3) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__paymaster) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__upgrader) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis) + opts="init-database server" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( 
$(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__init) + opts="configs" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__init__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__initialize__bridges) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__register__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__update__token__multiplier__setter) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init) + opts="-a -u -d -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --server-db-url --server-db-name --use-default --dont-drop --deploy-paymaster --l1-rpc-url --no-port-reallocation --verbose --chain --ignore-prerequisites --help configs help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --deploy-paymaster) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__configs) + opts="-u -d -v -h --server-db-url --server-db-name --use-default --dont-drop --l1-rpc-url --no-port-reallocation --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help) + opts="configs help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__initialize__bridges) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__register__chain) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__update__token__multiplier__setter) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + 
--verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus) + opts="-v -h --verbose --chain --ignore-prerequisites --help set-attester-committee get-attester-committee help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__get__attester__committee) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help) + opts="set-attester-committee get-attester-committee help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__get__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__set__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__set__attester__committee) + opts="-v -h --from-genesis --from-file --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --from-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__containers) + opts="-o -v -h --observability --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --observability) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier) + opts="-v -h --verbose --chain --ignore-prerequisites --help run init help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help) + opts="run init help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__init) + opts="-v -h --zksolc-version --zkvyper-version --solc-version --era-vm-solc-version --vyper-version --only --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --zksolc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --zkvyper-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --solc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --era-vm-solc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --vyper-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__run) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev) + opts="-v -h --verbose --chain --ignore-prerequisites --help database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean) + opts="-v -h --verbose --chain --ignore-prerequisites --help all containers 
contracts-cache help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__all) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__containers) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__contracts__cache) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help) + opts="all containers contracts-cache help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__config__writer) + opts="-p -v -h --path --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__dev__contracts) + opts="-v -h --l1-contracts --l2-contracts --system-contracts --test-contracts --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --l1-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --l2-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --system-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --test-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database) + opts="-v -h --verbose --chain --ignore-prerequisites --help check-sqlx-data drop migrate new-migration prepare reset setup help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__check__sqlx__data) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__drop) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help) + opts="check-sqlx-data drop migrate new-migration prepare reset setup help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__migrate) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__new__migration) + opts="-v -h --database --name --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --database) + COMPREPLY=($(compgen -W "prover core" -- "${cur}")) + return 0 + ;; + --name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__prepare) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__reset) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__setup) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt) + opts="-c -v -h --check --verbose --chain --ignore-prerequisites --help rustfmt contract prettier help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__contract) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + 
return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help) + opts="rustfmt contract prettier help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__rustfmt) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__prettier) + opts="-t -v -h --targets --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --targets) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__rustfmt) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__generate__genesis) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help) + opts="database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean) + opts="all containers contracts-cache" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W 
"${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__config__writer) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database) + opts="check-sqlx-data drop migrate new-migration prepare reset setup" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt) + opts="rustfmt contract prettier" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__rustfmt) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__generate__genesis) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__lint) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover) + opts="info insert-batch insert-version" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__send__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__snapshot) + opts="create" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__snapshot__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__status) + opts="ports" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__status__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test) + opts="integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" 
-- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__lint) + opts="-c -t -v -h --check --targets --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --targets) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help info insert-batch insert-version help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help) + opts="info insert-batch insert-version help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__dev__prover__help__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__info) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__insert__batch) + opts="-v -h --number --default --version --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --number) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__insert__version) + opts="-v -h --default --version --snark-wrapper --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --snark-wrapper) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__send__transactions) + opts="-v -h --file --private-key --l1-rpc-url --confirmations --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --private-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --confirmations) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot) + opts="-v -h --verbose --chain --ignore-prerequisites --help create help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__create) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen 
-f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help) + opts="create help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status) + opts="-u -v -h --url --verbose --chain --ignore-prerequisites --help ports help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -u) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help) + opts="ports help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__ports) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test) + opts="-v -h --verbose --chain --ignore-prerequisites --help integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__build) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__fees) + opts="-n -v -h --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help) + opts="integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__dev__test__help__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__integration) + opts="-e -n -t -v -h --external-node --no-deps --test-pattern --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --test-pattern) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__l1__contracts) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__loadtest) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__recovery) + opts="-s -n -v -h --snapshot --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__revert) + opts="-e -n -v -h --enable-consensus --external-node --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__dev__test__rust) + opts="-v -h --options --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --options) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__upgrade) + opts="-n -v -h --no-deps --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__wallet) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem) + opts="-v -h --verbose --chain --ignore-prerequisites --help create build-transactions init change-default-chain setup-observability help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__build__transactions) + opts="-o -a -v -h --sender --l1-rpc-url --out --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --sender) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__change__default__chain) + opts="-v -h --verbose --chain --ignore-prerequisites --help [NAME]" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__create) + opts="-v -h --ecosystem-name --l1-network --link-to-code --chain-name --chain-id --prover-mode 
--wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --start-containers --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --ecosystem-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-network) + COMPREPLY=($(compgen -W "localhost sepolia holesky mainnet" -- "${cur}")) + return 0 + ;; + --link-to-code) + COMPREPLY=() + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o plusdirs + fi + return 0 + ;; + --chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-mode) + COMPREPLY=($(compgen -W "no-proofs gpu" -- "${cur}")) + return 0 + ;; + --wallet-creation) + COMPREPLY=($(compgen -W "localhost random empty in-file" -- "${cur}")) + return 0 + ;; + --wallet-path) + local oldifs + if [ -n "${IFS+x}" ]; then + oldifs="$IFS" + fi + IFS=$'\n' + COMPREPLY=($(compgen -f "${cur}")) + if [ -n "${oldifs+x}" ]; then + IFS="$oldifs" + fi + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o filenames + fi + return 0 + ;; + --l1-batch-commit-data-generator-mode) + COMPREPLY=($(compgen -W "rollup validium" -- "${cur}")) + return 0 + ;; + --base-token-address) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-nominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-denominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --set-as-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --start-containers) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help) + opts="create build-transactions init change-default-chain setup-observability help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__change__default__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__ecosystem__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__setup__observability) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__init) + opts="-a -u -d -o -v -h --deploy-erc20 --deploy-ecosystem --ecosystem-contracts-path --l1-rpc-url --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --deploy-paymaster --server-db-url --server-db-name --use-default --dont-drop --ecosystem-only --dev --observability --no-port-reallocation --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --deploy-erc20) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --deploy-ecosystem) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --ecosystem-contracts-path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --deploy-paymaster) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --observability) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__setup__observability) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer) + opts="-v -h --verbose --chain --ignore-prerequisites --help init run-backend run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help) + opts="init run-backend run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__run__backend) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__init) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__run) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__run__backend) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node) + opts="-v -h --verbose --chain --ignore-prerequisites --help configs init run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__configs) + opts="-u -v -h --db-url --db-name --l1-rpc-url --use-default --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help) + opts="configs init run help" + if [[ 
${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__init) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__run) + opts="-a -v -h --reinit --components --enable-consensus --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --components) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --enable-consensus) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help) + opts="autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__autocomplete) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain) + opts="create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; 
then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__accept__chain__ownership) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__consensus__registry) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__l2__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__multicall3) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__paymaster) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__upgrader) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis) + opts="init-database server" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__init) + opts="configs" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 
]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__init__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__initialize__bridges) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__register__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__update__token__multiplier__setter) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus) + opts="set-attester-committee get-attester-committee" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus__get__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus__set__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier) + opts="run init" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev) + opts="database test clean snapshot 
lint fmt prover contracts config-writer send-transactions status generate-genesis" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean) + opts="all containers contracts-cache" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__config__writer) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database) + opts="check-sqlx-data drop migrate new-migration prepare reset setup" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt) + opts="rustfmt contract prettier" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__rustfmt) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__generate__genesis) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__lint) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover) + opts="info insert-batch insert-version" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__send__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__snapshot) + opts="create" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__snapshot__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__status) + opts="ports" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__status__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test) + opts="integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen 
-W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem) + opts="create build-transactions init change-default-chain setup-observability" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__change__default__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__setup__observability) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( 
$(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer) + opts="init run-backend run" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__run__backend) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node) + opts="configs init run" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__markdown) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__portal) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover) + opts="init setup-keys run init-bellman-cuda compressor-keys" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__help__prover__compressor__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__init__bellman__cuda) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__setup__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__update) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__markdown) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__portal) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help init setup-keys run init-bellman-cuda compressor-keys help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__compressor__keys) + opts="-v -h --path --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help) + opts="init setup-keys run init-bellman-cuda compressor-keys help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__compressor__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__init__bellman__cuda) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__setup__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__init) + opts="-u -d -v -h --dev --proof-store-dir --bucket-base-url --credentials-file --bucket-name --location --project-id --shall-save-to-public-bucket --public-store-dir --public-bucket-base-url --public-credentials-file --public-bucket-name --public-location --public-project-id --clone --bellman-cuda-dir --bellman-cuda --setup-compressor-key --path --region --mode --setup-keys --setup-database --prover-db-url --prover-db-name --use-default --dont-drop --cloud-type --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --proof-store-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bucket-base-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --credentials-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bucket-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --location) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --project-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --shall-save-to-public-bucket) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --public-store-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-bucket-base-url) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-credentials-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-bucket-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-location) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-project-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bellman-cuda-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bellman-cuda) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --setup-compressor-key) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --region) + COMPREPLY=($(compgen -W "us europe asia" -- "${cur}")) + return 0 + ;; + --mode) + COMPREPLY=($(compgen -W "download generate" -- "${cur}")) + return 0 + ;; + --setup-keys) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --setup-database) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --use-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -u) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --dont-drop) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -d) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --cloud-type) + COMPREPLY=($(compgen -W "gcp local" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__init__bellman__cuda) + opts="-v -h --clone --bellman-cuda-dir --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --bellman-cuda-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__run) + opts="-v -h --component --round --threads --max-allocation --witness-vector-generator-count --max-allocation --docker --tag --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --component) + COMPREPLY=($(compgen -W "gateway witness-generator witness-vector-generator prover circuit-prover compressor prover-job-monitor" -- "${cur}")) + return 0 + ;; + --round) + COMPREPLY=($(compgen -W "all-rounds basic-circuits leaf-aggregation node-aggregation recursion-tip scheduler" -- "${cur}")) + return 0 + ;; + --threads) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --max-allocation) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --witness-vector-generator-count) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --max-allocation) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --docker) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --tag) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__prover__setup__keys) + opts="-v -h --region --mode --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --region) + COMPREPLY=($(compgen -W "us europe asia" -- "${cur}")) + return 0 + ;; + --mode) + COMPREPLY=($(compgen -W "download generate" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server) + opts="-a -v -h --components --genesis --additional-args --build --uring --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --components) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__update) + opts="-c -v -h --only-config --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + esac +} + +if [[ "${BASH_VERSINFO[0]}" -eq 4 && "${BASH_VERSINFO[1]}" -ge 4 || "${BASH_VERSINFO[0]}" -gt 4 ]]; then + complete -F _zkstack -o nosort -o bashdefault -o default zkstack +else + complete -F _zkstack -o bashdefault -o default zkstack +fi diff --git a/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs b/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs new file mode 100644 index 00000000000..8e44d644f39 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs @@ -0,0 +1,13 @@ +use std::path::PathBuf; + +use clap::Parser; + +#[derive(Debug, Parser)] +pub struct AutocompleteArgs { + /// The shell to generate the autocomplete script for + #[arg(long = "generate", value_enum)] + pub generator: clap_complete::Shell, + /// The out directory to write the autocomplete script to + #[arg(short, long, default_value = "./")] + pub out: PathBuf, +} diff --git a/zkstack_cli/crates/zkstack/src/commands/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/args/mod.rs index d18b05c910e..5fa83aadf51 100644 --- a/zkstack_cli/crates/zkstack/src/commands/args/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/args/mod.rs @@ -1,7 +1,9 @@ +pub use autocomplete::*; pub use containers::*; pub use run_server::*; pub use update::*; +mod autocomplete; mod containers; mod run_server; mod update; diff --git a/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs b/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs new file mode 100644 index 00000000000..0f2105cd5ef --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs @@ -0,0 +1,52 @@ +use std::{ + fs::File, + io::{BufWriter, Write}, +}; + +use anyhow::Context; +use clap::CommandFactory; +use clap_complete::{generate, Generator}; +use common::logger; + +use super::args::AutocompleteArgs; +use crate::{ + messages::{msg_generate_autocomplete_file, 
MSG_OUTRO_AUTOCOMPLETE_GENERATION}, + ZkStack, +}; + +pub fn run(args: AutocompleteArgs) -> anyhow::Result<()> { + let filename = autocomplete_file_name(&args.generator); + let path = args.out.join(filename); + + logger::info(msg_generate_autocomplete_file( + path.to_str() + .context("the output file path is an invalid UTF8 string")?, + )); + + let file = File::create(path).context("Failed to create file")?; + let mut writer = BufWriter::new(file); + + generate_completions(args.generator, &mut writer)?; + + logger::outro(MSG_OUTRO_AUTOCOMPLETE_GENERATION); + + Ok(()) +} + +pub fn generate_completions(gen: G, buf: &mut dyn Write) -> anyhow::Result<()> { + let mut cmd = ZkStack::command(); + let cmd_name = cmd.get_name().to_string(); + + generate(gen, &mut cmd, cmd_name, buf); + + Ok(()) +} + +pub fn autocomplete_file_name(shell: &clap_complete::Shell) -> &'static str { + match shell { + clap_complete::Shell::Bash => "zkstack.sh", + clap_complete::Shell::Fish => "zkstack.fish", + clap_complete::Shell::Zsh => "_zkstack.zsh", + _ => todo!(), + } +} diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs index 5fc46c1b227..ccf64ad27ac 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs @@ -1,7 +1,7 @@ use std::{path::PathBuf, str::FromStr}; use anyhow::{bail, Context}; -use clap::{Parser, ValueEnum}; +use clap::{Parser, ValueEnum, ValueHint}; use common::{Prompt, PromptConfirm, PromptSelect}; use config::forge_interface::deploy_ecosystem::output::Erc20Token; use serde::{Deserialize, Serialize}; @@ -53,7 +53,7 @@ pub struct ChainCreateArgs { prover_mode: Option, #[clap(long, help = MSG_WALLET_CREATION_HELP, value_enum)] wallet_creation: Option, - #[clap(long, help = MSG_WALLET_PATH_HELP)] + #[clap(long, help = MSG_WALLET_PATH_HELP, value_hint = ValueHint::FilePath)] wallet_path: Option, #[clap(long, help = MSG_L1_COMMIT_DATA_GENERATOR_MODE_HELP)] l1_batch_commit_data_generator_mode: Option, diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs index c26ad647524..b34809643cf 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs @@ -24,7 +24,7 @@ pub struct InitConfigsArgs { pub genesis_args: GenesisArgs, #[clap(long, help = MSG_L1_RPC_URL_HELP)] pub l1_rpc_url: Option, - #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)] + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] pub no_port_reallocation: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs index be4d28202b8..d92de9a0641 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs @@ -29,7 +29,7 @@ pub struct InitArgs { pub deploy_paymaster: Option, #[clap(long, help = MSG_L1_RPC_URL_HELP)] pub l1_rpc_url: Option, - #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)] + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] pub no_port_reallocation: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs 
b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs index 71f21a02e73..6c3c3fa3d75 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs @@ -1,13 +1,23 @@ +use std::{ + fs::File, + io::{Read, Write}, + path::Path, +}; + +use anyhow::{bail, Context}; use clap::Parser; use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::commands::dev::{ - commands::lint_utils::{get_unignored_files, Target}, - messages::{ - msg_running_linter_for_extension_spinner, msg_running_linters_for_files, - MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER, +use crate::commands::{ + autocomplete::{autocomplete_file_name, generate_completions}, + dev::{ + commands::lint_utils::{get_unignored_files, Target}, + messages::{ + msg_running_linter_for_extension_spinner, msg_running_linters_for_files, + MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER, + }, }, }; @@ -30,6 +40,7 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { Target::Js, Target::Ts, Target::Contracts, + Target::Autocompletion, ] } else { args.targets.clone() @@ -43,10 +54,13 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { match target { Target::Rs => lint_rs(shell, &ecosystem, args.check)?, Target::Contracts => lint_contracts(shell, &ecosystem, args.check)?, + Target::Autocompletion => lint_autocompletion_files(shell, args.check)?, ext => lint(shell, &ecosystem, &ext, args.check)?, } } + logger::outro("Linting complete."); + Ok(()) } @@ -81,6 +95,7 @@ fn get_linter(target: &Target) -> Vec { Target::Js => vec!["eslint".to_string()], Target::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()], Target::Contracts => vec![], + Target::Autocompletion => vec![], } } @@ -133,3 +148,45 @@ fn lint_contracts(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> an Ok(()) } + +fn lint_autocompletion_files(_shell: &Shell, check: bool) -> anyhow::Result<()> { + let completion_folder = Path::new("./zkstack_cli/crates/zkstack/completion/"); + if !completion_folder.exists() { + logger::info("WARNING: Please run this command from the project's root folder"); + return Ok(()); + } + + // Array of supported shells + let shells = [ + clap_complete::Shell::Bash, + clap_complete::Shell::Fish, + clap_complete::Shell::Zsh, + ]; + + for shell in shells { + let mut writer = Vec::new(); + + generate_completions(shell, &mut writer) + .context("Failed to generate autocompletion file")?; + + let new = String::from_utf8(writer)?; + + let path = completion_folder.join(autocomplete_file_name(&shell)); + let mut autocomplete_file = File::open(path.clone()) + .context(format!("failed to open {}", autocomplete_file_name(&shell)))?; + + let mut old = String::new(); + autocomplete_file.read_to_string(&mut old)?; + + if new != old { + if !check { + let mut autocomplete_file = File::create(path).context("Failed to create file")?; + autocomplete_file.write_all(new.as_bytes())?; + } else { + bail!("Autocompletion files need to be regenerated. 
Run `zkstack dev lint -t autocompletion` to fix this issue.") + } + } + } + + Ok(()) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs index 9095e445384..11a32504710 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs @@ -14,6 +14,7 @@ pub enum Target { Ts, Rs, Contracts, + Autocompletion, } #[derive(Deserialize, Serialize, Debug)] diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs index 83d505aa575..9e76850ff2e 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs @@ -7,6 +7,6 @@ use crate::commands::dev::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP}; pub struct FeesArgs { #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs index cf4734fd82e..b6ce278a1ca 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs @@ -11,6 +11,6 @@ pub struct RecoveryArgs { pub snapshot: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs index e4fb7fba2a9..9f86eec7f3d 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs @@ -13,6 +13,6 @@ pub struct RevertArgs { pub external_node: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs index 2e5c50f4538..14cb5206f6a 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs @@ -1,7 +1,7 @@ use std::path::{Path, PathBuf}; use anyhow::bail; -use clap::Parser; +use clap::{Parser, ValueHint}; use common::{cmd::Cmd, logger, Prompt, PromptConfirm, PromptSelect}; use serde::{Deserialize, Serialize}; use slugify_rs::slugify; @@ -26,7 +26,7 @@ pub struct EcosystemCreateArgs { pub ecosystem_name: Option, #[clap(long, help = MSG_L1_NETWORK_HELP, value_enum)] pub l1_network: Option, - #[clap(long, help = MSG_LINK_TO_CODE_HELP)] + #[clap(long, help = MSG_LINK_TO_CODE_HELP, value_hint = ValueHint::DirPath)] pub link_to_code: Option, #[clap(flatten)] #[serde(flatten)] diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs index 830b7b25e47..a77a9c28ca9 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs +++ 
b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs @@ -96,7 +96,7 @@ pub struct EcosystemInitArgs { pub dev: bool, #[clap(long, short = 'o', help = MSG_OBSERVABILITY_HELP, default_missing_value = "true", num_args = 0..=1)] pub observability: Option, - #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)] + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] pub no_port_reallocation: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/mod.rs index c46400cc865..b5319cbc6bf 100644 --- a/zkstack_cli/crates/zkstack/src/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/mod.rs @@ -1,4 +1,5 @@ pub mod args; +pub mod autocomplete; pub mod chain; pub mod consensus; pub mod containers; diff --git a/zkstack_cli/crates/zkstack/src/main.rs b/zkstack_cli/crates/zkstack/src/main.rs index 404ac893810..3ebe26a4fa2 100644 --- a/zkstack_cli/crates/zkstack/src/main.rs +++ b/zkstack_cli/crates/zkstack/src/main.rs @@ -1,6 +1,6 @@ use clap::{command, Parser, Subcommand}; use commands::{ - args::{ContainersArgs, UpdateArgs}, + args::{AutocompleteArgs, ContainersArgs, UpdateArgs}, contract_verifier::ContractVerifierCommands, dev::DevCommands, }; @@ -29,6 +29,7 @@ mod utils; #[derive(Parser, Debug)] #[command( + name = "zkstack", version = version_message(env!("CARGO_PKG_VERSION")), about )] @@ -41,13 +42,15 @@ struct ZkStack { #[derive(Subcommand, Debug)] pub enum ZkStackSubcommands { + /// Create shell autocompletion files + Autocomplete(AutocompleteArgs), /// Ecosystem related commands #[command(subcommand, alias = "e")] Ecosystem(Box), /// Chain related commands #[command(subcommand, alias = "c")] Chain(Box), - /// Chain related commands + /// Supervisor related commands #[command(subcommand)] Dev(DevCommands), /// Prover related commands @@ -55,7 +58,7 @@ pub enum ZkStackSubcommands { Prover(ProverCommands), /// Run server Server(RunServerArgs), - /// External Node related commands + /// External Node related commands #[command(subcommand, alias = "en")] ExternalNode(ExternalNodeCommands), /// Run containers for local development @@ -69,11 +72,13 @@ pub enum ZkStackSubcommands { /// Run block-explorer #[command(subcommand)] Explorer(ExplorerCommands), + /// Consensus utilities #[command(subcommand)] Consensus(consensus::Command), /// Update ZKsync #[command(alias = "u")] Update(UpdateArgs), + /// Print markdown help #[command(hide = true)] Markdown, } @@ -98,8 +103,20 @@ async fn main() -> anyhow::Result<()> { // We must parse arguments before printing the intro, because some autogenerated // Clap commands (like `--version` would look odd otherwise). 
- let inception_args = ZkStack::parse(); + let zkstack_args = ZkStack::parse(); + + match run_subcommand(zkstack_args).await { + Ok(_) => {} + Err(error) => { + log_error(error); + std::process::exit(1); + } + } + Ok(()) +} + +async fn run_subcommand(zkstack_args: ZkStack) -> anyhow::Result<()> { init_prompt_theme(); logger::new_empty_line(); @@ -107,38 +124,30 @@ async fn main() -> anyhow::Result<()> { let shell = Shell::new().unwrap(); - init_global_config_inner(&shell, &inception_args.global)?; + init_global_config_inner(&shell, &zkstack_args.global)?; if !global_config().ignore_prerequisites { check_general_prerequisites(&shell); } - match run_subcommand(inception_args, &shell).await { - Ok(_) => {} - Err(error) => { - log_error(error); - std::process::exit(1); + match zkstack_args.command { + ZkStackSubcommands::Autocomplete(args) => commands::autocomplete::run(args)?, + ZkStackSubcommands::Ecosystem(args) => commands::ecosystem::run(&shell, *args).await?, + ZkStackSubcommands::Chain(args) => commands::chain::run(&shell, *args).await?, + ZkStackSubcommands::Dev(args) => commands::dev::run(&shell, args).await?, + ZkStackSubcommands::Prover(args) => commands::prover::run(&shell, args).await?, + ZkStackSubcommands::Server(args) => commands::server::run(&shell, args)?, + ZkStackSubcommands::Containers(args) => commands::containers::run(&shell, args)?, + ZkStackSubcommands::ExternalNode(args) => { + commands::external_node::run(&shell, args).await? } - } - Ok(()) -} - -async fn run_subcommand(inception_args: ZkStack, shell: &Shell) -> anyhow::Result<()> { - match inception_args.command { - ZkStackSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, *args).await?, - ZkStackSubcommands::Chain(args) => commands::chain::run(shell, *args).await?, - ZkStackSubcommands::Dev(args) => commands::dev::run(shell, args).await?, - ZkStackSubcommands::Prover(args) => commands::prover::run(shell, args).await?, - ZkStackSubcommands::Server(args) => commands::server::run(shell, args)?, - ZkStackSubcommands::Containers(args) => commands::containers::run(shell, args)?, - ZkStackSubcommands::ExternalNode(args) => commands::external_node::run(shell, args).await?, ZkStackSubcommands::ContractVerifier(args) => { - commands::contract_verifier::run(shell, args).await? + commands::contract_verifier::run(&shell, args).await? 
} - ZkStackSubcommands::Explorer(args) => commands::explorer::run(shell, args).await?, - ZkStackSubcommands::Consensus(cmd) => cmd.run(shell).await?, - ZkStackSubcommands::Portal => commands::portal::run(shell).await?, - ZkStackSubcommands::Update(args) => commands::update::run(shell, args).await?, + ZkStackSubcommands::Explorer(args) => commands::explorer::run(&shell, args).await?, + ZkStackSubcommands::Consensus(cmd) => cmd.run(&shell).await?, + ZkStackSubcommands::Portal => commands::portal::run(&shell).await?, + ZkStackSubcommands::Update(args) => commands::update::run(&shell, args).await?, ZkStackSubcommands::Markdown => { clap_markdown::print_help_markdown::(); } @@ -146,11 +155,8 @@ async fn run_subcommand(inception_args: ZkStack, shell: &Shell) -> anyhow::Resul Ok(()) } -fn init_global_config_inner( - shell: &Shell, - inception_args: &ZkStackGlobalArgs, -) -> anyhow::Result<()> { - if let Some(name) = &inception_args.chain { +fn init_global_config_inner(shell: &Shell, zkstack_args: &ZkStackGlobalArgs) -> anyhow::Result<()> { + if let Some(name) = &zkstack_args.chain { if let Ok(config) = EcosystemConfig::from_file(shell) { let chains = config.list_of_chains(); if !chains.contains(name) { @@ -163,9 +169,9 @@ fn init_global_config_inner( } } init_global_config(GlobalConfig { - verbose: inception_args.verbose, - chain_name: inception_args.chain.clone(), - ignore_prerequisites: inception_args.ignore_prerequisites, + verbose: zkstack_args.verbose, + chain_name: zkstack_args.chain.clone(), + ignore_prerequisites: zkstack_args.ignore_prerequisites, }); Ok(()) } diff --git a/zkstack_cli/crates/zkstack/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs index 6d6a1ceb566..e2145c18ffd 100644 --- a/zkstack_cli/crates/zkstack/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -16,6 +16,13 @@ pub(super) const MSG_CHAIN_NOT_INITIALIZED: &str = "Chain not initialized. Please create a chain first"; pub(super) const MSG_ARGS_VALIDATOR_ERR: &str = "Invalid arguments"; +/// Autocomplete message +pub(super) fn msg_generate_autocomplete_file(filename: &str) -> String { + format!("Generating completion file: {filename}") +} +pub(super) const MSG_OUTRO_AUTOCOMPLETE_GENERATION: &str = + "Autocompletion file correctly generated"; + /// Ecosystem create related messages pub(super) const MSG_L1_NETWORK_HELP: &str = "L1 Network"; pub(super) const MSG_LINK_TO_CODE_HELP: &str = "Code link"; From caee55fef4eed0ec58cceaeba277bbdedf5c6f51 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Wed, 23 Oct 2024 06:22:06 +0200 Subject: [PATCH 104/140] fix(consensus): preventing config update reverts (#3148) Random failures of consensus_global_config RPC may cause config reverts, until the consensus_genesis() RPC is fully deprecated. Although the problems of this sort are transient, this pr adds extra protection from this kind of situations to prevent unnecessary binary restarts. --- core/lib/dal/src/consensus_dal/mod.rs | 64 +++++++++++++++++---------- core/node/consensus/src/en.rs | 7 ++- 2 files changed, 46 insertions(+), 25 deletions(-) diff --git a/core/lib/dal/src/consensus_dal/mod.rs b/core/lib/dal/src/consensus_dal/mod.rs index 9515e93f2b3..4516434868c 100644 --- a/core/lib/dal/src/consensus_dal/mod.rs +++ b/core/lib/dal/src/consensus_dal/mod.rs @@ -16,10 +16,48 @@ use crate::{Core, CoreDal}; #[cfg(test)] mod tests; +/// Hash of the batch. 
pub fn batch_hash(info: &StoredBatchInfo) -> attester::BatchHash { attester::BatchHash(Keccak256::from_bytes(info.hash().0)) } +/// Verifies that the transition from `old` to `new` is admissible. +pub fn verify_config_transition(old: &GlobalConfig, new: &GlobalConfig) -> anyhow::Result<()> { + anyhow::ensure!( + old.genesis.chain_id == new.genesis.chain_id, + "changing chain_id is not allowed: old = {:?}, new = {:?}", + old.genesis.chain_id, + new.genesis.chain_id, + ); + // Note that it may happen that the fork number didn't change, + // in case the binary was updated to support more fields in genesis struct. + // In such a case, the old binary was not able to connect to the consensus network, + // because of the genesis hash mismatch. + // TODO: Perhaps it would be better to deny unknown fields in the genesis instead. + // It would require embedding the genesis either as a json string or protobuf bytes within + // the global config, so that the global config can be parsed with + // `deny_unknown_fields:false` while genesis would be parsed with + // `deny_unknown_fields:true`. + anyhow::ensure!( + old.genesis.fork_number <= new.genesis.fork_number, + "transition to a past fork is not allowed: old = {:?}, new = {:?}", + old.genesis.fork_number, + new.genesis.fork_number, + ); + new.genesis.verify().context("genesis.verify()")?; + // This is a temporary hack until the `consensus_genesis()` RPC is disabled. + if new + == (&GlobalConfig { + genesis: old.genesis.clone(), + registry_address: None, + seed_peers: [].into(), + }) + { + anyhow::bail!("new config is equal to truncated old config, which means that it was sourced from the wrong endpoint"); + } + Ok(()) +} + /// Storage access methods for `zksync_core::consensus` module. #[derive(Debug)] pub struct ConsensusDal<'a, 'c> { @@ -94,6 +132,8 @@ impl ConsensusDal<'_, '_> { if got == want { return Ok(()); } + verify_config_transition(got, want)?; + // If genesis didn't change, just update the config. if got.genesis == want.genesis { let s = zksync_protobuf::serde::Serialize; @@ -112,30 +152,6 @@ impl ConsensusDal<'_, '_> { txn.commit().await?; return Ok(()); } - - // Verify the genesis change. - anyhow::ensure!( - got.genesis.chain_id == want.genesis.chain_id, - "changing chain_id is not allowed: old = {:?}, new = {:?}", - got.genesis.chain_id, - want.genesis.chain_id, - ); - // Note that it may happen that the fork number didn't change, - // in case the binary was updated to support more fields in genesis struct. - // In such a case, the old binary was not able to connect to the consensus network, - // because of the genesis hash mismatch. - // TODO: Perhaps it would be better to deny unknown fields in the genesis instead. - // It would require embedding the genesis either as a json string or protobuf bytes within - // the global config, so that the global config can be parsed with - // `deny_unknown_fields:false` while genesis would be parsed with - // `deny_unknown_fields:true`. - anyhow::ensure!( - got.genesis.fork_number <= want.genesis.fork_number, - "transition to a past fork is not allowed: old = {:?}, new = {:?}", - got.genesis.fork_number, - want.genesis.fork_number, - ); - want.genesis.verify().context("genesis.verify()")?; } // Reset the consensus state. 
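A minimal sketch of the transition rules enforced by `verify_config_transition` above, using hypothetical stand-in types instead of the real `GlobalConfig`/`Genesis` structs (the type and function names below are illustrative only, not part of the patch):

```rust
// Sketch of the three guard rules: same chain id, non-decreasing fork number,
// and rejection of a "truncated" config, i.e. the shape produced when the
// deprecated `consensus_genesis()` RPC is used instead of `consensus_global_config()`.
#[derive(Clone, PartialEq)]
struct SimpleConfig {
    chain_id: u64,
    fork_number: u64,
    registry_address: Option<[u8; 20]>,
    seed_peers: Vec<String>,
}

fn verify_transition(old: &SimpleConfig, new: &SimpleConfig) -> anyhow::Result<()> {
    anyhow::ensure!(old.chain_id == new.chain_id, "changing chain_id is not allowed");
    anyhow::ensure!(
        old.fork_number <= new.fork_number,
        "transition to a past fork is not allowed"
    );
    // A config that looks like the old one with the extra fields stripped was
    // most likely sourced from the legacy endpoint and must not be applied.
    let truncated_old = SimpleConfig {
        registry_address: None,
        seed_peers: Vec::new(),
        ..old.clone()
    };
    anyhow::ensure!(
        *new != truncated_old,
        "new config equals the truncated old config, so it was sourced from the wrong endpoint"
    );
    Ok(())
}
```

With these rules in place, a transient failure of `consensus_global_config()` that triggers the legacy fallback no longer reverts an already-updated config.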
diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 518a7ebb29a..8158cc5aeb2 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -100,7 +100,12 @@ impl EN { let old = old; loop { if let Ok(new) = self.fetch_global_config(ctx).await { - if new != old { + // We verify the transition here to work around the situation + // where `consenus_global_config()` RPC fails randomly and fallback + // to `consensus_genesis()` RPC activates. + if new != old + && consensus_dal::verify_config_transition(&old, &new).is_ok() + { return Err(anyhow::format_err!( "global config changed: old {old:?}, new {new:?}" ) From 5092031050b30c39107df788317a15eaa921b136 Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Wed, 23 Oct 2024 19:28:32 +1100 Subject: [PATCH 105/140] fix(zkstack_cli): make progress bar optional in non-terminal envs (#3146) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Enables plain logging as a fallback for progress bar. ## Why ❔ Spinner does not print anything when zkstack is redirected to a file, piped or is just ran in an env with no virtual terminal. Most notably this affects our CI where all spinner messages are just swallowed into nowhere. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- zkstack_cli/crates/common/src/term/spinner.rs | 57 +++++++++++++++---- 1 file changed, 45 insertions(+), 12 deletions(-) diff --git a/zkstack_cli/crates/common/src/term/spinner.rs b/zkstack_cli/crates/common/src/term/spinner.rs index b97ba075ac4..3ec2631804a 100644 --- a/zkstack_cli/crates/common/src/term/spinner.rs +++ b/zkstack_cli/crates/common/src/term/spinner.rs @@ -1,34 +1,40 @@ -use std::time::Instant; +use std::{fmt::Display, io::IsTerminal, time::Instant}; use cliclack::{spinner, ProgressBar}; -use crate::config::global_config; +use crate::{config::global_config, logger}; /// Spinner is a helper struct to show a spinner while some operation is running. pub struct Spinner { msg: String, - pb: ProgressBar, + output: SpinnerOutput, time: Instant, } impl Spinner { /// Create a new spinner with a message. pub fn new(msg: &str) -> Self { - let pb = spinner(); - pb.start(msg); - if global_config().verbose { - pb.stop(msg); - } + let output = if std::io::stdout().is_terminal() { + let pb = spinner(); + pb.start(msg); + if global_config().verbose { + pb.stop(msg); + } + SpinnerOutput::Progress(pb) + } else { + logger::info(msg); + SpinnerOutput::Plain() + }; Spinner { msg: msg.to_owned(), - pb, + output, time: Instant::now(), } } /// Manually finish the spinner. pub fn finish(self) { - self.pb.stop(format!( + self.output.stop(format!( "{} done in {} secs", self.msg, self.time.elapsed().as_secs_f64() @@ -37,7 +43,7 @@ impl Spinner { /// Interrupt the spinner with a failed message. pub fn fail(self) { - self.pb.error(format!( + self.output.error(format!( "{} failed in {} secs", self.msg, self.time.elapsed().as_secs_f64() @@ -46,6 +52,33 @@ impl Spinner { /// Freeze the spinner with current message. pub fn freeze(self) { - self.pb.stop(self.msg); + self.output.stop(self.msg); + } +} + +/// An abstraction that makes interactive progress bar optional in environments where virtual +/// terminal is not available. 
+/// +/// Uses plain `logger::{info,error}` as the fallback. +/// +/// See https://github.com/console-rs/indicatif/issues/530 for more details. +enum SpinnerOutput { + Progress(ProgressBar), + Plain(), +} + +impl SpinnerOutput { + fn error(&self, msg: impl Display) { + match self { + SpinnerOutput::Progress(pb) => pb.error(msg), + SpinnerOutput::Plain() => logger::error(msg), + } + } + + fn stop(self, msg: impl Display) { + match self { + SpinnerOutput::Progress(pb) => pb.stop(msg), + SpinnerOutput::Plain() => logger::info(msg), + } } } From 11e525e49531e2aa2d556337350b9af9355727fc Mon Sep 17 00:00:00 2001 From: Shahar Kaminsky Date: Wed, 23 Oct 2024 14:05:16 +0300 Subject: [PATCH 106/140] fix(zk chains): Increase Batch Seal Default Deadline (#3154) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR increases the L1 Batch seadl default deadline from 1 hour to 8 hours. ## Why ❔ - For lower TPS chains, this configuration matches reality better and is less wasteful in terms of Ethereum interactions. - For higher TPS chains, the batches will be sealed by other seal criteria anyways ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- etc/env/file_based/overrides/mainnet.yaml | 3 ++- etc/env/file_based/overrides/testnet.yaml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/etc/env/file_based/overrides/mainnet.yaml b/etc/env/file_based/overrides/mainnet.yaml index 0600abf694c..7565aac869a 100644 --- a/etc/env/file_based/overrides/mainnet.yaml +++ b/etc/env/file_based/overrides/mainnet.yaml @@ -1,5 +1,6 @@ state_keeper: - block_commit_deadline_ms: 3600000 + # Default batch seal time deadline: 8 hours + block_commit_deadline_ms: 28000000 minimal_l2_gas_price: 45250000 eth: sender: diff --git a/etc/env/file_based/overrides/testnet.yaml b/etc/env/file_based/overrides/testnet.yaml index e4da1ac96e2..d36cf9fc7bc 100644 --- a/etc/env/file_based/overrides/testnet.yaml +++ b/etc/env/file_based/overrides/testnet.yaml @@ -1,5 +1,6 @@ state_keeper: - block_commit_deadline_ms: 3600000 + # Default batch seal time deadline: 8 hours + block_commit_deadline_ms: 28000000 minimal_l2_gas_price: 25000000 eth: sender: From 35e84cc03a7fdd315932fb3020fe41c95a6e4bca Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Wed, 23 Oct 2024 15:15:47 +0400 Subject: [PATCH 107/140] feat(api): Implement eth_maxPriorityFeePerGas (#3135) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixes #13 Implements `eth_maxPriorityFeePerGas` method. ## Why ❔ This method is a de-facto standard now, and SDKs (e.g. viem) can use it assuming that it's supported. Even given that we're not really using EIP1559, we should still support it and return 0. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
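As a quick illustration of the client-visible behaviour (a sketch only; the node URL and the `reqwest`/`tokio` setup are assumptions, not part of this change), the method can be queried over plain JSON-RPC and always returns zero:

```rust
use serde_json::{json, Value};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // JSON-RPC 2.0 request for the newly supported method.
    let payload = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "eth_maxPriorityFeePerGas",
        "params": []
    });
    let response: Value = reqwest::Client::new()
        .post("http://localhost:3050") // illustrative L2 node RPC URL
        .json(&payload)
        .send()
        .await?
        .json()
        .await?;
    // The priority fee is not used by ZKsync, so the result is always zero.
    assert_eq!(response["result"], json!("0x0"));
    Ok(())
}
```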
--- core/lib/web3_decl/src/namespaces/eth.rs | 3 +++ .../api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs | 4 ++++ core/node/api_server/src/web3/namespaces/eth.rs | 5 +++++ core/tests/ts-integration/tests/api/web3.test.ts | 6 ++++-- 4 files changed, 16 insertions(+), 2 deletions(-) diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs index 399773b845d..40cb6300cff 100644 --- a/core/lib/web3_decl/src/namespaces/eth.rs +++ b/core/lib/web3_decl/src/namespaces/eth.rs @@ -185,6 +185,9 @@ pub trait EthNamespace { newest_block: BlockNumber, reward_percentiles: Vec, ) -> RpcResult; + + #[method(name = "maxPriorityFeePerGas")] + async fn max_priority_fee_per_gas(&self) -> RpcResult; } #[cfg(feature = "server")] diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs index cc2209a35d3..34275601375 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs @@ -268,4 +268,8 @@ impl EthNamespaceServer for EthNamespace { .await .map_err(|err| self.current_method().map_err(err)) } + + async fn max_priority_fee_per_gas(&self) -> RpcResult { + Ok(self.max_priority_fee_per_gas_impl()) + } } diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 5206cd3bc2b..ee37cb989f1 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -863,6 +863,11 @@ impl EthNamespace { } }) } + + pub fn max_priority_fee_per_gas_impl(&self) -> U256 { + // ZKsync does not require priority fee. + 0u64.into() + } } // Bogus methods. diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index 6f1b6c3aa6b..ceed9654df9 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -189,7 +189,8 @@ describe('web3 API compatibility tests', () => { ['eth_getCompilers', [], []], ['eth_hashrate', [], '0x0'], ['eth_mining', [], false], - ['eth_getUncleCountByBlockNumber', ['0x0'], '0x0'] + ['eth_getUncleCountByBlockNumber', ['0x0'], '0x0'], + ['eth_maxPriorityFeePerGas', [], '0x0'] ])('Should test bogus web3 methods (%s)', async (method: string, input: string[], output: string) => { await expect(alice.provider.send(method, input)).resolves.toEqual(output); }); @@ -271,7 +272,8 @@ describe('web3 API compatibility tests', () => { const eip1559ApiReceipt = await alice.provider.getTransaction(eip1559Tx.hash); expect(eip1559ApiReceipt.maxFeePerGas).toEqual(eip1559Tx.maxFeePerGas!); - expect(eip1559ApiReceipt.maxPriorityFeePerGas).toEqual(eip1559Tx.maxPriorityFeePerGas!); + // `ethers` will use value provided by `eth_maxPriorityFeePerGas`, and we return 0 there. + expect(eip1559ApiReceipt.maxPriorityFeePerGas).toEqual(0n); }); test('Should test getFilterChanges for pending transactions', async () => { From 08a3fe7ffd0410c51334193068649905337d5e84 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Wed, 23 Oct 2024 13:19:42 +0200 Subject: [PATCH 108/140] fix: Fix counter metric type to be Counter. (#3153) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fix counter metric type to be Counter. ## Why ❔ To have correct calculation of network errors across restarts. 
## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/bin/zksync_tee_prover/src/metrics.rs | 4 ++-- core/bin/zksync_tee_prover/src/tee_prover.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core/bin/zksync_tee_prover/src/metrics.rs b/core/bin/zksync_tee_prover/src/metrics.rs index 9f535967f79..769a8bbc7e0 100644 --- a/core/bin/zksync_tee_prover/src/metrics.rs +++ b/core/bin/zksync_tee_prover/src/metrics.rs @@ -2,7 +2,7 @@ use std::time::Duration; -use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; +use vise::{Buckets, Counter, Gauge, Histogram, Metrics, Unit}; #[derive(Debug, Metrics)] #[metrics(prefix = "tee_prover")] @@ -13,7 +13,7 @@ pub(crate) struct TeeProverMetrics { pub proof_generation_time: Histogram, #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] pub proof_submitting_time: Histogram, - pub network_errors_counter: Gauge, + pub network_errors_counter: Counter, pub last_batch_number_processed: Gauge, } diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index bb7176644e6..5d22d1e7c63 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -155,7 +155,7 @@ impl Task for TeeProver { } } Err(err) => { - METRICS.network_errors_counter.inc_by(1); + METRICS.network_errors_counter.inc(); if !err.is_retriable() || retries > config.max_retries { return Err(err.into()); } From 0d78228c8c6a848644ded1e6807ee88f80212c9f Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:23:19 +0400 Subject: [PATCH 109/140] chore(main): release core 25.0.0 (#3094) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit :robot: I have created a release *beep* *boop* --- ## [25.0.0](https://github.com/matter-labs/zksync-era/compare/core-v24.29.0...core-v25.0.0) (2024-10-23) ### ⚠ BREAKING CHANGES * **contracts:** integrate protocol defense changes ([#2737](https://github.com/matter-labs/zksync-era/issues/2737)) ### Features * Add CoinMarketCap external API ([#2971](https://github.com/matter-labs/zksync-era/issues/2971)) ([c1cb30e](https://github.com/matter-labs/zksync-era/commit/c1cb30e59ca1d0b5fea5fe0980082aea0eb04aa2)) * **api:** Implement eth_maxPriorityFeePerGas ([#3135](https://github.com/matter-labs/zksync-era/issues/3135)) ([35e84cc](https://github.com/matter-labs/zksync-era/commit/35e84cc03a7fdd315932fb3020fe41c95a6e4bca)) * **api:** Make acceptable values cache lag configurable ([#3028](https://github.com/matter-labs/zksync-era/issues/3028)) ([6747529](https://github.com/matter-labs/zksync-era/commit/67475292ff770d2edd6884be27f976a4144778ae)) * **contracts:** integrate protocol defense changes ([#2737](https://github.com/matter-labs/zksync-era/issues/2737)) ([c60a348](https://github.com/matter-labs/zksync-era/commit/c60a3482ee09b3e371163e62f49e83bc6d6f4548)) * **external-node:** save protocol version before opening a batch ([#3136](https://github.com/matter-labs/zksync-era/issues/3136)) ([d6de4f4](https://github.com/matter-labs/zksync-era/commit/d6de4f40ddce339c760c95e2bf4b8aceb571af7f)) * Prover e2e test ([#2975](https://github.com/matter-labs/zksync-era/issues/2975)) 
([0edd796](https://github.com/matter-labs/zksync-era/commit/0edd7962429b3530ae751bd7cc947c97193dd0ca)) * **prover:** Add min_provers and dry_run features. Improve metrics and test. ([#3129](https://github.com/matter-labs/zksync-era/issues/3129)) ([7c28964](https://github.com/matter-labs/zksync-era/commit/7c289649b7b3c418c7193a35b51c264cf4970f3c)) * **tee_verifier:** speedup SQL query for new jobs ([#3133](https://github.com/matter-labs/zksync-era/issues/3133)) ([30ceee8](https://github.com/matter-labs/zksync-era/commit/30ceee8a48046e349ff0234ebb24d468a0e0876c)) * vm2 tracers can access storage ([#3114](https://github.com/matter-labs/zksync-era/issues/3114)) ([e466b52](https://github.com/matter-labs/zksync-era/commit/e466b52948e3c4ed1cb5af4fd999a52028e4d216)) * **vm:** Return compressed bytecodes from `push_transaction()` ([#3126](https://github.com/matter-labs/zksync-era/issues/3126)) ([37f209f](https://github.com/matter-labs/zksync-era/commit/37f209fec8e7cb65c0e60003d46b9ea69c43caf1)) ### Bug Fixes * **call_tracer:** Flat call tracer fixes for blocks ([#3095](https://github.com/matter-labs/zksync-era/issues/3095)) ([30ddb29](https://github.com/matter-labs/zksync-era/commit/30ddb292977340beab37a81f75c35480cbdd59d3)) * **consensus:** preventing config update reverts ([#3148](https://github.com/matter-labs/zksync-era/issues/3148)) ([caee55f](https://github.com/matter-labs/zksync-era/commit/caee55fef4eed0ec58cceaeba277bbdedf5c6f51)) * **en:** Return `SyncState` health check ([#3142](https://github.com/matter-labs/zksync-era/issues/3142)) ([abeee81](https://github.com/matter-labs/zksync-era/commit/abeee8190d3c3a5e577d71024bdfb30ff516ad03)) * **external-node:** delete empty unsealed batch on EN initialization ([#3125](https://github.com/matter-labs/zksync-era/issues/3125)) ([5d5214b](https://github.com/matter-labs/zksync-era/commit/5d5214ba983823b306495d34fdd1d46abacce07a)) * Fix counter metric type to be Counter. ([#3153](https://github.com/matter-labs/zksync-era/issues/3153)) ([08a3fe7](https://github.com/matter-labs/zksync-era/commit/08a3fe7ffd0410c51334193068649905337d5e84)) * **mempool:** minor mempool improvements ([#3113](https://github.com/matter-labs/zksync-era/issues/3113)) ([cd16083](https://github.com/matter-labs/zksync-era/commit/cd160830a0b7ebe5af4ecbd944da1cd51af3528a)) * **prover:** Run for zero queue to allow scaling down to 0 ([#3115](https://github.com/matter-labs/zksync-era/issues/3115)) ([bbe1919](https://github.com/matter-labs/zksync-era/commit/bbe191937fa5c5711a7164fd4f0c2ae65cda0833)) * restore instruction count functionality ([#3081](https://github.com/matter-labs/zksync-era/issues/3081)) ([6159f75](https://github.com/matter-labs/zksync-era/commit/6159f7531a0340a69c4926c4e0325811ed7cabb8)) * **state-keeper:** save call trace for upgrade txs ([#3132](https://github.com/matter-labs/zksync-era/issues/3132)) ([e1c363f](https://github.com/matter-labs/zksync-era/commit/e1c363f8f5e03c8d62bba1523f17b87d6a0e25ad)) * **tee_prover:** add zstd compression ([#3144](https://github.com/matter-labs/zksync-era/issues/3144)) ([7241ae1](https://github.com/matter-labs/zksync-era/commit/7241ae139b2b6bf9a9966eaa2f22203583a3786f)) * **tee_verifier:** correctly initialize storage for re-execution ([#3017](https://github.com/matter-labs/zksync-era/issues/3017)) ([9d88373](https://github.com/matter-labs/zksync-era/commit/9d88373f1b745c489e98e5ef542644a70e815498)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). 
See [documentation](https://github.com/googleapis/release-please#release-please). --------- Co-authored-by: zksync-era-bot --- .github/release-please/manifest.json | 2 +- Cargo.lock | 2 +- core/CHANGELOG.md | 35 ++++++++++++++++++++++++++++ core/bin/external_node/Cargo.toml | 2 +- 4 files changed, 38 insertions(+), 3 deletions(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index a56866a8bd7..a0d1d73bdda 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.29.0", + "core": "25.0.0", "prover": "16.5.0", "zkstack_cli": "0.1.2" } diff --git a/Cargo.lock b/Cargo.lock index 05c26a74834..7e4cad34cf8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10220,7 +10220,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.29.0" +version = "25.0.0" dependencies = [ "anyhow", "assert_matches", diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 59b49af1554..56239303cd4 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,40 @@ # Changelog +## [25.0.0](https://github.com/matter-labs/zksync-era/compare/core-v24.29.0...core-v25.0.0) (2024-10-23) + + +### ⚠ BREAKING CHANGES + +* **contracts:** integrate protocol defense changes ([#2737](https://github.com/matter-labs/zksync-era/issues/2737)) + +### Features + +* Add CoinMarketCap external API ([#2971](https://github.com/matter-labs/zksync-era/issues/2971)) ([c1cb30e](https://github.com/matter-labs/zksync-era/commit/c1cb30e59ca1d0b5fea5fe0980082aea0eb04aa2)) +* **api:** Implement eth_maxPriorityFeePerGas ([#3135](https://github.com/matter-labs/zksync-era/issues/3135)) ([35e84cc](https://github.com/matter-labs/zksync-era/commit/35e84cc03a7fdd315932fb3020fe41c95a6e4bca)) +* **api:** Make acceptable values cache lag configurable ([#3028](https://github.com/matter-labs/zksync-era/issues/3028)) ([6747529](https://github.com/matter-labs/zksync-era/commit/67475292ff770d2edd6884be27f976a4144778ae)) +* **contracts:** integrate protocol defense changes ([#2737](https://github.com/matter-labs/zksync-era/issues/2737)) ([c60a348](https://github.com/matter-labs/zksync-era/commit/c60a3482ee09b3e371163e62f49e83bc6d6f4548)) +* **external-node:** save protocol version before opening a batch ([#3136](https://github.com/matter-labs/zksync-era/issues/3136)) ([d6de4f4](https://github.com/matter-labs/zksync-era/commit/d6de4f40ddce339c760c95e2bf4b8aceb571af7f)) +* Prover e2e test ([#2975](https://github.com/matter-labs/zksync-era/issues/2975)) ([0edd796](https://github.com/matter-labs/zksync-era/commit/0edd7962429b3530ae751bd7cc947c97193dd0ca)) +* **prover:** Add min_provers and dry_run features. Improve metrics and test. 
([#3129](https://github.com/matter-labs/zksync-era/issues/3129)) ([7c28964](https://github.com/matter-labs/zksync-era/commit/7c289649b7b3c418c7193a35b51c264cf4970f3c)) +* **tee_verifier:** speedup SQL query for new jobs ([#3133](https://github.com/matter-labs/zksync-era/issues/3133)) ([30ceee8](https://github.com/matter-labs/zksync-era/commit/30ceee8a48046e349ff0234ebb24d468a0e0876c)) +* vm2 tracers can access storage ([#3114](https://github.com/matter-labs/zksync-era/issues/3114)) ([e466b52](https://github.com/matter-labs/zksync-era/commit/e466b52948e3c4ed1cb5af4fd999a52028e4d216)) +* **vm:** Return compressed bytecodes from `push_transaction()` ([#3126](https://github.com/matter-labs/zksync-era/issues/3126)) ([37f209f](https://github.com/matter-labs/zksync-era/commit/37f209fec8e7cb65c0e60003d46b9ea69c43caf1)) + + +### Bug Fixes + +* **call_tracer:** Flat call tracer fixes for blocks ([#3095](https://github.com/matter-labs/zksync-era/issues/3095)) ([30ddb29](https://github.com/matter-labs/zksync-era/commit/30ddb292977340beab37a81f75c35480cbdd59d3)) +* **consensus:** preventing config update reverts ([#3148](https://github.com/matter-labs/zksync-era/issues/3148)) ([caee55f](https://github.com/matter-labs/zksync-era/commit/caee55fef4eed0ec58cceaeba277bbdedf5c6f51)) +* **en:** Return `SyncState` health check ([#3142](https://github.com/matter-labs/zksync-era/issues/3142)) ([abeee81](https://github.com/matter-labs/zksync-era/commit/abeee8190d3c3a5e577d71024bdfb30ff516ad03)) +* **external-node:** delete empty unsealed batch on EN initialization ([#3125](https://github.com/matter-labs/zksync-era/issues/3125)) ([5d5214b](https://github.com/matter-labs/zksync-era/commit/5d5214ba983823b306495d34fdd1d46abacce07a)) +* Fix counter metric type to be Counter. ([#3153](https://github.com/matter-labs/zksync-era/issues/3153)) ([08a3fe7](https://github.com/matter-labs/zksync-era/commit/08a3fe7ffd0410c51334193068649905337d5e84)) +* **mempool:** minor mempool improvements ([#3113](https://github.com/matter-labs/zksync-era/issues/3113)) ([cd16083](https://github.com/matter-labs/zksync-era/commit/cd160830a0b7ebe5af4ecbd944da1cd51af3528a)) +* **prover:** Run for zero queue to allow scaling down to 0 ([#3115](https://github.com/matter-labs/zksync-era/issues/3115)) ([bbe1919](https://github.com/matter-labs/zksync-era/commit/bbe191937fa5c5711a7164fd4f0c2ae65cda0833)) +* restore instruction count functionality ([#3081](https://github.com/matter-labs/zksync-era/issues/3081)) ([6159f75](https://github.com/matter-labs/zksync-era/commit/6159f7531a0340a69c4926c4e0325811ed7cabb8)) +* **state-keeper:** save call trace for upgrade txs ([#3132](https://github.com/matter-labs/zksync-era/issues/3132)) ([e1c363f](https://github.com/matter-labs/zksync-era/commit/e1c363f8f5e03c8d62bba1523f17b87d6a0e25ad)) +* **tee_prover:** add zstd compression ([#3144](https://github.com/matter-labs/zksync-era/issues/3144)) ([7241ae1](https://github.com/matter-labs/zksync-era/commit/7241ae139b2b6bf9a9966eaa2f22203583a3786f)) +* **tee_verifier:** correctly initialize storage for re-execution ([#3017](https://github.com/matter-labs/zksync-era/issues/3017)) ([9d88373](https://github.com/matter-labs/zksync-era/commit/9d88373f1b745c489e98e5ef542644a70e815498)) + ## [24.29.0](https://github.com/matter-labs/zksync-era/compare/core-v24.28.0...core-v24.29.0) (2024-10-14) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 25f2400c79b..4e3dc548cf8 100644 --- a/core/bin/external_node/Cargo.toml +++ 
b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.29.0" # x-release-please-version +version = "25.0.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true From bfedac03b53055c6e2d5fa6bd6bdc78e2cb1724c Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Wed, 23 Oct 2024 15:07:41 +0200 Subject: [PATCH 110/140] feat(prover): Autoscaler sends scale request to appropriate agents. (#3150) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add feature for Autoscaler to send scale request to appropriate agents. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. ref ZKD-1855 --- .../crates/bin/prover_autoscaler/src/agent.rs | 4 +- .../prover_autoscaler/src/cluster_types.rs | 2 + .../prover_autoscaler/src/global/scaler.rs | 96 +++++++++++++++--- .../prover_autoscaler/src/global/watcher.rs | 97 ++++++++++++++++++- 4 files changed, 177 insertions(+), 22 deletions(-) diff --git a/prover/crates/bin/prover_autoscaler/src/agent.rs b/prover/crates/bin/prover_autoscaler/src/agent.rs index 3269a43815c..f810bc41672 100644 --- a/prover/crates/bin/prover_autoscaler/src/agent.rs +++ b/prover/crates/bin/prover_autoscaler/src/agent.rs @@ -84,14 +84,14 @@ async fn get_cluster(State(app): State) -> Result, AppError> Ok(Json(cluster)) } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct ScaleDeploymentRequest { pub namespace: String, pub name: String, pub size: i32, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct ScaleRequest { pub deployments: Vec, } diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs index c25b624b5d4..b800b86f3c2 100644 --- a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs +++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs @@ -45,6 +45,8 @@ pub struct Cluster { #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct Clusters { pub clusters: HashMap, + /// Map from cluster to index in agent URLs Vec. + pub agent_ids: HashMap, } #[derive(Default, Debug, EnumString, Display, Hash, PartialEq, Eq, Clone, Copy)] diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs index f10902f5dd2..884174562a1 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -8,6 +8,7 @@ use zksync_config::configs::prover_autoscaler::{Gpu, ProverAutoscalerScalerConfi use super::{queuer, watcher}; use crate::{ + agent::{ScaleDeploymentRequest, ScaleRequest}, cluster_types::{Cluster, Clusters, Pod, PodStatus}, metrics::AUTOSCALER_METRICS, task_wiring::Task, @@ -48,6 +49,16 @@ static PROVER_DEPLOYMENT_RE: Lazy = static PROVER_POD_RE: Lazy = Lazy::new(|| Regex::new(r"^circuit-prover-gpu(-(?[ltvpa]\d+))?").unwrap()); +/// gpu_to_prover converts Gpu type to corresponding deployment name. 
+fn gpu_to_prover(gpu: Gpu) -> String { + let s = "circuit-prover-gpu"; + match gpu { + Gpu::Unknown => "".into(), + Gpu::L4 => s.into(), + _ => format!("{}-{}", s, gpu.to_string().to_lowercase()), + } +} + pub struct Scaler { /// namespace to Protocol Version configuration. namespaces: HashMap, @@ -299,6 +310,47 @@ impl Scaler { } } +fn diff( + namespace: &str, + provers: HashMap, + clusters: &Clusters, + requests: &mut HashMap, +) { + provers + .into_iter() + .for_each(|(GPUPoolKey { cluster, gpu }, n)| { + let prover = gpu_to_prover(gpu); + clusters + .clusters + .get(&cluster) + .and_then(|c| c.namespaces.get(namespace)) + .and_then(|ns| ns.deployments.get(&prover)) + .map_or_else( + || { + tracing::error!( + "Wasn't able to find deployment {} in cluster {}, namespace {}", + prover, + cluster, + namespace + ) + }, + |d| { + if d.desired != n as i32 { + requests + .entry(cluster.clone()) + .or_default() + .deployments + .push(ScaleDeploymentRequest { + namespace: namespace.into(), + name: prover.clone(), + size: n as i32, + }); + } + }, + ); + }) +} + /// is_namespace_running returns true if there are some pods running in it. fn is_namespace_running(namespace: &str, clusters: &Clusters) -> bool { clusters @@ -309,7 +361,7 @@ fn is_namespace_running(namespace: &str, clusters: &Clusters) -> bool { .flat_map(|v| v.deployments.values()) .map( |d| d.running + d.desired, // If there is something running or expected to run, we - // should consider the namespace. + // should re-evaluate the namespace. ) .sum::() > 0 @@ -320,24 +372,32 @@ impl Task for Scaler { async fn invoke(&self) -> anyhow::Result<()> { let queue = self.queuer.get_queue().await.unwrap(); - let guard = self.watcher.data.lock().await; - if let Err(err) = watcher::check_is_ready(&guard.is_ready) { - AUTOSCALER_METRICS.clusters_not_ready.inc(); - tracing::warn!("Skipping Scaler run: {}", err); - return Ok(()); - } + let mut scale_requests: HashMap = HashMap::new(); + { + let guard = self.watcher.data.lock().await; // Keeping the lock during all calls of run() for + // consitency. + if let Err(err) = watcher::check_is_ready(&guard.is_ready) { + AUTOSCALER_METRICS.clusters_not_ready.inc(); + tracing::warn!("Skipping Scaler run: {}", err); + return Ok(()); + } - for (ns, ppv) in &self.namespaces { - let q = queue.queue.get(ppv).cloned().unwrap_or(0); - tracing::debug!("Running eval for namespace {ns} and PPV {ppv} found queue {q}"); - if q > 0 || is_namespace_running(ns, &guard.clusters) { - let provers = self.run(ns, q, &guard.clusters); - for (k, num) in &provers { - AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] - .set(*num as u64); + for (ns, ppv) in &self.namespaces { + let q = queue.queue.get(ppv).cloned().unwrap_or(0); + tracing::debug!("Running eval for namespace {ns} and PPV {ppv} found queue {q}"); + if q > 0 || is_namespace_running(ns, &guard.clusters) { + let provers = self.run(ns, q, &guard.clusters); + for (k, num) in &provers { + AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] + .set(*num as u64); + } + diff(ns, provers, &guard.clusters, &mut scale_requests); } - // TODO: compare before and desired, send commands [cluster,namespace,deployment] -> provers } + } // Unlock self.watcher.data. 
+ + if let Err(err) = self.watcher.send_scale(scale_requests).await { + tracing::error!("Failed scale request: {}", err); } Ok(()) @@ -401,6 +461,7 @@ mod tests { }, )] .into(), + ..Default::default() }, ), [( @@ -467,6 +528,7 @@ mod tests { ) ] .into(), + ..Default::default() }, ), [ @@ -552,6 +614,7 @@ mod tests { ) ] .into(), + ..Default::default() }, ), [ @@ -662,6 +725,7 @@ mod tests { ) ] .into(), + ..Default::default() }, ), [ diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs index 646b320e12d..1b54d332ebb 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs @@ -2,12 +2,17 @@ use std::{collections::HashMap, sync::Arc}; use anyhow::{anyhow, Context, Ok, Result}; use futures::future; -use reqwest::Method; +use reqwest::{ + header::{HeaderMap, HeaderValue, CONTENT_TYPE}, + Method, +}; + use tokio::sync::Mutex; use url::Url; use zksync_utils::http_with_retries::send_request_with_retries; use crate::{ + agent::{ScaleRequest, ScaleResponse}, cluster_types::{Cluster, Clusters}, metrics::{AUTOSCALER_METRICS, DEFAULT_ERROR_CODE}, task_wiring::Task, @@ -51,13 +56,96 @@ impl Watcher { }) .collect(), data: Arc::new(Mutex::new(WatchedData { - clusters: Clusters { - clusters: HashMap::new(), - }, + clusters: Clusters::default(), is_ready: vec![false; size], })), } } + + pub async fn send_scale(&self, requests: HashMap) -> anyhow::Result<()> { + let id_requests: HashMap; + { + // Convert cluster names into ids. Holding the data lock. + let guard = self.data.lock().await; + id_requests = requests + .into_iter() + .filter_map(|(cluster, scale_request)| { + guard.clusters.agent_ids.get(&cluster).map_or_else( + || { + tracing::error!("Failed to find id for cluster {}", cluster); + None + }, + |id| Some((*id, scale_request)), + ) + }) + .collect(); + } + + let handles: Vec<_> = id_requests + .into_iter() + .map(|(id, sr)| { + let url: String = self.cluster_agents[id] + .clone() + .join("/scale") + .unwrap() + .to_string(); + tracing::debug!("Sending scale request to {}, data: {:?}.", url, sr); + tokio::spawn(async move { + let mut headers = HeaderMap::new(); + headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); + let response = send_request_with_retries( + &url, + MAX_RETRIES, + Method::POST, + Some(headers), + Some(serde_json::to_vec(&sr)?), + ) + .await; + let response = response.map_err(|err| { + AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc(); + anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}") + })?; + AUTOSCALER_METRICS.calls[&(url, response.status().as_u16())].inc(); + let response = response + .json::() + .await + .context("Failed to read response as json"); + Ok((id, response)) + }) + }) + .collect(); + + future::try_join_all( + future::join_all(handles) + .await + .into_iter() + .map(|h| async move { + let (id, res) = h??; + + let errors: Vec<_> = res + .expect("failed to do request to Agent") + .scale_result + .iter() + .filter_map(|e| { + if !e.is_empty() { + Some(format!("Agent {} failed to scale: {}", id, e)) + } else { + None + } + }) + .collect(); + + if !errors.is_empty() { + return Err(anyhow!(errors.join(";"))); + } + Ok(()) + }) + .collect::>(), + ) + .await?; + + Ok(()) + } } #[async_trait::async_trait] @@ -102,6 +190,7 @@ impl Task for Watcher { let (i, res) = h??; let c = res?; let mut guard = self.data.lock().await; + 
guard.clusters.agent_ids.insert(c.name.clone(), i); guard.clusters.clusters.insert(c.name.clone(), c); guard.is_ready[i] = true; Ok(()) From c79949b8ffde9867b961192afa6c815b44865ae4 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Wed, 23 Oct 2024 16:10:41 +0200 Subject: [PATCH 111/140] fix: Fix Doc lint. (#3158) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fix error found by Doc lint. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. ref ZKD-1855 --- prover/crates/bin/prover_autoscaler/src/global/watcher.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs index 1b54d332ebb..6e02c0fe2fd 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs @@ -6,7 +6,6 @@ use reqwest::{ header::{HeaderMap, HeaderValue, CONTENT_TYPE}, Method, }; - use tokio::sync::Mutex; use url::Url; use zksync_utils::http_with_retries::send_request_with_retries; From 84986f44b32d0a7fa68c3f60b1ec266ce663e778 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 23 Oct 2024 17:20:20 +0300 Subject: [PATCH 112/140] test(vm): Run `multivm` tests with shadowing (#3137) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Runs shared unit tests in the `multivm` crate for the shadowed VM. ## Why ❔ Allows to cheaply check VM divergences. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/lib/multivm/README.md | 2 + core/lib/multivm/src/versions/mod.rs | 2 + .../{testonly/shadow.rs => shadow/mod.rs} | 2 + core/lib/multivm/src/versions/shadow/tests.rs | 415 ++++++++++++++++++ .../versions/testonly/get_used_contracts.rs | 25 +- core/lib/multivm/src/versions/testonly/mod.rs | 12 +- .../src/versions/testonly/tester/mod.rs | 2 +- .../src/versions/vm_latest/tests/mod.rs | 9 +- core/lib/vm_interface/src/utils/dump.rs | 12 + core/lib/vm_interface/src/utils/mod.rs | 4 +- core/lib/vm_interface/src/utils/shadow.rs | 340 ++++++++------ 11 files changed, 674 insertions(+), 151 deletions(-) rename core/lib/multivm/src/versions/{testonly/shadow.rs => shadow/mod.rs} (99%) create mode 100644 core/lib/multivm/src/versions/shadow/tests.rs diff --git a/core/lib/multivm/README.md b/core/lib/multivm/README.md index f5e8a552242..34883db5990 100644 --- a/core/lib/multivm/README.md +++ b/core/lib/multivm/README.md @@ -14,5 +14,7 @@ If you want to add unit tests for the VM wrapper, consider the following: - Whenever possible, make tests reusable; declare test logic in the [`testonly`](src/versions/testonly/mod.rs) module, and then instantiate tests using this logic for the supported VM versions. If necessary, extend the tested VM trait so that test logic can be defined in a generic way. See the `testonly` module docs for more detailed guidelines. 
+- If you define a generic test, don't forget to add its instantiations for all supported VMs (`vm_latest`, `vm_fast` and + `shadow`). `shadow` tests allow checking VM divergences for free! - Do not use an RNG where it can be avoided (e.g., for test contract addresses). - Avoid using zero / default values in cases they can be treated specially by the tested code. diff --git a/core/lib/multivm/src/versions/mod.rs b/core/lib/multivm/src/versions/mod.rs index 1df706a6cce..b6523b3d474 100644 --- a/core/lib/multivm/src/versions/mod.rs +++ b/core/lib/multivm/src/versions/mod.rs @@ -1,3 +1,5 @@ +#[cfg(test)] +mod shadow; mod shared; #[cfg(test)] mod testonly; diff --git a/core/lib/multivm/src/versions/testonly/shadow.rs b/core/lib/multivm/src/versions/shadow/mod.rs similarity index 99% rename from core/lib/multivm/src/versions/testonly/shadow.rs rename to core/lib/multivm/src/versions/shadow/mod.rs index 6a7d42b06fc..fe9ce8eefcb 100644 --- a/core/lib/multivm/src/versions/testonly/shadow.rs +++ b/core/lib/multivm/src/versions/shadow/mod.rs @@ -28,6 +28,8 @@ use crate::{ vm_latest::HistoryEnabled, }; +mod tests; + type ReferenceVm = vm_latest::Vm, HistoryEnabled>; type ShadowedFastVm = crate::vm_instance::ShadowedFastVm; diff --git a/core/lib/multivm/src/versions/shadow/tests.rs b/core/lib/multivm/src/versions/shadow/tests.rs new file mode 100644 index 00000000000..64179f59be1 --- /dev/null +++ b/core/lib/multivm/src/versions/shadow/tests.rs @@ -0,0 +1,415 @@ +//! Unit tests from the `testonly` test suite. + +use std::collections::HashSet; + +use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H256, U256}; + +use super::ShadowedFastVm; +use crate::{ + interface::{ + utils::{ShadowMut, ShadowRef}, + CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs, + }, + versions::testonly::TestedVm, +}; + +impl TestedVm for ShadowedFastVm { + type StateDump = (); + + fn dump_state(&self) -> Self::StateDump { + // Do nothing + } + + fn gas_remaining(&mut self) -> u32 { + self.get_mut("gas_remaining", |r| match r { + ShadowMut::Main(vm) => vm.gas_remaining(), + ShadowMut::Shadow(vm) => vm.gas_remaining(), + }) + } + + fn get_current_execution_state(&self) -> CurrentExecutionState { + self.get_custom("current_execution_state", |r| match r { + ShadowRef::Main(vm) => vm.get_current_execution_state(), + ShadowRef::Shadow(vm) => vm.get_current_execution_state(), + }) + } + + fn decommitted_hashes(&self) -> HashSet { + self.get("decommitted_hashes", |r| match r { + ShadowRef::Main(vm) => vm.decommitted_hashes(), + ShadowRef::Shadow(vm) => TestedVm::decommitted_hashes(vm), + }) + } + + fn execute_with_state_diffs( + &mut self, + diffs: Vec, + mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.get_custom_mut("execute_with_state_diffs", |r| match r { + ShadowMut::Main(vm) => vm.execute_with_state_diffs(diffs.clone(), mode), + ShadowMut::Shadow(vm) => vm.execute_with_state_diffs(diffs.clone(), mode), + }) + } + + fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { + self.get_mut("insert_bytecodes", |r| match r { + ShadowMut::Main(vm) => vm.insert_bytecodes(bytecodes), + ShadowMut::Shadow(vm) => TestedVm::insert_bytecodes(vm, bytecodes), + }); + } + + fn known_bytecode_hashes(&self) -> HashSet { + self.get("known_bytecode_hashes", |r| match r { + ShadowRef::Main(vm) => vm.known_bytecode_hashes(), + ShadowRef::Shadow(vm) => vm.known_bytecode_hashes(), + }) + } + + fn manually_decommit(&mut self, code_hash: H256) -> bool { + self.get_mut("manually_decommit", |r| match r 
{ + ShadowMut::Main(vm) => vm.manually_decommit(code_hash), + ShadowMut::Shadow(vm) => vm.manually_decommit(code_hash), + }) + } + + fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]) { + self.get("verify_required_bootloader_heap", |r| match r { + ShadowRef::Main(vm) => vm.verify_required_bootloader_heap(cells), + ShadowRef::Shadow(vm) => vm.verify_required_bootloader_heap(cells), + }); + } + + fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) { + self.get_mut("manually_decommit", |r| match r { + ShadowMut::Main(vm) => vm.write_to_bootloader_heap(cells), + ShadowMut::Shadow(vm) => TestedVm::write_to_bootloader_heap(vm, cells), + }); + } + + fn read_storage(&mut self, key: StorageKey) -> U256 { + self.get_mut("read_storage", |r| match r { + ShadowMut::Main(vm) => vm.read_storage(key), + ShadowMut::Shadow(vm) => vm.read_storage(key), + }) + } + + fn last_l2_block_hash(&self) -> H256 { + self.get("last_l2_block_hash", |r| match r { + ShadowRef::Main(vm) => vm.last_l2_block_hash(), + ShadowRef::Shadow(vm) => vm.last_l2_block_hash(), + }) + } + + fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) { + self.get_mut("push_l2_block_unchecked", |r| match r { + ShadowMut::Main(vm) => vm.push_l2_block_unchecked(block), + ShadowMut::Shadow(vm) => vm.push_l2_block_unchecked(block), + }); + } + + fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { + self.get_mut("push_transaction_with_refund", |r| match r { + ShadowMut::Main(vm) => vm.push_transaction_with_refund(tx.clone(), refund), + ShadowMut::Shadow(vm) => vm.push_transaction_with_refund(tx.clone(), refund), + }); + } +} + +mod block_tip { + use crate::versions::testonly::block_tip::*; + + #[test] + fn dry_run_upper_bound() { + test_dry_run_upper_bound::(); + } +} + +mod bootloader { + use crate::versions::testonly::bootloader::*; + + #[test] + fn dummy_bootloader() { + test_dummy_bootloader::(); + } + + #[test] + fn bootloader_out_of_gas() { + test_bootloader_out_of_gas::(); + } +} + +mod bytecode_publishing { + use crate::versions::testonly::bytecode_publishing::*; + + #[test] + fn bytecode_publishing() { + test_bytecode_publishing::(); + } +} + +mod circuits { + use crate::versions::testonly::circuits::*; + + #[test] + fn circuits() { + test_circuits::(); + } +} + +mod code_oracle { + use crate::versions::testonly::code_oracle::*; + + #[test] + fn code_oracle() { + test_code_oracle::(); + } + + #[test] + fn code_oracle_big_bytecode() { + test_code_oracle_big_bytecode::(); + } + + #[test] + fn refunds_in_code_oracle() { + test_refunds_in_code_oracle::(); + } +} + +mod default_aa { + use crate::versions::testonly::default_aa::*; + + #[test] + fn default_aa_interaction() { + test_default_aa_interaction::(); + } +} + +mod gas_limit { + use crate::versions::testonly::gas_limit::*; + + #[test] + fn tx_gas_limit_offset() { + test_tx_gas_limit_offset::(); + } +} + +mod get_used_contracts { + use crate::versions::testonly::get_used_contracts::*; + + #[test] + fn get_used_contracts() { + test_get_used_contracts::(); + } + + #[test] + fn get_used_contracts_with_far_call() { + test_get_used_contracts_with_far_call::(); + } + + #[test] + fn get_used_contracts_with_out_of_gas_far_call() { + test_get_used_contracts_with_out_of_gas_far_call::(); + } +} + +mod is_write_initial { + use crate::versions::testonly::is_write_initial::*; + + #[test] + fn is_write_initial_behaviour() { + test_is_write_initial_behaviour::(); + } +} + +mod l1_tx_execution { + use crate::versions::testonly::l1_tx_execution::*; + + 
#[test] + fn l1_tx_execution() { + test_l1_tx_execution::(); + } + + #[test] + fn l1_tx_execution_high_gas_limit() { + test_l1_tx_execution_high_gas_limit::(); + } +} + +mod l2_blocks { + use crate::versions::testonly::l2_blocks::*; + + #[test] + fn l2_block_initialization_timestamp() { + test_l2_block_initialization_timestamp::(); + } + + #[test] + fn l2_block_initialization_number_non_zero() { + test_l2_block_initialization_number_non_zero::(); + } + + #[test] + fn l2_block_same_l2_block() { + test_l2_block_same_l2_block::(); + } + + #[test] + fn l2_block_new_l2_block() { + test_l2_block_new_l2_block::(); + } + + #[test] + fn l2_block_first_in_batch() { + test_l2_block_first_in_batch::(); + } +} + +mod nonce_holder { + use crate::versions::testonly::nonce_holder::*; + + #[test] + fn nonce_holder() { + test_nonce_holder::(); + } +} + +mod precompiles { + use crate::versions::testonly::precompiles::*; + + #[test] + fn keccak() { + test_keccak::(); + } + + #[test] + fn sha256() { + test_sha256::(); + } + + #[test] + fn ecrecover() { + test_ecrecover::(); + } +} + +mod refunds { + use crate::versions::testonly::refunds::*; + + #[test] + fn predetermined_refunded_gas() { + test_predetermined_refunded_gas::(); + } + + #[test] + fn negative_pubdata_for_transaction() { + test_negative_pubdata_for_transaction::(); + } +} + +mod require_eip712 { + use crate::versions::testonly::require_eip712::*; + + #[test] + fn require_eip712() { + test_require_eip712::(); + } +} + +mod rollbacks { + use crate::versions::testonly::rollbacks::*; + + #[test] + fn vm_rollbacks() { + test_vm_rollbacks::(); + } + + #[test] + fn vm_loadnext_rollbacks() { + test_vm_loadnext_rollbacks::(); + } + + #[test] + fn rollback_in_call_mode() { + test_rollback_in_call_mode::(); + } +} + +mod secp256r1 { + use crate::versions::testonly::secp256r1::*; + + #[test] + fn secp256r1() { + test_secp256r1::(); + } +} + +mod simple_execution { + use crate::versions::testonly::simple_execution::*; + + #[test] + fn estimate_fee() { + test_estimate_fee::(); + } + + #[test] + fn simple_execute() { + test_simple_execute::(); + } +} + +mod storage { + use crate::versions::testonly::storage::*; + + #[test] + fn storage_behavior() { + test_storage_behavior::(); + } + + #[test] + fn transient_storage_behavior() { + test_transient_storage_behavior::(); + } +} + +mod tracing_execution_error { + use crate::versions::testonly::tracing_execution_error::*; + + #[test] + fn tracing_of_execution_errors() { + test_tracing_of_execution_errors::(); + } +} + +mod transfer { + use crate::versions::testonly::transfer::*; + + #[test] + fn send_and_transfer() { + test_send_and_transfer::(); + } + + #[test] + fn reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_and_transfer::(); + } +} + +mod upgrade { + use crate::versions::testonly::upgrade::*; + + #[test] + fn protocol_upgrade_is_first() { + test_protocol_upgrade_is_first::(); + } + + #[test] + fn force_deploy_upgrade() { + test_force_deploy_upgrade::(); + } + + #[test] + fn complex_upgrader() { + test_complex_upgrader::(); + } +} diff --git a/core/lib/multivm/src/versions/testonly/get_used_contracts.rs b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs index fbad94a0eee..d3ffee20c34 100644 --- a/core/lib/multivm/src/versions/testonly/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, iter}; +use std::iter; use assert_matches::assert_matches; use ethabi::Token; @@ -11,7 +11,7 @@ use 
zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; use super::{ read_proxy_counter_contract, read_test_contract, tester::{VmTester, VmTesterBuilder}, - TestedVm, BASE_SYSTEM_CONTRACTS, + TestedVm, }; use crate::{ interface::{ @@ -27,7 +27,7 @@ pub(crate) fn test_get_used_contracts() { .with_rich_accounts(1) .build::(); - assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); + assert!(vm.vm.known_bytecode_hashes().is_empty()); // create and push and execute some not-empty factory deps transaction with success status // to check that `get_decommitted_hashes()` updates @@ -44,10 +44,7 @@ pub(crate) fn test_get_used_contracts() { .contains(&h256_to_u256(tx.bytecode_hash))); // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm.decommitted_hashes(), - known_bytecodes_without_base_system_contracts(&vm.vm) - ); + assert_eq!(vm.vm.decommitted_hashes(), vm.vm.known_bytecode_hashes()); // create push and execute some non-empty factory deps transaction that fails // (`known_bytecodes` will be updated but we expect `get_decommitted_hashes()` to not be updated) @@ -80,23 +77,11 @@ pub(crate) fn test_get_used_contracts() { for factory_dep in tx2.execute.factory_deps { let hash = hash_bytecode(&factory_dep); let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_base_system_contracts(&vm.vm).contains(&hash_to_u256)); + assert!(vm.vm.known_bytecode_hashes().contains(&hash_to_u256)); assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); } } -fn known_bytecodes_without_base_system_contracts(vm: &impl TestedVm) -> HashSet { - let mut known_bytecodes_without_base_system_contracts = vm.known_bytecode_hashes(); - known_bytecodes_without_base_system_contracts - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); - if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { - let was_removed = - known_bytecodes_without_base_system_contracts.remove(&h256_to_u256(evm_emulator.hash)); - assert!(was_removed); - } - known_bytecodes_without_base_system_contracts -} - /// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial /// decommitment cost (>10,000 gas). fn inflated_counter_bytecode() -> Vec { diff --git a/core/lib/multivm/src/versions/testonly/mod.rs b/core/lib/multivm/src/versions/testonly/mod.rs index 838ba98a9aa..74cda6a9522 100644 --- a/core/lib/multivm/src/versions/testonly/mod.rs +++ b/core/lib/multivm/src/versions/testonly/mod.rs @@ -9,6 +9,8 @@ //! - Tests use [`VmTester`] built using [`VmTesterBuilder`] to create a VM instance. This allows to set up storage for the VM, //! custom [`SystemEnv`] / [`L1BatchEnv`], deployed contracts, pre-funded accounts etc. 
+use std::collections::HashSet; + use ethabi::Contract; use once_cell::sync::Lazy; use zksync_contracts::{ @@ -20,7 +22,7 @@ use zksync_types::{ utils::storage_key_for_eth_balance, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, U256, }; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, u256_to_h256}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; pub(super) use self::tester::{TestedVm, VmTester, VmTesterBuilder}; @@ -45,7 +47,6 @@ pub(super) mod refunds; pub(super) mod require_eip712; pub(super) mod rollbacks; pub(super) mod secp256r1; -mod shadow; pub(super) mod simple_execution; pub(super) mod storage; mod tester; @@ -133,6 +134,13 @@ pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { } } +pub(crate) fn filter_out_base_system_contracts(all_bytecode_hashes: &mut HashSet) { + all_bytecode_hashes.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); + if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { + all_bytecode_hashes.remove(&h256_to_u256(evm_emulator.hash)); + } +} + pub(super) fn default_system_env() -> SystemEnv { SystemEnv { zk_porter_available: false, diff --git a/core/lib/multivm/src/versions/testonly/tester/mod.rs b/core/lib/multivm/src/versions/testonly/tester/mod.rs index 4bab9bca610..7432322e0c8 100644 --- a/core/lib/multivm/src/versions/testonly/tester/mod.rs +++ b/core/lib/multivm/src/versions/testonly/tester/mod.rs @@ -195,7 +195,7 @@ pub(crate) trait TestedVm: fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]); - /// Includes bytecodes that have failed to decommit. + /// Includes bytecodes that have failed to decommit. Should exclude base system contract bytecodes (default AA / EVM emulator). fn known_bytecode_hashes(&self) -> HashSet; /// Returns `true` iff the decommit is fresh. 
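To make the reusable-test pattern from the `testonly` module docs and the README guidance above concrete, here is a minimal sketch of a generic test plus one per-VM instantiation. It is illustrative only: the builder entry point (`VmTesterBuilder::new()`), the test name and the import paths are assumptions, while `with_rich_accounts`, `build::<VM>()` and the empty `known_bytecode_hashes()` assertion mirror usage visible in `get_used_contracts.rs` earlier in this patch.

```rust
// Generic test body: defined once in `testonly`, reusable by every VM.
pub(crate) fn test_fresh_vm_has_no_known_bytecodes<VM: TestedVm>() {
    let vm = VmTesterBuilder::new()
        .with_rich_accounts(1)
        .build::<VM>();
    // A freshly built VM should not report any non-base-system bytecodes yet.
    assert!(vm.vm.known_bytecode_hashes().is_empty());
}

// Per-VM instantiation, following the pattern of the modules in
// `shadow/tests.rs` above; the same module is repeated for `vm_latest` and
// `vm_fast`, and the shadowed run checks divergences "for free".
// Import paths here are illustrative.
mod fresh_vm {
    use crate::versions::{shadow::ShadowedFastVm, testonly::fresh_vm::*};

    #[test]
    fn fresh_vm_has_no_known_bytecodes() {
        test_fresh_vm_has_no_known_bytecodes::<ShadowedFastVm>();
    }
}
```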
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index 2835f5b6faa..6f748d543d3 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -14,7 +14,7 @@ use crate::{ storage::{InMemoryStorage, ReadStorage, StorageView, WriteStorage}, CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs, }, - versions::testonly::TestedVm, + versions::testonly::{filter_out_base_system_contracts, TestedVm}, vm_latest::{ constants::BOOTLOADER_HEAP_PAGE, old_vm::{event_sink::InMemoryEventSink, history_recorder::HistoryRecorder}, @@ -104,13 +104,16 @@ impl TestedVm for TestedLatestVm { } fn known_bytecode_hashes(&self) -> HashSet { - self.state + let mut bytecode_hashes: HashSet<_> = self + .state .decommittment_processor .known_bytecodes .inner() .keys() .copied() - .collect() + .collect(); + filter_out_base_system_contracts(&mut bytecode_hashes); + bytecode_hashes } fn manually_decommit(&mut self, code_hash: H256) -> bool { diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs index 522a455a11b..4076aa72270 100644 --- a/core/lib/vm_interface/src/utils/dump.rs +++ b/core/lib/vm_interface/src/utils/dump.rs @@ -139,6 +139,18 @@ impl DumpingVm { } } +impl AsRef for DumpingVm { + fn as_ref(&self) -> &Vm { + &self.inner + } +} + +impl AsMut for DumpingVm { + fn as_mut(&mut self) -> &mut Vm { + &mut self.inner + } +} + impl VmInterface for DumpingVm { type TracerDispatcher = Vm::TracerDispatcher; diff --git a/core/lib/vm_interface/src/utils/mod.rs b/core/lib/vm_interface/src/utils/mod.rs index 80a51c7b144..394df7fc9a1 100644 --- a/core/lib/vm_interface/src/utils/mod.rs +++ b/core/lib/vm_interface/src/utils/mod.rs @@ -2,7 +2,9 @@ pub use self::{ dump::VmDump, - shadow::{DivergenceErrors, DivergenceHandler, ShadowVm}, + shadow::{ + CheckDivergence, DivergenceErrors, DivergenceHandler, ShadowMut, ShadowRef, ShadowVm, + }, }; mod dump; diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs index 8cdc899238e..e8ef87c3c7f 100644 --- a/core/lib/vm_interface/src/utils/shadow.rs +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -1,4 +1,5 @@ use std::{ + any, cell::RefCell, collections::{BTreeMap, BTreeSet}, fmt, @@ -65,6 +66,154 @@ impl VmWithReporting { } } +/// Reference to either the main or shadow VM. +#[derive(Debug)] +pub enum ShadowRef<'a, Main, Shadow> { + /// Reference to the main VM. + Main(&'a Main), + /// Reference to the shadow VM. + Shadow(&'a Shadow), +} + +/// Mutable reference to either the main or shadow VM. +#[derive(Debug)] +pub enum ShadowMut<'a, Main, Shadow> { + /// Reference to the main VM. + Main(&'a mut Main), + /// Reference to the shadow VM. + Shadow(&'a mut Shadow), +} + +/// Type that can check divergence between its instances. +pub trait CheckDivergence { + /// Checks divergences and returns a list of divergence errors, if any. 
+ fn check_divergence(&self, other: &Self) -> DivergenceErrors; +} + +#[derive(Debug)] +struct DivergingEq(T); + +impl CheckDivergence for DivergingEq { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match(any::type_name::(), &self.0, &other.0); + errors + } +} + +impl CheckDivergence for CurrentExecutionState { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match("final_state.events", &self.events, &other.events); + errors.check_match( + "final_state.user_l2_to_l1_logs", + &self.user_l2_to_l1_logs, + &other.user_l2_to_l1_logs, + ); + errors.check_match( + "final_state.system_logs", + &self.system_logs, + &other.system_logs, + ); + errors.check_match( + "final_state.storage_refunds", + &self.storage_refunds, + &other.storage_refunds, + ); + errors.check_match( + "final_state.pubdata_costs", + &self.pubdata_costs, + &other.pubdata_costs, + ); + errors.check_match( + "final_state.used_contract_hashes", + &self.used_contract_hashes.iter().collect::>(), + &other.used_contract_hashes.iter().collect::>(), + ); + + let main_deduplicated_logs = DivergenceErrors::gather_logs(&self.deduplicated_storage_logs); + let shadow_deduplicated_logs = + DivergenceErrors::gather_logs(&other.deduplicated_storage_logs); + errors.check_match( + "deduplicated_storage_logs", + &main_deduplicated_logs, + &shadow_deduplicated_logs, + ); + errors + } +} + +impl CheckDivergence for VmExecutionResultAndLogs { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match("result", &self.result, &other.result); + errors.check_match("logs.events", &self.logs.events, &other.logs.events); + errors.check_match( + "logs.system_l2_to_l1_logs", + &self.logs.system_l2_to_l1_logs, + &other.logs.system_l2_to_l1_logs, + ); + errors.check_match( + "logs.user_l2_to_l1_logs", + &self.logs.user_l2_to_l1_logs, + &other.logs.user_l2_to_l1_logs, + ); + let main_logs = UniqueStorageLogs::new(&self.logs.storage_logs); + let shadow_logs = UniqueStorageLogs::new(&other.logs.storage_logs); + errors.check_match("logs.storage_logs", &main_logs, &shadow_logs); + errors.check_match("refunds", &self.refunds, &other.refunds); + errors.check_match( + "statistics.circuit_statistic", + &self.statistics.circuit_statistic, + &other.statistics.circuit_statistic, + ); + errors.check_match( + "statistics.pubdata_published", + &self.statistics.pubdata_published, + &other.statistics.pubdata_published, + ); + errors.check_match( + "statistics.gas_remaining", + &self.statistics.gas_remaining, + &other.statistics.gas_remaining, + ); + errors.check_match( + "statistics.gas_used", + &self.statistics.gas_used, + &other.statistics.gas_used, + ); + errors.check_match( + "statistics.computational_gas_used", + &self.statistics.computational_gas_used, + &other.statistics.computational_gas_used, + ); + errors + } +} + +impl CheckDivergence for FinishedL1Batch { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.extend( + self.block_tip_execution_result + .check_divergence(&other.block_tip_execution_result), + ); + errors.extend( + self.final_execution_state + .check_divergence(&other.final_execution_state), + ); + + errors.check_match( + "final_bootloader_memory", + &self.final_bootloader_memory, + &other.final_bootloader_memory, + ); + errors.check_match("pubdata_input", 
&self.pubdata_input, &other.pubdata_input); + errors.check_match("state_diffs", &self.state_diffs, &other.state_diffs); + errors + } +} + /// Shadowed VM that executes 2 VMs for each operation and compares their outputs. /// /// If a divergence is detected, the VM state is dumped using [a pluggable handler](Self::set_dump_handler()), @@ -105,6 +254,66 @@ where pub fn dump_state(&self) -> VmDump { self.main.dump_state() } + + /// Gets the specified value from both the main and shadow VM, checking whether it matches on both. + pub fn get(&self, name: &str, mut action: impl FnMut(ShadowRef<'_, Main, Shadow>) -> R) -> R + where + R: PartialEq + fmt::Debug + 'static, + { + self.get_custom(name, |r| DivergingEq(action(r))).0 + } + + /// Same as [`Self::get()`], but uses custom divergence checks for the type encapsulated in the [`CheckDivergence`] trait. + pub fn get_custom( + &self, + name: &str, + mut action: impl FnMut(ShadowRef<'_, Main, Shadow>) -> R, + ) -> R { + let main_output = action(ShadowRef::Main(self.main.as_ref())); + let borrow = self.shadow.borrow(); + if let Some(shadow) = &*borrow { + let shadow_output = action(ShadowRef::Shadow(&shadow.vm)); + let errors = main_output.check_divergence(&shadow_output); + if let Err(err) = errors.into_result() { + drop(borrow); + self.report_shared(err.context(format!("get({name})"))); + } + } + main_output + } + + /// Gets the specified value from both the main and shadow VM, potentially changing their state + /// and checking whether the returned value matches. + pub fn get_mut( + &mut self, + name: &str, + mut action: impl FnMut(ShadowMut<'_, Main, Shadow>) -> R, + ) -> R + where + R: PartialEq + fmt::Debug + 'static, + { + self.get_custom_mut(name, |r| DivergingEq(action(r))).0 + } + + /// Same as [`Self::get_mut()`], but uses custom divergence checks for the type encapsulated in the [`CheckDivergence`] trait. + pub fn get_custom_mut( + &mut self, + name: &str, + mut action: impl FnMut(ShadowMut<'_, Main, Shadow>) -> R, + ) -> R + where + R: CheckDivergence, + { + let main_output = action(ShadowMut::Main(self.main.as_mut())); + if let Some(shadow) = self.shadow.get_mut() { + let shadow_output = action(ShadowMut::Shadow(&mut shadow.vm)); + let errors = main_output.check_divergence(&shadow_output); + if let Err(err) = errors.into_result() { + self.report_shared(err.context(format!("get_mut({name})"))); + } + } + main_output + } } impl ShadowVm @@ -151,7 +360,6 @@ where } } -/// **Important.** This doesn't properly handle tracers; they are not passed to the shadow VM! 
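A note on how the two accessor families above divide the work: plain `PartialEq` values go through `get`/`get_mut`, which wrap the result in the `DivergingEq` helper, while types that want field-level divergence reports implement `CheckDivergence` themselves and are read via `get_custom`/`get_custom_mut`. A minimal sketch of such an impl for a hypothetical crate-local result type follows; the type and field names are invented, while `DivergenceErrors::new()` and `check_match()` are used exactly as in the impls above.

```rust
// Illustrative only: a crate-local summary type whose divergence report
// names the offending field instead of diffing the whole value at once.
#[derive(Debug, PartialEq)]
pub(crate) struct BatchSummary {
    pub(crate) gas_used: u64,
    pub(crate) pubdata_published: u32,
}

impl CheckDivergence for BatchSummary {
    fn check_divergence(&self, other: &Self) -> DivergenceErrors {
        let mut errors = DivergenceErrors::new();
        errors.check_match("batch_summary.gas_used", &self.gas_used, &other.gas_used);
        errors.check_match(
            "batch_summary.pubdata_published",
            &self.pubdata_published,
            &other.pubdata_published,
        );
        errors
    }
}
```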
impl VmInterface for ShadowVm where S: ReadStorage, @@ -197,9 +405,7 @@ where let main_result = self.main.inspect(main_tracer, execution_mode); if let Some(shadow) = self.shadow.get_mut() { let shadow_result = shadow.vm.inspect(shadow_tracer, execution_mode); - let mut errors = DivergenceErrors::new(); - errors.check_results_match(&main_result, &shadow_result); - + let errors = main_result.check_divergence(&shadow_result); if let Err(err) = errors.into_result() { let ctx = format!("executing VM with mode {execution_mode:?}"); self.report(err.context(ctx)); @@ -240,8 +446,7 @@ where tx, with_compression, ); - let mut errors = DivergenceErrors::new(); - errors.check_results_match(&main_tx_result, &shadow_result.1); + let errors = main_tx_result.check_divergence(&shadow_result.1); if let Err(err) = errors.into_result() { let ctx = format!( "inspecting transaction {tx_repr}, with_compression={with_compression:?}" @@ -256,31 +461,7 @@ where let main_batch = self.main.finish_batch(); if let Some(shadow) = self.shadow.get_mut() { let shadow_batch = shadow.vm.finish_batch(); - let mut errors = DivergenceErrors::new(); - errors.check_results_match( - &main_batch.block_tip_execution_result, - &shadow_batch.block_tip_execution_result, - ); - errors.check_final_states_match( - &main_batch.final_execution_state, - &shadow_batch.final_execution_state, - ); - errors.check_match( - "final_bootloader_memory", - &main_batch.final_bootloader_memory, - &shadow_batch.final_bootloader_memory, - ); - errors.check_match( - "pubdata_input", - &main_batch.pubdata_input, - &shadow_batch.pubdata_input, - ); - errors.check_match( - "state_diffs", - &main_batch.state_diffs, - &shadow_batch.state_diffs, - ); - + let errors = main_batch.check_divergence(&shadow_batch); if let Err(err) = errors.into_result() { self.report(err); } @@ -321,63 +502,15 @@ impl DivergenceErrors { } } + fn extend(&mut self, from: Self) { + self.divergences.extend(from.divergences); + } + fn context(mut self, context: String) -> Self { self.context = Some(context); self } - fn check_results_match( - &mut self, - main_result: &VmExecutionResultAndLogs, - shadow_result: &VmExecutionResultAndLogs, - ) { - self.check_match("result", &main_result.result, &shadow_result.result); - self.check_match( - "logs.events", - &main_result.logs.events, - &shadow_result.logs.events, - ); - self.check_match( - "logs.system_l2_to_l1_logs", - &main_result.logs.system_l2_to_l1_logs, - &shadow_result.logs.system_l2_to_l1_logs, - ); - self.check_match( - "logs.user_l2_to_l1_logs", - &main_result.logs.user_l2_to_l1_logs, - &shadow_result.logs.user_l2_to_l1_logs, - ); - let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); - let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); - self.check_match("logs.storage_logs", &main_logs, &shadow_logs); - self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); - self.check_match( - "statistics.circuit_statistic", - &main_result.statistics.circuit_statistic, - &shadow_result.statistics.circuit_statistic, - ); - self.check_match( - "statistics.pubdata_published", - &main_result.statistics.pubdata_published, - &shadow_result.statistics.pubdata_published, - ); - self.check_match( - "statistics.gas_remaining", - &main_result.statistics.gas_remaining, - &shadow_result.statistics.gas_remaining, - ); - self.check_match( - "statistics.gas_used", - &main_result.statistics.gas_used, - &shadow_result.statistics.gas_used, - ); - self.check_match( - "statistics.computational_gas_used", 
- &main_result.statistics.computational_gas_used, - &shadow_result.statistics.computational_gas_used, - ); - } - fn check_match(&mut self, context: &str, main: &T, shadow: &T) { if main != shadow { let comparison = pretty_assertions::Comparison::new(main, shadow); @@ -386,47 +519,6 @@ impl DivergenceErrors { } } - fn check_final_states_match( - &mut self, - main: &CurrentExecutionState, - shadow: &CurrentExecutionState, - ) { - self.check_match("final_state.events", &main.events, &shadow.events); - self.check_match( - "final_state.user_l2_to_l1_logs", - &main.user_l2_to_l1_logs, - &shadow.user_l2_to_l1_logs, - ); - self.check_match( - "final_state.system_logs", - &main.system_logs, - &shadow.system_logs, - ); - self.check_match( - "final_state.storage_refunds", - &main.storage_refunds, - &shadow.storage_refunds, - ); - self.check_match( - "final_state.pubdata_costs", - &main.pubdata_costs, - &shadow.pubdata_costs, - ); - self.check_match( - "final_state.used_contract_hashes", - &main.used_contract_hashes.iter().collect::>(), - &shadow.used_contract_hashes.iter().collect::>(), - ); - - let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); - let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); - self.check_match( - "deduplicated_storage_logs", - &main_deduplicated_logs, - &shadow_deduplicated_logs, - ); - } - fn gather_logs(logs: &[StorageLog]) -> BTreeMap { logs.iter() .filter(|log| log.is_write()) From 0aecae1e02d31d34d1ccc0ddf54617174d134e55 Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Wed, 23 Oct 2024 18:00:00 +0200 Subject: [PATCH 113/140] feat(zkstack_cli): Add --dev flag to chain init and genesis (#3152) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add `--dev` flag to chain init and genesis. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- .../crates/zkstack/completion/_zkstack.zsh | 19 ++--- .../crates/zkstack/completion/zkstack.fish | 7 +- .../crates/zkstack/completion/zkstack.sh | 10 +-- .../src/commands/chain/args/genesis.rs | 4 +- .../src/commands/chain/args/init/mod.rs | 73 +++++++++++++------ .../src/commands/ecosystem/args/init.rs | 24 ++++-- .../zkstack/src/commands/ecosystem/init.rs | 9 ++- zkstack_cli/crates/zkstack/src/messages.rs | 4 +- 8 files changed, 94 insertions(+), 56 deletions(-) diff --git a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh index a8a60a6130a..b985f5b9334 100644 --- a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh +++ b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh @@ -131,12 +131,10 @@ _arguments "${_arguments_options[@]}" : \ '--observability=[Enable Grafana]' \ '--chain=[Chain to use]:CHAIN: ' \ '--resume[]' \ -'-u[Use default database urls and names]' \ -'--use-default[Use default database urls and names]' \ '-d[]' \ '--dont-drop[]' \ '--ecosystem-only[Initialize ecosystem only and skip chain initialization (chain can be initialized later with \`chain init\` subcommand)]' \ -'--dev[Deploy ecosystem using all defaults. Suitable for local development]' \ +'--dev[Use defaults for all options and flags. 
Suitable for local development]' \ '--no-port-reallocation[Do not reallocate ports]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -286,11 +284,10 @@ _arguments "${_arguments_options[@]}" : \ '--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ '--chain=[Chain to use]:CHAIN: ' \ '--resume[]' \ -'-u[Use default database urls and names]' \ -'--use-default[Use default database urls and names]' \ '-d[]' \ '--dont-drop[]' \ '--no-port-reallocation[Do not reallocate ports]' \ +'--dev[Use defaults for all options and flags. Suitable for local development]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -312,8 +309,8 @@ _arguments "${_arguments_options[@]}" : \ '--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ '--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ '--chain=[Chain to use]:CHAIN: ' \ -'-u[Use default database urls and names]' \ -'--use-default[Use default database urls and names]' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ '-d[]' \ '--dont-drop[]' \ '--no-port-reallocation[Do not reallocate ports]' \ @@ -357,8 +354,8 @@ _arguments "${_arguments_options[@]}" : \ '--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ '--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ '--chain=[Chain to use]:CHAIN: ' \ -'-u[Use default database urls and names]' \ -'--use-default[Use default database urls and names]' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ '-d[]' \ '--dont-drop[]' \ '-v[Verbose mode]' \ @@ -381,8 +378,8 @@ _arguments "${_arguments_options[@]}" : \ '--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ '--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ '--chain=[Chain to use]:CHAIN: ' \ -'-u[Use default database urls and names]' \ -'--use-default[Use default database urls and names]' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ '-d[]' \ '--dont-drop[]' \ '-v[Verbose mode]' \ diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.fish b/zkstack_cli/crates/zkstack/completion/zkstack.fish index d490085e615..f90bcf2c4ac 100644 --- a/zkstack_cli/crates/zkstack/completion/zkstack.fish +++ b/zkstack_cli/crates/zkstack/completion/zkstack.fish @@ -107,10 +107,9 @@ complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_se complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s o -l observability -d 'Enable Grafana' -r -f -a "{true\t'',false\t''}" complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l resume -complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s u -l use-default -d 'Use default database urls and names' complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s d -l dont-drop complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ecosystem-only -d 'Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` subcommand)' -complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l dev -d 
'Deploy ecosystem using all defaults. Suitable for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l dev -d 'Use defaults for all options and flags. Suitable for local development' complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l no-port-reallocation -d 'Do not reallocate ports' complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' @@ -185,9 +184,9 @@ complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_s complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l l1-rpc-url -d 'L1 RPC URL' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l resume -complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s u -l use-default -d 'Use default database urls and names' complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s d -l dont-drop complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l no-port-reallocation -d 'Do not reallocate ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l dev -d 'Use defaults for all options and flags. 
Suitable for local development' complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help (see more with \'--help\')' @@ -196,7 +195,7 @@ complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_s complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l server-db-url -d 'Server database url without database name' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l server-db-name -d 'Server database name' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l chain -d 'Chain to use' -r -complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s u -l use-default -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s d -l dev -d 'Use default database urls and names' complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s d -l dont-drop complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s v -l verbose -d 'Verbose mode' complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l ignore-prerequisites -d 'Ignores prerequisites checks' diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.sh b/zkstack_cli/crates/zkstack/completion/zkstack.sh index 27639acd50b..d21480bba2c 100644 --- a/zkstack_cli/crates/zkstack/completion/zkstack.sh +++ b/zkstack_cli/crates/zkstack/completion/zkstack.sh @@ -1441,7 +1441,7 @@ _zkstack() { return 0 ;; zkstack__chain__genesis) - opts="-u -d -v -h --server-db-url --server-db-name --use-default --dont-drop --verbose --chain --ignore-prerequisites --help init-database server help" + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --verbose --chain --ignore-prerequisites --help init-database server help" if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) return 0 @@ -1523,7 +1523,7 @@ _zkstack() { return 0 ;; zkstack__chain__genesis__init__database) - opts="-u -d -v -h --server-db-url --server-db-name --use-default --dont-drop --verbose --chain --ignore-prerequisites --help" + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --verbose --chain --ignore-prerequisites --help" if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) return 0 @@ -1819,7 +1819,7 @@ _zkstack() { return 0 ;; zkstack__chain__init) - opts="-a -u -d -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --server-db-url --server-db-name --use-default --dont-drop --deploy-paymaster --l1-rpc-url --no-port-reallocation --verbose --chain --ignore-prerequisites --help configs help" + opts="-a -d -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --server-db-url --server-db-name --dont-drop --deploy-paymaster --l1-rpc-url --no-port-reallocation --dev --verbose --chain 
--ignore-prerequisites --help configs help" if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) return 0 @@ -1877,7 +1877,7 @@ _zkstack() { return 0 ;; zkstack__chain__init__configs) - opts="-u -d -v -h --server-db-url --server-db-name --use-default --dont-drop --l1-rpc-url --no-port-reallocation --verbose --chain --ignore-prerequisites --help" + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --l1-rpc-url --no-port-reallocation --verbose --chain --ignore-prerequisites --help" if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) return 0 @@ -4829,7 +4829,7 @@ _zkstack() { return 0 ;; zkstack__ecosystem__init) - opts="-a -u -d -o -v -h --deploy-erc20 --deploy-ecosystem --ecosystem-contracts-path --l1-rpc-url --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --deploy-paymaster --server-db-url --server-db-name --use-default --dont-drop --ecosystem-only --dev --observability --no-port-reallocation --verbose --chain --ignore-prerequisites --help" + opts="-a -d -o -v -h --deploy-erc20 --deploy-ecosystem --ecosystem-contracts-path --l1-rpc-url --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --deploy-paymaster --server-db-url --server-db-name --dont-drop --ecosystem-only --dev --observability --no-port-reallocation --verbose --chain --ignore-prerequisites --help" if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) return 0 diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs index aaf995985a3..f990cbfd77d 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs @@ -21,7 +21,7 @@ pub struct GenesisArgs { #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] pub server_db_name: Option, #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)] - pub use_default: bool, + pub dev: bool, #[clap(long, short, action)] pub dont_drop: bool, } @@ -30,7 +30,7 @@ impl GenesisArgs { pub fn fill_values_with_prompt(self, config: &ChainConfig) -> GenesisArgsFinal { let DBNames { server_name, .. 
} = generate_db_names(config); let chain_name = config.name.clone(); - if self.use_default { + if self.dev { GenesisArgsFinal { server_db: DatabaseConfig::new(DATABASE_SERVER_URL.clone(), server_name), dont_drop: self.dont_drop, diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs index d92de9a0641..a5c7a6890ca 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs @@ -9,8 +9,9 @@ use crate::{ commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal}, defaults::LOCAL_RPC_URL, messages::{ - MSG_DEPLOY_PAYMASTER_PROMPT, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, + MSG_DEPLOY_PAYMASTER_PROMPT, MSG_DEV_ARG_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, + MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, }, }; @@ -22,45 +23,70 @@ pub struct InitArgs { #[clap(flatten)] #[serde(flatten)] pub forge_args: ForgeScriptArgs, - #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)] - #[serde(flatten)] - pub genesis_args: GenesisArgs, + #[clap(long, help = MSG_SERVER_DB_URL_HELP)] + pub server_db_url: Option, + #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] + pub server_db_name: Option, + #[clap(long, short, action)] + pub dont_drop: bool, #[clap(long, default_missing_value = "true", num_args = 0..=1)] pub deploy_paymaster: Option, #[clap(long, help = MSG_L1_RPC_URL_HELP)] pub l1_rpc_url: Option, #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] pub no_port_reallocation: bool, + #[clap(long, help = MSG_DEV_ARG_HELP)] + pub dev: bool, } impl InitArgs { + pub fn get_genesis_args(&self) -> GenesisArgs { + GenesisArgs { + server_db_url: self.server_db_url.clone(), + server_db_name: self.server_db_name.clone(), + dev: self.dev, + dont_drop: self.dont_drop, + } + } + pub fn fill_values_with_prompt(self, config: &ChainConfig) -> InitArgsFinal { - let deploy_paymaster = self.deploy_paymaster.unwrap_or_else(|| { - common::PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT) - .default(true) - .ask() - }); + let genesis = self.get_genesis_args(); + + let deploy_paymaster = if self.dev { + true + } else { + self.deploy_paymaster.unwrap_or_else(|| { + common::PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT) + .default(true) + .ask() + }) + }; - let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| { - let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT); - if config.l1_network == L1Network::Localhost { - prompt = prompt.default(LOCAL_RPC_URL); - } - prompt - .validate_with(|val: &String| -> Result<(), String> { - Url::parse(val) - .map(|_| ()) - .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string()) - }) - .ask() - }); + let l1_rpc_url = if self.dev { + LOCAL_RPC_URL.to_string() + } else { + self.l1_rpc_url.unwrap_or_else(|| { + let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT); + if config.l1_network == L1Network::Localhost { + prompt = prompt.default(LOCAL_RPC_URL); + } + prompt + .validate_with(|val: &String| -> Result<(), String> { + Url::parse(val) + .map(|_| ()) + .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string()) + }) + .ask() + }) + }; InitArgsFinal { forge_args: self.forge_args, - genesis_args: self.genesis_args.fill_values_with_prompt(config), + genesis_args: genesis.fill_values_with_prompt(config), deploy_paymaster, l1_rpc_url, no_port_reallocation: self.no_port_reallocation, + dev: self.dev, } } } @@ -72,4 +98,5 @@ pub struct InitArgsFinal { pub deploy_paymaster: 
bool, pub l1_rpc_url: String, pub no_port_reallocation: bool, + pub dev: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs index a77a9c28ca9..09115fd49ba 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs @@ -11,9 +11,9 @@ use crate::{ defaults::LOCAL_RPC_URL, messages::{ MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEV_ARG_HELP, - MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, - MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP, - MSG_OBSERVABILITY_PROMPT, + MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, + MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP, MSG_OBSERVABILITY_PROMPT, + MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, }, }; @@ -86,9 +86,12 @@ pub struct EcosystemInitArgs { /// Deploy Paymaster contract #[clap(long, default_missing_value = "true", num_args = 0..=1)] pub deploy_paymaster: Option, - #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)] - #[serde(flatten)] - pub genesis_args: GenesisArgs, + #[clap(long, help = MSG_SERVER_DB_URL_HELP)] + pub server_db_url: Option, + #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] + pub server_db_name: Option, + #[clap(long, short, action)] + pub dont_drop: bool, /// Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` subcommand) #[clap(long, default_value_t = false)] pub ecosystem_only: bool, @@ -101,6 +104,15 @@ pub struct EcosystemInitArgs { } impl EcosystemInitArgs { + pub fn get_genesis_args(&self) -> GenesisArgs { + GenesisArgs { + server_db_url: self.server_db_url.clone(), + server_db_name: self.server_db_name.clone(), + dev: self.dev, + dont_drop: self.dont_drop, + } + } + pub fn fill_values_with_prompt(self, l1_network: L1Network) -> EcosystemInitArgsFinal { let deploy_erc20 = if self.dev { true diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs index 6e006f8d65d..06b9b916111 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs @@ -341,10 +341,10 @@ async fn init_chains( }; // Set default values for dev mode let mut deploy_paymaster = init_args.deploy_paymaster; - let mut genesis_args = init_args.genesis_args.clone(); + let mut genesis_args = init_args.get_genesis_args().clone(); if final_init_args.dev { deploy_paymaster = Some(true); - genesis_args.use_default = true; + genesis_args.dev = true; } // Can't initialize multiple chains with the same DB if list_of_chains.len() > 1 { @@ -359,10 +359,13 @@ async fn init_chains( let chain_init_args = chain::args::init::InitArgs { forge_args: final_init_args.forge_args.clone(), - genesis_args: genesis_args.clone(), + server_db_url: genesis_args.server_db_url.clone(), + server_db_name: genesis_args.server_db_name.clone(), + dont_drop: genesis_args.dont_drop, deploy_paymaster, l1_rpc_url: Some(final_init_args.ecosystem.l1_rpc_url.clone()), no_port_reallocation: final_init_args.no_port_reallocation, + dev: final_init_args.dev, }; let final_chain_init_args = chain_init_args.fill_values_with_prompt(&chain_config); diff --git a/zkstack_cli/crates/zkstack/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs index e2145c18ffd..b9786dc4d8d 100644 --- 
a/zkstack_cli/crates/zkstack/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -15,6 +15,8 @@ pub(super) const MSG_SELECTED_CONFIG: &str = "Selected config"; pub(super) const MSG_CHAIN_NOT_INITIALIZED: &str = "Chain not initialized. Please create a chain first"; pub(super) const MSG_ARGS_VALIDATOR_ERR: &str = "Invalid arguments"; +pub(super) const MSG_DEV_ARG_HELP: &str = + "Use defaults for all options and flags. Suitable for local development"; /// Autocomplete message pub(super) fn msg_generate_autocomplete_file(filename: &str) -> String { @@ -61,8 +63,6 @@ pub(super) fn msg_path_to_zksync_does_not_exist_err(path: &str) -> String { pub(super) const MSG_L1_RPC_URL_HELP: &str = "L1 RPC URL"; pub(super) const MSG_NO_PORT_REALLOCATION_HELP: &str = "Do not reallocate ports"; pub(super) const MSG_GENESIS_ARGS_HELP: &str = "Genesis options"; -pub(super) const MSG_DEV_ARG_HELP: &str = - "Deploy ecosystem using all defaults. Suitable for local development"; pub(super) const MSG_OBSERVABILITY_HELP: &str = "Enable Grafana"; pub(super) const MSG_OBSERVABILITY_PROMPT: &str = "Do you want to setup observability? (Grafana)"; pub(super) const MSG_DEPLOY_ECOSYSTEM_PROMPT: &str = From 724d9a9c7f2127263845b640c843e751fd3c21ae Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Wed, 23 Oct 2024 18:01:00 +0200 Subject: [PATCH 114/140] feat(zkstack_cli): Build dependencies at zkstack build time (#3157) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Build dependencies (e.g., `yarn install`) at `zkstack` build time. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- zkstack_cli/crates/zkstack/Cargo.toml | 1 + zkstack_cli/crates/zkstack/build.rs | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/zkstack_cli/crates/zkstack/Cargo.toml b/zkstack_cli/crates/zkstack/Cargo.toml index 93a78c751b1..85ab8081eaa 100644 --- a/zkstack_cli/crates/zkstack/Cargo.toml +++ b/zkstack_cli/crates/zkstack/Cargo.toml @@ -55,4 +55,5 @@ anyhow.workspace = true clap_complete.workspace = true dirs.workspace = true ethers.workspace = true +xshell.workspace = true zksync_protobuf_build.workspace = true diff --git a/zkstack_cli/crates/zkstack/build.rs b/zkstack_cli/crates/zkstack/build.rs index bccf5bae89f..e52e952bf73 100644 --- a/zkstack_cli/crates/zkstack/build.rs +++ b/zkstack_cli/crates/zkstack/build.rs @@ -2,6 +2,7 @@ use std::path::{Path, PathBuf}; use anyhow::{anyhow, Context}; use ethers::contract::Abigen; +use xshell::{cmd, Shell}; const COMPLETION_DIR: &str = "completion"; @@ -14,6 +15,11 @@ fn main() -> anyhow::Result<()> { .write_to_file(outdir.join("consensus_registry_abi.rs")) .context("Failed to write ABI to file")?; + if let Err(e) = build_dependencies() { + println!("cargo:error=It was not possible to install projects dependencies"); + println!("cargo:error={}", e); + } + if let Err(e) = configure_shell_autocompletion() { println!("cargo:warning=It was not possible to install autocomplete scripts. 
Please generate them manually with `zkstack autocomplete`"); println!("cargo:error={}", e); @@ -130,3 +136,14 @@ impl ShellAutocomplete for clap_complete::Shell { Ok(()) } } + +fn build_dependencies() -> anyhow::Result<()> { + let shell = Shell::new()?; + let code_dir = Path::new("../"); + + let _dir_guard = shell.push_dir(code_dir); + + cmd!(shell, "yarn install") + .run() + .context("Failed to install dependencies") +} From 340a1786e32c8a4130ae4dafa26b8a545d0b3648 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Wed, 23 Oct 2024 18:07:33 +0200 Subject: [PATCH 115/140] docs: Add manual installation instruction for zkstack_cli (#3160) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add manual installation instruction for zkstack_cli. ## Why ❔ Using `curl | bash` could be problematic, we should provide better way. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- zkstack_cli/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/zkstack_cli/README.md b/zkstack_cli/README.md index f1c92cc3d2e..e8116508821 100644 --- a/zkstack_cli/README.md +++ b/zkstack_cli/README.md @@ -30,6 +30,16 @@ zkstackup --local This command installs `zkstack` from the current repository. +#### Manual installation + +Run from the repository root: + +```bash +cargo install --path zkstack_cli/crates/zkstack --force --locked +``` + +And make sure that `.cargo/bin` is included into `PATH`. + ### Foundry Integration Foundry is used for deploying smart contracts. Pass flags for Foundry integration with the `-a` option, e.g., From e7b587ac8afee4d27fb9dd9ab0f5c02beafbc42c Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Wed, 23 Oct 2024 18:16:03 +0200 Subject: [PATCH 116/140] ci: Reduce GAR builder disk usage by moving keys around. (#3161) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Move keys from prover to circuit-prover as the build sequentially. Remove prover-gar image after it being uploaded. ## Why ❔ To allow running this on a machine with 100G disk. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- ...ild-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml index b92fb8e8111..4639f8c77c4 100644 --- a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml @@ -28,7 +28,6 @@ jobs: - name: Download Setup data run: | gsutil -m rsync -r gs://matterlabs-setup-data-us/${{ inputs.setup_keys_id }} docker/prover-gpu-fri-gar - cp -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ - name: Login to us-central1 GAR run: | @@ -70,6 +69,14 @@ jobs: --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + - name: Remove prover-gpu-fri-gar image to free space + run: | + docker image rm us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + + - name: Move Setup data from prover-gpu-fri-gar to circuit-prover-gpu-gar + run: | + mv -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ + - name: Build and push circuit-prover-gpu-gar uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: From d0f61b0552dcacc2e8e33fdbcae6f1e5fbb43820 Mon Sep 17 00:00:00 2001 From: Ivan Schasny <31857042+ischasny@users.noreply.github.com> Date: Wed, 23 Oct 2024 18:43:34 +0100 Subject: [PATCH 117/140] fix: update logging in cbt l1 behaviour (#3149) Stop converting `BigDecimal` to `BigInt` when logging to avoid loosing precision. --- .../base_token_adjuster/src/base_token_l1_behaviour.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs index 0199b06ebd6..0922101e59d 100644 --- a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs +++ b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs @@ -6,7 +6,7 @@ use std::{ }; use anyhow::Context; -use bigdecimal::{num_bigint::ToBigInt, BigDecimal, Zero}; +use bigdecimal::{BigDecimal, Zero}; use zksync_config::BaseTokenAdjusterConfig; use zksync_eth_client::{BoundEthInterface, CallFunctionArgs, Options}; use zksync_node_fee_model::l1_gas_price::TxParamsProvider; @@ -57,7 +57,7 @@ impl BaseTokenL1Behaviour { self.update_last_persisted_l1_ratio(prev_ratio.clone()); tracing::info!( "Fetched current base token ratio from the L1: {}", - prev_ratio.to_bigint().unwrap() + prev_ratio ); prev_ratio }; @@ -71,7 +71,7 @@ impl BaseTokenL1Behaviour { "Skipping L1 update. 
current_ratio {}, previous_ratio {}, deviation {}", current_ratio, prev_ratio, - deviation.to_bigint().unwrap() + deviation ); return Ok(()); } @@ -98,7 +98,7 @@ impl BaseTokenL1Behaviour { new_ratio.denominator.get(), base_fee_per_gas, priority_fee_per_gas, - deviation.to_bigint().unwrap() + deviation ); METRICS .l1_gas_used From 4629450d2d40c2c4c28255e51d5bb67d588ba837 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Wed, 23 Oct 2024 21:33:32 +0300 Subject: [PATCH 118/140] ci: unify fmt check (#3159) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ prover ci should check formatting with zkstack_cli ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- .github/workflows/ci-prover-reusable.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 6cb9c26d21e..4154885549b 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -40,7 +40,9 @@ jobs: ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Formatting - run: ci_run bash -c "cd prover && cargo fmt --check" + run: | + ci_run git config --global --add safe.directory /usr/src/zksync + ci_run zkstack dev fmt --check rustfmt unit-tests: runs-on: [ matterlabs-ci-runner-highmem-long ] From 561fc1bddfc79061dab9d8d150baa06acfa90692 Mon Sep 17 00:00:00 2001 From: QEDK <1994constant@gmail.com> Date: Thu, 24 Oct 2024 06:04:10 +0530 Subject: [PATCH 119/140] feat: Implement gas relay mode and inclusion data for data attestation (#3070) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR adds gas relay API support for gasless submission to the Avail network, it also provides the attestation implementation necessary for data attestation. ## Why ❔ Gas relay API support is required for Avail partners that choose to pay in a different token. Data attestation ensures that arbitrary tx data cannot be used for rollup finality and that no data withholding attack can occur. ## Checklist - [X] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [X] Documentation comments have been added / updated. - [X] Code has been formatted via `zk fmt` and `zk lint`. 
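For reviewers, a rough sketch of how the two client modes are selected via the env-based config after this change (all values below are placeholders; only the variable names follow the new config structs):

```bash
# FullClient mode: submit blobs directly to an Avail node.
DA_CLIENT="Avail"
DA_AVAIL_CLIENT_TYPE="FullClient"
DA_BRIDGE_API_URL="https://bridge.example"
DA_TIMEOUT="30"
DA_API_NODE_URL="wss://avail-node.example"
DA_APP_ID="1"
DA_SECRETS_SEED_PHRASE="twelve word seed phrase goes here ..."

# GasRelay mode: submissions are paid for through the gas relay API instead,
# e.g. for partners that pay in a different token.
DA_CLIENT="Avail"
DA_AVAIL_CLIENT_TYPE="GasRelay"
DA_BRIDGE_API_URL="https://bridge.example"
DA_TIMEOUT="30"
DA_GAS_RELAY_API_URL="https://gas-relay.example"
DA_MAX_RETRIES="5"
DA_SECRETS_GAS_RELAY_API_KEY="example-api-key"
```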
Supersedes #2987 --------- Co-authored-by: vibhurajeev Co-authored-by: dimazhornyk Co-authored-by: Dima Zhornyk <55756184+dimazhornyk@users.noreply.github.com> --- .gitignore | 1 + Cargo.lock | 3 + Cargo.toml | 3 +- core/lib/basic_types/src/api_key.rs | 20 ++ core/lib/basic_types/src/lib.rs | 1 + .../lib/config/src/configs/da_client/avail.rs | 28 ++- core/lib/config/src/testonly.rs | 16 +- core/lib/env_config/src/da_client.rs | 80 +++++-- core/lib/protobuf_config/src/da_client.rs | 53 +++- .../src/proto/config/da_client.proto | 20 +- .../src/proto/config/secrets.proto | 1 + core/lib/protobuf_config/src/secrets.rs | 54 ++++- core/node/da_clients/Cargo.toml | 3 + core/node/da_clients/src/avail/client.rs | 226 +++++++++++++++--- core/node/da_clients/src/avail/sdk.rs | 100 +++++++- 15 files changed, 512 insertions(+), 97 deletions(-) create mode 100644 core/lib/basic_types/src/api_key.rs diff --git a/.gitignore b/.gitignore index 86ed40c7041..adf3b779961 100644 --- a/.gitignore +++ b/.gitignore @@ -115,6 +115,7 @@ prover/data/keys/setup_* # ZK Stack CLI chains/era/configs/* chains/gateway/* +chains/avail/* configs/* era-observability/ core/tests/ts-integration/deployments-zk diff --git a/Cargo.lock b/Cargo.lock index 7e4cad34cf8..a42ef8e3fdc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10028,14 +10028,17 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "backon", "base58", "blake2 0.10.6", "blake2b_simd", + "bytes", "flate2", "futures 0.3.30", "hex", "jsonrpsee 0.23.2", "parity-scale-codec", + "reqwest 0.12.7", "scale-encode", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index f1e70e7f302..0f8e6ba77ae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,6 +110,7 @@ backon = "0.4.4" bigdecimal = "0.4.5" bincode = "1" blake2 = "0.10" +bytes = "1" chrono = "0.4" clap = "4.2.2" codegen = "0.2.0" @@ -155,7 +156,7 @@ rayon = "1.3.1" regex = "1" reqwest = "0.12" rlp = "0.5" -rocksdb = "0.21.0" +rocksdb = "0.21" rustc_version = "0.4.0" rustls = "0.23" secp256k1 = { version = "0.27.0", features = ["recovery", "global-context"] } diff --git a/core/lib/basic_types/src/api_key.rs b/core/lib/basic_types/src/api_key.rs new file mode 100644 index 00000000000..eadf4e9051b --- /dev/null +++ b/core/lib/basic_types/src/api_key.rs @@ -0,0 +1,20 @@ +use std::str::FromStr; + +use secrecy::{ExposeSecret, Secret}; + +#[derive(Debug, Clone)] +pub struct APIKey(pub Secret); + +impl PartialEq for APIKey { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for APIKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(APIKey(s.parse()?)) + } +} diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 79c7b3924e3..7953f362fd4 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -24,6 +24,7 @@ use serde::{de, Deserialize, Deserializer, Serialize}; #[macro_use] mod macros; +pub mod api_key; pub mod basic_fri_types; pub mod commitment; pub mod network; diff --git a/core/lib/config/src/configs/da_client/avail.rs b/core/lib/config/src/configs/da_client/avail.rs index 590dc5fef18..b8e9db0f393 100644 --- a/core/lib/config/src/configs/da_client/avail.rs +++ b/core/lib/config/src/configs/da_client/avail.rs @@ -1,16 +1,38 @@ use serde::Deserialize; -use zksync_basic_types::seed_phrase::SeedPhrase; +use zksync_basic_types::{api_key::APIKey, seed_phrase::SeedPhrase}; + +pub const AVAIL_GAS_RELAY_CLIENT_NAME: &str = "GasRelay"; +pub const AVAIL_FULL_CLIENT_NAME: &str 
= "FullClient"; + +#[derive(Clone, Debug, PartialEq, Deserialize)] +#[serde(tag = "avail_client")] +pub enum AvailClientConfig { + FullClient(AvailDefaultConfig), + GasRelay(AvailGasRelayConfig), +} #[derive(Clone, Debug, PartialEq, Deserialize)] pub struct AvailConfig { - pub api_node_url: String, pub bridge_api_url: String, - pub app_id: u32, pub timeout: usize, + #[serde(flatten)] + pub config: AvailClientConfig, +} + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailDefaultConfig { + pub api_node_url: String, + pub app_id: u32, +} + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailGasRelayConfig { + pub gas_relay_api_url: String, pub max_retries: usize, } #[derive(Clone, Debug, PartialEq)] pub struct AvailSecrets { pub seed_phrase: Option, + pub gas_relay_api_key: Option, } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 9b1ec13e2d2..880bc5aa98d 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -3,6 +3,7 @@ use std::num::NonZeroUsize; use rand::{distributions::Distribution, Rng}; use secrecy::Secret; use zksync_basic_types::{ + api_key::APIKey, basic_fri_types::CircuitIdRoundTuple, commitment::L1BatchCommitmentMode, network::Network, @@ -17,7 +18,12 @@ use zksync_crypto_primitives::K256PrivateKey; use crate::{ configs::{ - self, da_client::DAClientConfig::Avail, external_price_api_client::ForcedPriceClientConfig, + self, + da_client::{ + avail::{AvailClientConfig, AvailDefaultConfig}, + DAClientConfig::Avail, + }, + external_price_api_client::ForcedPriceClientConfig, }, AvailConfig, }; @@ -935,11 +941,12 @@ impl Distribution for EncodeDist { impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::da_client::DAClientConfig { Avail(AvailConfig { - api_node_url: self.sample(rng), bridge_api_url: self.sample(rng), - app_id: self.sample(rng), timeout: self.sample(rng), - max_retries: self.sample(rng), + config: AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: self.sample(rng), + app_id: self.sample(rng), + }), }) } } @@ -948,6 +955,7 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::secrets::DataAvailabilitySecrets { configs::secrets::DataAvailabilitySecrets::Avail(configs::da_client::avail::AvailSecrets { seed_phrase: Some(SeedPhrase(Secret::new(self.sample(rng)))), + gas_relay_api_key: Some(APIKey(Secret::new(self.sample(rng)))), }) } } diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs index 0fc3ad216f8..1043786fc1e 100644 --- a/core/lib/env_config/src/da_client.rs +++ b/core/lib/env_config/src/da_client.rs @@ -2,19 +2,34 @@ use std::env; use zksync_config::configs::{ da_client::{ - avail::AvailSecrets, DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, - OBJECT_STORE_CLIENT_CONFIG_NAME, + avail::{ + AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME, + }, + DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, }, secrets::DataAvailabilitySecrets, + AvailConfig, }; use crate::{envy_load, FromEnv}; impl FromEnv for DAClientConfig { fn from_env() -> anyhow::Result { - let client_tag = std::env::var("DA_CLIENT")?; + let client_tag = env::var("DA_CLIENT")?; let config = match client_tag.as_str() { - AVAIL_CLIENT_CONFIG_NAME => Self::Avail(envy_load("da_avail_config", "DA_")?), + AVAIL_CLIENT_CONFIG_NAME => Self::Avail(AvailConfig { + bridge_api_url: env::var("DA_BRIDGE_API_URL").ok().unwrap(), + timeout: env::var("DA_TIMEOUT")?.parse()?, 
+ config: match env::var("DA_AVAIL_CLIENT_TYPE")?.as_str() { + AVAIL_FULL_CLIENT_NAME => { + AvailClientConfig::FullClient(envy_load("da_avail_full_client", "DA_")?) + } + AVAIL_GAS_RELAY_CLIENT_NAME => { + AvailClientConfig::GasRelay(envy_load("da_avail_gas_relay", "DA_")?) + } + _ => anyhow::bail!("Unknown Avail DA client type"), + }, + }), OBJECT_STORE_CLIENT_CONFIG_NAME => { Self::ObjectStore(envy_load("da_object_store", "DA_")?) } @@ -30,11 +45,21 @@ impl FromEnv for DataAvailabilitySecrets { let client_tag = std::env::var("DA_CLIENT")?; let secrets = match client_tag.as_str() { AVAIL_CLIENT_CONFIG_NAME => { - let seed_phrase = env::var("DA_SECRETS_SEED_PHRASE") - .ok() - .map(|s| s.parse()) - .transpose()?; - Self::Avail(AvailSecrets { seed_phrase }) + let seed_phrase: Option = + env::var("DA_SECRETS_SEED_PHRASE") + .ok() + .map(|s| s.parse().unwrap()); + let gas_relay_api_key: Option = + env::var("DA_SECRETS_GAS_RELAY_API_KEY") + .ok() + .map(|s| s.parse().unwrap()); + if seed_phrase.is_none() && gas_relay_api_key.is_none() { + anyhow::bail!("No secrets provided for Avail DA client"); + } + Self::Avail(AvailSecrets { + seed_phrase, + gas_relay_api_key, + }) } _ => anyhow::bail!("Unknown DA client name: {}", client_tag), }; @@ -47,7 +72,10 @@ impl FromEnv for DataAvailabilitySecrets { mod tests { use zksync_config::{ configs::{ - da_client::{DAClientConfig, DAClientConfig::ObjectStore}, + da_client::{ + avail::{AvailClientConfig, AvailDefaultConfig}, + DAClientConfig::{self, ObjectStore}, + }, object_store::ObjectStoreMode::GCS, }, AvailConfig, ObjectStoreConfig, @@ -91,14 +119,14 @@ mod tests { bridge_api_url: &str, app_id: u32, timeout: usize, - max_retries: usize, ) -> DAClientConfig { DAClientConfig::Avail(AvailConfig { - api_node_url: api_node_url.to_string(), bridge_api_url: bridge_api_url.to_string(), - app_id, timeout, - max_retries, + config: AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: api_node_url.to_string(), + app_id, + }), }) } @@ -107,11 +135,13 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" DA_CLIENT="Avail" - DA_API_NODE_URL="localhost:12345" + DA_AVAIL_CLIENT_TYPE="FullClient" + DA_BRIDGE_API_URL="localhost:54321" - DA_APP_ID="1" DA_TIMEOUT="2" - DA_MAX_RETRIES="3" + + DA_API_NODE_URL="localhost:12345" + DA_APP_ID="1" "#; lock.set_env(config); @@ -124,7 +154,6 @@ mod tests { "localhost:54321", "1".parse::().unwrap(), "2".parse::().unwrap(), - "3".parse::().unwrap(), ) ); } @@ -139,15 +168,18 @@ mod tests { lock.set_env(config); - let actual = match DataAvailabilitySecrets::from_env().unwrap() { - DataAvailabilitySecrets::Avail(avail) => avail.seed_phrase, + let (actual_seed, actual_key) = match DataAvailabilitySecrets::from_env().unwrap() { + DataAvailabilitySecrets::Avail(avail) => (avail.seed_phrase, avail.gas_relay_api_key), }; assert_eq!( - actual.unwrap(), - "bottom drive obey lake curtain smoke basket hold race lonely fit walk" - .parse() - .unwrap() + (actual_seed.unwrap(), actual_key), + ( + "bottom drive obey lake curtain smoke basket hold race lonely fit walk" + .parse() + .unwrap(), + None + ) ); } } diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs index 1499e88efb4..a17a8711a27 100644 --- a/core/lib/protobuf_config/src/da_client.rs +++ b/core/lib/protobuf_config/src/da_client.rs @@ -1,10 +1,10 @@ use anyhow::Context; -use zksync_config::{ - configs::{ - da_client::DAClientConfig::{Avail, ObjectStore}, - {self}, +use zksync_config::configs::{ + self, + da_client::{ + 
avail::{AvailClientConfig, AvailConfig, AvailDefaultConfig, AvailGasRelayConfig}, + DAClientConfig::{Avail, ObjectStore}, }, - AvailConfig, }; use zksync_protobuf::{required, ProtoRepr}; @@ -18,15 +18,31 @@ impl ProtoRepr for proto::DataAvailabilityClient { let client = match config { proto::data_availability_client::Config::Avail(conf) => Avail(AvailConfig { - api_node_url: required(&conf.api_node_url) - .context("api_node_url")? - .clone(), bridge_api_url: required(&conf.bridge_api_url) .context("bridge_api_url")? .clone(), - app_id: *required(&conf.app_id).context("app_id")?, timeout: *required(&conf.timeout).context("timeout")? as usize, - max_retries: *required(&conf.max_retries).context("max_retries")? as usize, + config: match conf.config.as_ref() { + Some(proto::avail_config::Config::FullClient(full_client_conf)) => { + AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: required(&full_client_conf.api_node_url) + .context("api_node_url")? + .clone(), + app_id: *required(&full_client_conf.app_id).context("app_id")?, + }) + } + Some(proto::avail_config::Config::GasRelay(gas_relay_conf)) => { + AvailClientConfig::GasRelay(AvailGasRelayConfig { + gas_relay_api_url: required(&gas_relay_conf.gas_relay_api_url) + .context("gas_relay_api_url")? + .clone(), + max_retries: *required(&gas_relay_conf.max_retries) + .context("max_retries")? + as usize, + }) + } + None => return Err(anyhow::anyhow!("Invalid Avail DA configuration")), + }, }), proto::data_availability_client::Config::ObjectStore(conf) => { ObjectStore(object_store_proto::ObjectStore::read(conf)?) @@ -41,11 +57,22 @@ impl ProtoRepr for proto::DataAvailabilityClient { Avail(config) => Self { config: Some(proto::data_availability_client::Config::Avail( proto::AvailConfig { - api_node_url: Some(config.api_node_url.clone()), bridge_api_url: Some(config.bridge_api_url.clone()), - app_id: Some(config.app_id), timeout: Some(config.timeout as u64), - max_retries: Some(config.max_retries as u64), + config: match &config.config { + AvailClientConfig::FullClient(conf) => Some( + proto::avail_config::Config::FullClient(proto::AvailClientConfig { + api_node_url: Some(conf.api_node_url.clone()), + app_id: Some(conf.app_id), + }), + ), + AvailClientConfig::GasRelay(conf) => Some( + proto::avail_config::Config::GasRelay(proto::AvailGasRelayConfig { + gas_relay_api_url: Some(conf.gas_relay_api_url.clone()), + max_retries: Some(conf.max_retries as u64), + }), + ), + }, }, )), }, diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto index d01bda2c847..73fa2435996 100644 --- a/core/lib/protobuf_config/src/proto/config/da_client.proto +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -5,12 +5,26 @@ package zksync.config.da_client; import "zksync/config/object_store.proto"; message AvailConfig { - optional string api_node_url = 1; optional string bridge_api_url = 2; - optional uint32 app_id = 4; optional uint64 timeout = 5; - optional uint64 max_retries = 6; + oneof config { + AvailClientConfig full_client = 7; + AvailGasRelayConfig gas_relay = 8; + } + reserved 1; reserved "api_node_url"; reserved 3; reserved "seed"; + reserved 4; reserved "app_id"; + reserved 6; reserved "max_retries"; +} + +message AvailClientConfig { + optional string api_node_url = 1; + optional uint32 app_id = 2; +} + +message AvailGasRelayConfig { + optional string gas_relay_api_url = 1; + optional uint64 max_retries = 2; } message DataAvailabilityClient { diff --git 
a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto index 17b915b3f08..43c4542783c 100644 --- a/core/lib/protobuf_config/src/proto/config/secrets.proto +++ b/core/lib/protobuf_config/src/proto/config/secrets.proto @@ -21,6 +21,7 @@ message ConsensusSecrets { message AvailSecret { optional string seed_phrase = 1; + optional string gas_relay_api_key = 2; } message DataAvailabilitySecrets { diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs index 58735148007..07ab340c231 100644 --- a/core/lib/protobuf_config/src/secrets.rs +++ b/core/lib/protobuf_config/src/secrets.rs @@ -2,7 +2,7 @@ use std::str::FromStr; use anyhow::Context; use secrecy::ExposeSecret; -use zksync_basic_types::{seed_phrase::SeedPhrase, url::SensitiveUrl}; +use zksync_basic_types::{api_key::APIKey, seed_phrase::SeedPhrase, url::SensitiveUrl}; use zksync_config::configs::{ consensus::{AttesterSecretKey, ConsensusSecrets, NodeSecretKey, ValidatorSecretKey}, da_client::avail::AvailSecrets, @@ -103,14 +103,31 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { let secrets = required(&self.da_secrets).context("config")?; let client = match secrets { - DaSecrets::Avail(avail_secret) => DataAvailabilitySecrets::Avail(AvailSecrets { - seed_phrase: Some( - SeedPhrase::from_str( - required(&avail_secret.seed_phrase).context("seed_phrase")?, - ) - .unwrap(), - ), - }), + DaSecrets::Avail(avail_secret) => { + let seed_phrase = match avail_secret.seed_phrase.as_ref() { + Some(seed) => match SeedPhrase::from_str(seed) { + Ok(seed) => Some(seed), + Err(_) => None, + }, + None => None, + }; + let gas_relay_api_key = match avail_secret.gas_relay_api_key.as_ref() { + Some(api_key) => match APIKey::from_str(api_key) { + Ok(api_key) => Some(api_key), + Err(_) => None, + }, + None => None, + }; + if seed_phrase.is_none() && gas_relay_api_key.is_none() { + return Err(anyhow::anyhow!( + "At least one of seed_phrase or gas_relay_api_key must be provided" + )); + } + DataAvailabilitySecrets::Avail(AvailSecrets { + seed_phrase, + gas_relay_api_key, + }) + } }; Ok(client) @@ -133,7 +150,24 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { None }; - Some(DaSecrets::Avail(AvailSecret { seed_phrase })) + let gas_relay_api_key = if config.gas_relay_api_key.is_some() { + Some( + config + .clone() + .gas_relay_api_key + .unwrap() + .0 + .expose_secret() + .to_string(), + ) + } else { + None + }; + + Some(DaSecrets::Avail(AvailSecret { + seed_phrase, + gas_relay_api_key, + })) } }; diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index 60b65067f48..fa2f15920bd 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -37,3 +37,6 @@ blake2b_simd.workspace = true jsonrpsee = { workspace = true, features = ["ws-client"] } parity-scale-codec = { workspace = true, features = ["derive"] } subxt-signer = { workspace = true, features = ["sr25519", "native"] } +reqwest = { workspace = true } +bytes = { workspace = true } +backon.workspace = true diff --git a/core/node/da_clients/src/avail/client.rs b/core/node/da_clients/src/avail/client.rs index 7718691bf18..46d652d5713 100644 --- a/core/node/da_clients/src/avail/client.rs +++ b/core/node/da_clients/src/avail/client.rs @@ -1,34 +1,133 @@ -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, sync::Arc, time::Duration}; +use anyhow::anyhow; use async_trait::async_trait; use jsonrpsee::ws_client::WsClientBuilder; +use serde::{Deserialize, 
Serialize}; use subxt_signer::ExposeSecret; -use zksync_config::configs::da_client::avail::{AvailConfig, AvailSecrets}; +use zksync_config::configs::da_client::avail::{AvailClientConfig, AvailConfig, AvailSecrets}; use zksync_da_client::{ types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, }; +use zksync_types::{ + ethabi::{self, Token}, + web3::contract::Tokenize, + H256, U256, +}; + +use crate::avail::sdk::{GasRelayClient, RawAvailClient}; -use crate::avail::sdk::RawAvailClient; +#[derive(Debug, Clone)] +enum AvailClientMode { + Default(Box), + GasRelay(GasRelayClient), +} /// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. #[derive(Debug, Clone)] pub struct AvailClient { config: AvailConfig, - sdk_client: Arc, + sdk_client: Arc, + api_client: Arc, // bridge API reqwest client +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct BridgeAPIResponse { + blob_root: Option, + bridge_root: Option, + data_root_index: Option, + data_root_proof: Option>, + leaf: Option, + leaf_index: Option, + leaf_proof: Option>, + range_hash: Option, + error: Option, +} + +#[derive(Deserialize, Serialize, Debug)] +#[serde(rename_all = "camelCase")] +struct MerkleProofInput { + // proof of inclusion for the data root + data_root_proof: Vec, + // proof of inclusion of leaf within blob/bridge root + leaf_proof: Vec, + // abi.encodePacked(startBlock, endBlock) of header range commitment on vectorx + range_hash: H256, + // index of the data root in the commitment tree + data_root_index: U256, + // blob root to check proof against, or reconstruct the data root + blob_root: H256, + // bridge root to check proof against, or reconstruct the data root + bridge_root: H256, + // leaf being proven + leaf: H256, + // index of the leaf in the blob/bridge root tree + leaf_index: U256, +} + +impl Tokenize for MerkleProofInput { + fn into_tokens(self) -> Vec { + vec![Token::Tuple(vec![ + Token::Array( + self.data_root_proof + .iter() + .map(|x| Token::FixedBytes(x.as_bytes().to_vec())) + .collect(), + ), + Token::Array( + self.leaf_proof + .iter() + .map(|x| Token::FixedBytes(x.as_bytes().to_vec())) + .collect(), + ), + Token::FixedBytes(self.range_hash.as_bytes().to_vec()), + Token::Uint(self.data_root_index), + Token::FixedBytes(self.blob_root.as_bytes().to_vec()), + Token::FixedBytes(self.bridge_root.as_bytes().to_vec()), + Token::FixedBytes(self.leaf.as_bytes().to_vec()), + Token::Uint(self.leaf_index), + ])] + } } impl AvailClient { pub async fn new(config: AvailConfig, secrets: AvailSecrets) -> anyhow::Result { - let seed_phrase = secrets - .seed_phrase - .ok_or_else(|| anyhow::anyhow!("seed phrase"))?; - let sdk_client = RawAvailClient::new(config.app_id, seed_phrase.0.expose_secret()).await?; - - Ok(Self { - config, - sdk_client: Arc::new(sdk_client), - }) + let api_client = Arc::new(reqwest::Client::new()); + match config.config.clone() { + AvailClientConfig::GasRelay(conf) => { + let gas_relay_api_key = secrets + .gas_relay_api_key + .ok_or_else(|| anyhow::anyhow!("Gas relay API key is missing"))?; + let gas_relay_client = GasRelayClient::new( + &conf.gas_relay_api_url, + gas_relay_api_key.0.expose_secret(), + conf.max_retries, + Arc::clone(&api_client), + ) + .await?; + Ok(Self { + config, + sdk_client: Arc::new(AvailClientMode::GasRelay(gas_relay_client)), + api_client, + }) + } + AvailClientConfig::FullClient(conf) => { + let seed_phrase = secrets + .seed_phrase + .ok_or_else(|| 
anyhow::anyhow!("Seed phrase is missing"))?; + // these unwraps are safe because we validate in protobuf config + let sdk_client = + RawAvailClient::new(conf.app_id, seed_phrase.0.expose_secret()).await?; + + Ok(Self { + config, + sdk_client: Arc::new(AvailClientMode::Default(Box::new(sdk_client))), + api_client, + }) + } + } } } @@ -39,37 +138,83 @@ impl DataAvailabilityClient for AvailClient { _: u32, // batch_number data: Vec, ) -> anyhow::Result { - let client = WsClientBuilder::default() - .build(self.config.api_node_url.as_str()) - .await - .map_err(to_non_retriable_da_error)?; + match self.sdk_client.as_ref() { + AvailClientMode::Default(client) => { + let default_config = match &self.config.config { + AvailClientConfig::FullClient(conf) => conf, + _ => unreachable!(), // validated in protobuf config + }; + let ws_client = WsClientBuilder::default() + .build(default_config.api_node_url.clone().as_str()) + .await + .map_err(to_non_retriable_da_error)?; - let extrinsic = self - .sdk_client - .build_extrinsic(&client, data) - .await - .map_err(to_non_retriable_da_error)?; + let extrinsic = client + .build_extrinsic(&ws_client, data) + .await + .map_err(to_non_retriable_da_error)?; - let block_hash = self - .sdk_client - .submit_extrinsic(&client, extrinsic.as_str()) - .await - .map_err(to_non_retriable_da_error)?; - let tx_id = self - .sdk_client - .get_tx_id(&client, block_hash.as_str(), extrinsic.as_str()) - .await - .map_err(to_non_retriable_da_error)?; - - Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + let block_hash = client + .submit_extrinsic(&ws_client, extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + let tx_id = client + .get_tx_id(&ws_client, block_hash.as_str(), extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + } + AvailClientMode::GasRelay(client) => { + let (block_hash, extrinsic_index) = client + .post_data(data) + .await + .map_err(to_retriable_da_error)?; + Ok(DispatchResponse { + blob_id: format!("{:x}:{}", block_hash, extrinsic_index), + }) + } + } } async fn get_inclusion_data( &self, - _blob_id: &str, + blob_id: &str, ) -> anyhow::Result, DAError> { - // TODO: implement inclusion data retrieval - Ok(Some(InclusionData { data: vec![] })) + let (block_hash, tx_idx) = blob_id.split_once(':').ok_or_else(|| DAError { + error: anyhow!("Invalid blob ID format"), + is_retriable: false, + })?; + let url = format!( + "{}/eth/proof/{}?index={}", + self.config.bridge_api_url, block_hash, tx_idx + ); + + let response = self + .api_client + .get(&url) + .timeout(Duration::from_secs(self.config.timeout as u64)) + .send() + .await + .map_err(to_retriable_da_error)?; + + let bridge_api_data = response + .json::() + .await + .map_err(to_retriable_da_error)?; + + let attestation_data: MerkleProofInput = MerkleProofInput { + data_root_proof: bridge_api_data.data_root_proof.unwrap(), + leaf_proof: bridge_api_data.leaf_proof.unwrap(), + range_hash: bridge_api_data.range_hash.unwrap(), + data_root_index: bridge_api_data.data_root_index.unwrap(), + blob_root: bridge_api_data.blob_root.unwrap(), + bridge_root: bridge_api_data.bridge_root.unwrap(), + leaf: bridge_api_data.leaf.unwrap(), + leaf_index: bridge_api_data.leaf_index.unwrap(), + }; + Ok(Some(InclusionData { + data: ethabi::encode(&attestation_data.into_tokens()), + })) } fn clone_boxed(&self) -> Box { @@ -87,3 +232,10 @@ pub fn to_non_retriable_da_error(error: impl Into) -> DAError { is_retriable: false, } 
} + +pub fn to_retriable_da_error(error: impl Into) -> DAError { + DAError { + error: error.into(), + is_retriable: true, + } +} diff --git a/core/node/da_clients/src/avail/sdk.rs b/core/node/da_clients/src/avail/sdk.rs index 002422109d0..f693280ba4a 100644 --- a/core/node/da_clients/src/avail/sdk.rs +++ b/core/node/da_clients/src/avail/sdk.rs @@ -1,18 +1,22 @@ //! Minimal reimplementation of the Avail SDK client required for the DA client implementation. //! This is considered to be a temporary solution until a mature SDK is available on crates.io -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc, time}; +use backon::{ConstantBuilder, Retryable}; +use bytes::Bytes; use jsonrpsee::{ core::client::{Client, ClientT, Subscription, SubscriptionClientT}, rpc_params, }; use parity_scale_codec::{Compact, Decode, Encode}; use scale_encode::EncodeAsFields; +use serde::{Deserialize, Serialize}; use subxt_signer::{ bip39::Mnemonic, sr25519::{Keypair, Signature}, }; +use zksync_types::H256; use crate::avail::client::to_non_retriable_da_error; @@ -287,7 +291,7 @@ impl RawAvailClient { let status = sub.next().await.transpose()?; if status.is_some() && status.as_ref().unwrap().is_object() { - if let Some(block_hash) = status.unwrap().get("inBlock") { + if let Some(block_hash) = status.unwrap().get("finalized") { break block_hash .as_str() .ok_or_else(|| anyhow::anyhow!("Invalid block hash"))? @@ -369,3 +373,95 @@ fn ss58hash(data: &[u8]) -> Vec { ctx.update(data); ctx.finalize().to_vec() } + +/// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. +#[derive(Debug, Clone)] +pub(crate) struct GasRelayClient { + api_url: String, + api_key: String, + max_retries: usize, + api_client: Arc, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPISubmissionResponse { + submission_id: String, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPIStatusResponse { + submission: GasRelayAPISubmission, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPISubmission { + block_hash: Option, + extrinsic_index: Option, +} + +impl GasRelayClient { + const DEFAULT_INCLUSION_DELAY: time::Duration = time::Duration::from_secs(60); + const RETRY_DELAY: time::Duration = time::Duration::from_secs(5); + pub(crate) async fn new( + api_url: &str, + api_key: &str, + max_retries: usize, + api_client: Arc, + ) -> anyhow::Result { + Ok(Self { + api_url: api_url.to_owned(), + api_key: api_key.to_owned(), + max_retries, + api_client, + }) + } + + pub(crate) async fn post_data(&self, data: Vec) -> anyhow::Result<(H256, u64)> { + let submit_url = format!("{}/user/submit_raw_data?token=ethereum", &self.api_url); + // send the data to the gas relay + let submit_response = self + .api_client + .post(&submit_url) + .body(Bytes::from(data)) + .header("Content-Type", "text/plain") + .header("Authorization", &self.api_key) + .send() + .await?; + + let submit_response = submit_response + .json::() + .await?; + + let status_url = format!( + "{}/user/get_submission_info?submission_id={}", + self.api_url, submit_response.submission_id + ); + + tokio::time::sleep(Self::DEFAULT_INCLUSION_DELAY).await; + let status_response = (|| async { + self.api_client + .get(&status_url) + .header("Authorization", &self.api_key) + .send() + .await + }) + .retry( + &ConstantBuilder::default() + .with_delay(Self::RETRY_DELAY) + .with_max_times(self.max_retries), + ) + .await?; + + let status_response = status_response.json::().await?; + let (block_hash, 
extrinsic_index) = ( + status_response.submission.block_hash.ok_or_else(|| { + anyhow::anyhow!("Block hash not found in the response from the gas relay") + })?, + status_response.submission.extrinsic_index.ok_or_else(|| { + anyhow::anyhow!("Extrinsic index not found in the response from the gas relay") + })?, + ); + + Ok((block_hash, extrinsic_index)) + } +} From 04f4daef85b618b76dda618906b9d8b09cddfe58 Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Thu, 24 Oct 2024 15:04:47 +1100 Subject: [PATCH 120/140] chore: lower no base token ratio log level (#3141) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Lowers no base token ratio log level to `WARN` instead of `ERROR` ## Why ❔ It gets printed >500 times during integration tests making it hard to find the actual errors. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/node/base_token_adjuster/src/base_token_ratio_provider.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs index e16ea16ff0f..b613e5219dd 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs @@ -81,7 +81,7 @@ impl DBBaseTokenRatioProvider { // Though the DB should be populated very soon after the server starts, it is possible // to have no ratios in the DB right after genesis. Having initial ratios in the DB // from the genesis stage will eliminate this possibility. - tracing::error!("No latest price found in the database. Using default ratio."); + tracing::warn!("No latest price found in the database. Using default ratio."); BaseTokenConversionRatio::default() } Err(err) => anyhow::bail!("Failed to get latest base token ratio: {:?}", err), From 6719429312fdf5137459711aa54da16e550c4919 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Thu, 24 Oct 2024 09:58:16 +0200 Subject: [PATCH 121/140] ci: Remove invalid step from GAR build workflow (#3164) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ There is no need to remove local docker image. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .../build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml index 4639f8c77c4..30990889caf 100644 --- a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml @@ -69,10 +69,6 @@ jobs: --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Remove prover-gpu-fri-gar image to free space - run: | - docker image rm us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Move Setup data from prover-gpu-fri-gar to circuit-prover-gpu-gar run: | mv -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ From 16f275756cd28024a6b11ac1ac327eb5b8b446e1 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 24 Oct 2024 12:07:20 +0300 Subject: [PATCH 122/140] feat: gateway preparation (#3006) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - adds new fields to DB tables and rust structs - adds new config variables - update commitment generator to work with post-gateway - adds new vm subversion (vm fast is not changed yet) ## Why ❔ prepare for gateway, reduce sync-layer-stable diff ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
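As a rough illustration of how the new pieces fit together (a sketch under assumptions, not the exact wiring in this PR): code paths touched here typically branch on whether the protocol version predates the gateway upgrade and thread the new pubdata settings through the added `PubdataParams` struct.

```rust
use zksync_basic_types::{
    commitment::{L1BatchCommitmentMode, PubdataParams},
    protocol_version::ProtocolVersionId,
    Address,
};

// Hypothetical helper: the field names match the structs added in this PR,
// but the function itself is illustrative only.
fn example_pubdata_params(
    l2_da_validator_addr: Address,
    mode: L1BatchCommitmentMode,
    version: ProtocolVersionId,
) -> Option<PubdataParams> {
    if version.is_pre_gateway() {
        // Pre-gateway batches keep the old pubdata/commitment handling.
        None
    } else {
        // Post-gateway batches carry the DA validator address and pubdata type.
        Some(PubdataParams {
            l2_da_validator_address: l2_da_validator_addr,
            pubdata_type: mode,
        })
    }
}
```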
--- Cargo.lock | 2 + core/bin/external_node/src/config/mod.rs | 8 + core/bin/external_node/src/node_builder.rs | 20 +- core/bin/snapshots_creator/src/tests.rs | 1 + .../system-constants-generator/src/utils.rs | 9 +- core/bin/zksync_server/src/node_builder.rs | 6 +- core/lib/basic_types/src/commitment.rs | 24 +- core/lib/basic_types/src/protocol_version.rs | 7 + core/lib/basic_types/src/vm.rs | 1 + core/lib/config/src/configs/contracts.rs | 9 + core/lib/config/src/testonly.rs | 2 + core/lib/constants/src/contracts.rs | 26 +- core/lib/constants/src/lib.rs | 1 + core/lib/constants/src/message_root.rs | 5 + core/lib/constants/src/system_logs.rs | 11 +- core/lib/contracts/src/lib.rs | 14 + ...01396dacefc0cea8cbcf5807185eb00fc0f7.json} | 30 +- ...0cc9e176729744c779fee97ca9392ae8a8c8.json} | 18 +- ...11345ef888824e0ca3c5f39befbbc5bd0388.json} | 7 +- ...892118f5732374e62f35e27800422afb5746.json} | 30 +- ...2f38816f163a3e3fba4fdbb81076b969e970.json} | 30 +- ...911add046315e5f8877bc57a34e3dadf9e37.json} | 30 +- ...7bd02627ebaf2df7c5ad517cb60a243182d2.json} | 16 +- ...3369701d7cd5f75ca031bf77ca27d0437cb9.json} | 30 +- ...33f6503bc79cc9f809d35c558e275ba117ba.json} | 8 +- ...806fcc54d73216a7dc54be6ba210ef02d789.json} | 30 +- ...0e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json} | 30 +- ...673e4b5bba059ebe07bbbb64578881db030b.json} | 30 +- ...7999eabb611338925abe9dc9e64c837183d9.json} | 18 +- .../20240925103531_gateway_upgrade.down.sql | 8 + .../20240925103531_gateway_upgrade.up.sql | 11 + core/lib/dal/src/blocks_dal.rs | 92 +- core/lib/dal/src/consensus/conv.rs | 45 +- core/lib/dal/src/consensus/mod.rs | 5 +- core/lib/dal/src/consensus/proto/mod.proto | 11 + core/lib/dal/src/consensus/tests.rs | 11 +- core/lib/dal/src/consensus_dal/tests.rs | 3 + core/lib/dal/src/eth_watcher_dal.rs | 2 +- core/lib/dal/src/lib.rs | 4 +- core/lib/dal/src/models/storage_block.rs | 16 +- core/lib/dal/src/models/storage_sync.rs | 17 +- core/lib/dal/src/sync_dal.rs | 4 +- core/lib/dal/src/tests/mod.rs | 2 + core/lib/env_config/src/contracts.rs | 4 + core/lib/eth_client/src/clients/http/query.rs | 85 +- core/lib/eth_client/src/clients/mock.rs | 34 +- core/lib/multivm/Cargo.toml | 1 + core/lib/multivm/src/lib.rs | 1 + core/lib/multivm/src/pubdata_builders/mod.rs | 24 + .../multivm/src/pubdata_builders/rollup.rs | 128 ++ .../lib/multivm/src/pubdata_builders/tests.rs | 123 + .../lib/multivm/src/pubdata_builders/utils.rs | 70 + .../multivm/src/pubdata_builders/validium.rs | 93 + core/lib/multivm/src/utils/events.rs | 56 +- core/lib/multivm/src/utils/mod.rs | 69 +- core/lib/multivm/src/versions/shadow/mod.rs | 1 - core/lib/multivm/src/versions/shadow/tests.rs | 26 +- .../src/versions/testonly/block_tip.rs | 8 +- .../src/versions/testonly/bootloader.rs | 6 +- .../versions/testonly/bytecode_publishing.rs | 8 +- .../multivm/src/versions/testonly/circuits.rs | 4 +- .../src/versions/testonly/code_oracle.rs | 10 +- .../src/versions/testonly/default_aa.rs | 8 +- .../versions/testonly/get_used_contracts.rs | 7 +- .../src/versions/testonly/is_write_initial.rs | 6 +- .../src/versions/testonly/l1_tx_execution.rs | 14 +- .../src/versions/testonly/l2_blocks.rs | 18 +- core/lib/multivm/src/versions/testonly/mod.rs | 13 +- .../src/versions/testonly/nonce_holder.rs | 4 +- .../src/versions/testonly/precompiles.rs | 8 +- .../multivm/src/versions/testonly/refunds.rs | 27 +- .../src/versions/testonly/require_eip712.rs | 8 +- .../src/versions/testonly/secp256r1.rs | 4 +- .../src/versions/testonly/simple_execution.rs | 16 +- 
.../multivm/src/versions/testonly/storage.rs | 8 +- .../src/versions/testonly/tester/mod.rs | 16 +- .../testonly/tester/transaction_test_info.rs | 13 +- .../multivm/src/versions/testonly/transfer.rs | 22 +- .../multivm/src/versions/testonly/upgrade.rs | 22 +- core/lib/multivm/src/versions/vm_1_3_2/vm.rs | 17 +- .../vm_1_4_1/tracers/pubdata_tracer.rs | 3 +- .../vm_1_4_1/types/internals/pubdata.rs | 2 +- core/lib/multivm/src/versions/vm_1_4_1/vm.rs | 15 +- .../vm_1_4_2/tracers/pubdata_tracer.rs | 3 +- .../vm_1_4_2/types/internals/pubdata.rs | 2 +- core/lib/multivm/src/versions/vm_1_4_2/vm.rs | 11 +- .../tracers/pubdata_tracer.rs | 3 +- .../types/internals/pubdata.rs | 2 +- .../src/versions/vm_boojum_integration/vm.rs | 11 +- .../multivm/src/versions/vm_fast/pubdata.rs | 2 +- .../multivm/src/versions/vm_fast/tests/mod.rs | 17 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 81 +- .../vm_latest/bootloader_state/state.rs | 43 +- .../vm_latest/bootloader_state/utils.rs | 77 +- .../src/versions/vm_latest/constants.rs | 3 +- .../vm_latest/implementation/execution.rs | 1 + .../versions/vm_latest/tests/call_tracer.rs | 6 +- .../src/versions/vm_latest/tests/mod.rs | 25 +- .../vm_latest/tests/prestate_tracer.rs | 10 +- .../src/versions/vm_latest/tests/rollbacks.rs | 10 +- .../vm_latest/tracers/pubdata_tracer.rs | 30 +- .../versions/vm_latest/types/internals/mod.rs | 2 - .../vm_latest/types/internals/pubdata.rs | 123 - .../vm_latest/types/internals/vm_state.rs | 1 + core/lib/multivm/src/versions/vm_latest/vm.rs | 35 +- core/lib/multivm/src/versions/vm_m5/vm.rs | 19 +- core/lib/multivm/src/versions/vm_m6/vm.rs | 14 +- .../src/versions/vm_refunds_enhancement/vm.rs | 13 +- .../src/versions/vm_virtual_blocks/vm.rs | 11 +- core/lib/multivm/src/vm_instance.rs | 28 +- core/lib/protobuf_config/src/contracts.rs | 16 + .../src/proto/config/contracts.proto | 2 + core/lib/prover_interface/src/inputs.rs | 5 +- core/lib/snapshots_applier/src/tests/utils.rs | 1 + core/lib/state/src/test_utils.rs | 1 + core/lib/tee_verifier/src/lib.rs | 10 +- core/lib/types/src/api/en.rs | 4 +- core/lib/types/src/api/mod.rs | 1 + core/lib/types/src/block.rs | 3 +- core/lib/types/src/commitment/mod.rs | 140 +- core/lib/types/src/commitment/tests/mod.rs | 5 + .../tests/post_boojum_1_4_1_test.json | 33 +- .../tests/post_boojum_1_4_2_test.json | 33 +- .../tests/post_boojum_1_5_0_test.json | 187 +- .../post_boojum_1_5_0_test_with_evm.json | 187 +- .../commitment/tests/post_gateway_test.json | 1977 +++++++++++++++++ core/lib/types/src/l2_to_l1_log.rs | 15 +- core/lib/vm_executor/src/batch/factory.rs | 30 +- core/lib/vm_executor/src/oneshot/block.rs | 1 + core/lib/vm_executor/src/oneshot/contracts.rs | 5 + core/lib/vm_executor/src/oneshot/mod.rs | 7 +- core/lib/vm_executor/src/storage.rs | 17 +- core/lib/vm_interface/src/executor.rs | 3 +- core/lib/vm_interface/src/lib.rs | 5 +- core/lib/vm_interface/src/pubdata/mod.rs | 90 + .../src/types/inputs/execution_mode.rs | 19 + core/lib/vm_interface/src/types/inputs/mod.rs | 2 +- core/lib/vm_interface/src/utils/dump.rs | 17 +- core/lib/vm_interface/src/utils/shadow.rs | 16 +- core/lib/vm_interface/src/vm.rs | 13 +- core/node/api_server/src/web3/state.rs | 1 + core/node/block_reverter/src/tests.rs | 1 + core/node/commitment_generator/Cargo.toml | 1 + core/node/commitment_generator/src/lib.rs | 63 +- core/node/commitment_generator/src/utils.rs | 88 +- core/node/consensus/src/storage/store.rs | 8 + core/node/consensus/src/testonly.rs | 13 +- core/node/db_pruner/src/tests.rs | 1 + 
core/node/eth_sender/src/tests.rs | 4 + core/node/eth_watch/src/lib.rs | 4 +- .../src/l1_gas_price/gas_adjuster/mod.rs | 4 +- core/node/genesis/src/lib.rs | 1 + core/node/logs_bloom_backfill/src/lib.rs | 1 + .../layers/state_keeper/mempool_io.rs | 10 +- .../layers/state_keeper/output_handler.rs | 15 +- core/node/node_sync/src/external_io.rs | 5 +- core/node/node_sync/src/fetcher.rs | 16 +- core/node/node_sync/src/sync_action.rs | 1 + core/node/node_sync/src/tests.rs | 6 +- .../src/request_processor.rs | 91 +- .../src/tee_request_processor.rs | 3 +- .../state_keeper/src/executor/tests/tester.rs | 30 +- core/node/state_keeper/src/io/common/mod.rs | 4 +- core/node/state_keeper/src/io/common/tests.rs | 8 +- core/node/state_keeper/src/io/mempool.rs | 83 +- core/node/state_keeper/src/io/mod.rs | 16 +- core/node/state_keeper/src/io/persistence.rs | 85 +- .../io/seal_logic/l2_block_seal_subtasks.rs | 17 +- .../state_keeper/src/io/seal_logic/mod.rs | 7 +- core/node/state_keeper/src/io/tests/mod.rs | 14 +- core/node/state_keeper/src/io/tests/tester.rs | 2 + core/node/state_keeper/src/keeper.rs | 25 +- core/node/state_keeper/src/testonly/mod.rs | 7 +- .../src/testonly/test_batch_executor.rs | 7 +- core/node/state_keeper/src/tests/mod.rs | 3 +- core/node/state_keeper/src/updates/mod.rs | 20 +- core/node/test_utils/src/lib.rs | 9 + core/node/vm_runner/src/process.rs | 1 + core/node/vm_runner/src/storage.rs | 9 +- core/tests/vm-benchmark/src/vm.rs | 8 +- etc/multivm_bootloaders/vm_gateway/commit | 1 + .../fee_estimate.yul/fee_estimate.yul.zbin | Bin 0 -> 75296 bytes .../vm_gateway/gas_test.yul/gas_test.yul.zbin | Bin 0 -> 71392 bytes .../playground_batch.yul.zbin | Bin 0 -> 75424 bytes .../proved_batch.yul/proved_batch.yul.zbin | Bin 0 -> 71904 bytes prover/Cargo.lock | 1 + yarn.lock | 235 +- zkstack_cli/crates/config/src/contracts.rs | 2 + 188 files changed, 4838 insertions(+), 1207 deletions(-) create mode 100644 core/lib/constants/src/message_root.rs rename core/lib/dal/.sqlx/{query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json => query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json} (78%) rename core/lib/dal/.sqlx/{query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json => query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json} (83%) rename core/lib/dal/.sqlx/{query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json => query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json} (65%) rename core/lib/dal/.sqlx/{query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json => query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json} (70%) rename core/lib/dal/.sqlx/{query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json => query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json} (79%) rename core/lib/dal/.sqlx/{query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json => query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json} (79%) rename core/lib/dal/.sqlx/{query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json => query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json} (83%) rename core/lib/dal/.sqlx/{query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json => query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json} (80%) rename 
core/lib/dal/.sqlx/{query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json => query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json} (55%) rename core/lib/dal/.sqlx/{query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json => query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json} (73%) rename core/lib/dal/.sqlx/{query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json => query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json} (73%) rename core/lib/dal/.sqlx/{query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json => query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json} (77%) rename core/lib/dal/.sqlx/{query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json => query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json} (84%) create mode 100644 core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql create mode 100644 core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql create mode 100644 core/lib/multivm/src/pubdata_builders/mod.rs create mode 100644 core/lib/multivm/src/pubdata_builders/rollup.rs create mode 100644 core/lib/multivm/src/pubdata_builders/tests.rs create mode 100644 core/lib/multivm/src/pubdata_builders/utils.rs create mode 100644 core/lib/multivm/src/pubdata_builders/validium.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs create mode 100644 core/lib/types/src/commitment/tests/post_gateway_test.json create mode 100644 core/lib/vm_interface/src/pubdata/mod.rs create mode 100644 etc/multivm_bootloaders/vm_gateway/commit create mode 100644 etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_gateway/gas_test.yul/gas_test.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_gateway/playground_batch.yul/playground_batch.yul.zbin create mode 100644 etc/multivm_bootloaders/vm_gateway/proved_batch.yul/proved_batch.yul.zbin diff --git a/Cargo.lock b/Cargo.lock index a42ef8e3fdc..64ae0a9a12f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9667,6 +9667,7 @@ dependencies = [ "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", + "zksync_system_constants", "zksync_types", "zksync_utils", "zksync_web3_decl", @@ -10532,6 +10533,7 @@ dependencies = [ "zk_evm 0.150.6", "zksync_contracts", "zksync_eth_signer", + "zksync_mini_merkle_tree", "zksync_system_constants", "zksync_test_account", "zksync_types", diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 56ee3edfd25..70803a66311 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -110,7 +110,12 @@ pub(crate) struct RemoteENConfig { // the `l2_erc20_bridge_addr` and `l2_shared_bridge_addr` are basically the same contract, but with // a different name, with names adapted only for consistency. pub l1_shared_bridge_proxy_addr: Option
<Address>,
+    /// Contract address that serves as a shared bridge on L2.
+    /// It is expected that `L2SharedBridge` is used before gateway upgrade, and `L2AssetRouter` is used after.
     pub l2_shared_bridge_addr: Option<Address>,
+    /// Address of `L2SharedBridge` that was used before gateway upgrade.
+    /// `None` if chain genesis used post-gateway protocol version.
+    pub l2_legacy_shared_bridge_addr: Option<Address>,
     pub l1_erc20_bridge_proxy_addr: Option<Address>,
     pub l2_erc20_bridge_addr: Option<Address>,
     pub l1_weth_bridge_addr: Option<Address>
, @@ -189,6 +194,7 @@ impl RemoteENConfig { l2_erc20_bridge_addr: l2_erc20_default_bridge, l1_shared_bridge_proxy_addr: bridges.l1_shared_default_bridge, l2_shared_bridge_addr: l2_erc20_shared_bridge, + l2_legacy_shared_bridge_addr: bridges.l2_legacy_shared_bridge, l1_weth_bridge_addr: bridges.l1_weth_bridge, l2_weth_bridge_addr: bridges.l2_weth_bridge, base_token_addr, @@ -218,6 +224,7 @@ impl RemoteENConfig { l1_shared_bridge_proxy_addr: Some(Address::repeat_byte(5)), l1_weth_bridge_addr: None, l2_shared_bridge_addr: Some(Address::repeat_byte(6)), + l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(7)), l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup, dummy_verifier: true, } @@ -1403,6 +1410,7 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { l2_erc20_default_bridge: config.remote.l2_erc20_bridge_addr, l1_shared_default_bridge: config.remote.l1_shared_bridge_proxy_addr, l2_shared_default_bridge: config.remote.l2_shared_bridge_addr, + l2_legacy_shared_bridge: config.remote.l2_legacy_shared_bridge_addr, l1_weth_bridge: config.remote.l1_weth_bridge_addr, l2_weth_bridge: config.remote.l2_weth_bridge_addr, }, diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 7d848901353..883f3f8a5fa 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -55,6 +55,7 @@ use zksync_node_framework::{ service::{ZkStackService, ZkStackServiceBuilder}, }; use zksync_state::RocksdbStorageOptions; +use zksync_types::L2_NATIVE_TOKEN_VAULT_ADDRESS; use crate::{config::ExternalNodeConfig, metrics::framework::ExternalNodeMetricsLayer, Component}; @@ -192,11 +193,22 @@ impl ExternalNodeBuilder { // compression. const OPTIONAL_BYTECODE_COMPRESSION: bool = true; + let l2_shared_bridge_addr = self + .config + .remote + .l2_shared_bridge_addr + .context("Missing `l2_shared_bridge_addr`")?; + let l2_legacy_shared_bridge_addr = if l2_shared_bridge_addr == L2_NATIVE_TOKEN_VAULT_ADDRESS + { + // System has migrated to `L2_NATIVE_TOKEN_VAULT_ADDRESS`, use legacy shared bridge address from main node. + self.config.remote.l2_legacy_shared_bridge_addr + } else { + // System hasn't migrated on `L2_NATIVE_TOKEN_VAULT_ADDRESS`, we can safely use `l2_shared_bridge_addr`. + Some(l2_shared_bridge_addr) + }; + let persistence_layer = OutputHandlerLayer::new( - self.config - .remote - .l2_shared_bridge_addr - .expect("L2 shared bridge address is not set"), + l2_legacy_shared_bridge_addr, self.config.optional.l2_block_seal_queue_capacity, ) .with_pre_insert_txs(true) // EN requires txs to be pre-inserted. 
diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index a440d836b4c..f3c19138880 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -167,6 +167,7 @@ async fn create_l2_block( base_fee_per_gas: 0, gas_per_pubdata_limit: 0, batch_fee_input: Default::default(), + pubdata_params: Default::default(), base_system_contracts_hashes: Default::default(), protocol_version: Some(Default::default()), virtual_blocks: 0, diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 800da68ee50..16167975cf0 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -9,7 +9,7 @@ use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView, WriteStorage}, tracer::VmExecutionStopReason, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterface, VmInterfaceExt, }, tracers::dynamic::vm_1_5_0::DynTracer, @@ -271,8 +271,9 @@ pub(super) fn execute_internal_transfer_test() -> u32 { output: tracer_result.clone(), } .into_tracer_pointer(); + let mut vm: Vm<_, HistoryEnabled> = Vm::new(l1_batch, system_env, storage_view.to_rc_ptr()); - let result = vm.inspect(&mut tracer.into(), VmExecutionMode::Bootloader); + let result = vm.inspect(&mut tracer.into(), InspectExecutionMode::Bootloader); assert!(!result.result.is_failed(), "The internal call has reverted"); tracer_result.take() @@ -331,7 +332,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( let mut total_gas_refunded = 0; for tx in txs { vm.push_transaction(tx); - let tx_execution_result = vm.execute(VmExecutionMode::OneTx); + let tx_execution_result = vm.execute(InspectExecutionMode::OneTx); total_gas_refunded += tx_execution_result.refunds.gas_refunded; if !accept_failure { @@ -343,7 +344,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( } } - let result = vm.execute(VmExecutionMode::Bootloader); + let result = vm.execute(InspectExecutionMode::Bootloader); let metrics = result.get_execution_metrics(None); VmSpentResourcesResult { diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 234e2289424..e2bd487f22b 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -238,9 +238,7 @@ impl MainNodeBuilder { let wallets = self.wallets.clone(); let sk_config = try_load_config!(self.configs.state_keeper_config); let persistence_layer = OutputHandlerLayer::new( - self.contracts_config - .l2_shared_bridge_addr - .context("L2 shared bridge address")?, + self.contracts_config.l2_legacy_shared_bridge_addr, sk_config.l2_block_seal_queue_capacity, ) .with_protective_reads_persistence_enabled(sk_config.protective_reads_persistence_enabled); @@ -249,6 +247,8 @@ impl MainNodeBuilder { sk_config.clone(), try_load_config!(self.configs.mempool_config), try_load_config!(wallets.state_keeper), + self.contracts_config.l2_da_validator_addr, + self.genesis_config.l1_batch_commit_data_generator_mode, ); let db_config = try_load_config!(self.configs.db_config); let experimental_vm_config = self diff --git a/core/lib/basic_types/src/commitment.rs b/core/lib/basic_types/src/commitment.rs index eca339f40f4..0eed46aad78 100644 --- a/core/lib/basic_types/src/commitment.rs +++ b/core/lib/basic_types/src/commitment.rs @@ -1,10 +1,12 @@ +use 
std::str::FromStr; + use serde::{Deserialize, Serialize}; use strum::{Display, EnumIter}; use crate::{ ethabi, web3::contract::{Detokenize, Error as ContractError}, - U256, + Address, U256, }; #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, EnumIter, Display)] @@ -41,3 +43,23 @@ impl Detokenize for L1BatchCommitmentMode { } } } + +impl FromStr for L1BatchCommitmentMode { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + match s { + "Rollup" => Ok(Self::Rollup), + "Validium" => Ok(Self::Validium), + _ => { + Err("Incorrect l1 batch commitment mode type; expected one of `Rollup`, `Validium`") + } + } + } +} + +#[derive(Default, Copy, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct PubdataParams { + pub l2_da_validator_address: Address, + pub pubdata_type: L1BatchCommitmentMode, +} diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index e01586cdad7..ebecfaa1b87 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -69,6 +69,7 @@ pub enum ProtocolVersionId { Version24, Version25, Version26, + Version27, } impl ProtocolVersionId { @@ -122,6 +123,7 @@ impl ProtocolVersionId { ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version27 => VmVersion::VmGateway, } } @@ -139,6 +141,10 @@ impl ProtocolVersionId { self <= &Self::Version22 } + pub fn is_pre_gateway(&self) -> bool { + self <= &Self::Version26 + } + pub fn is_1_4_0(&self) -> bool { self >= &ProtocolVersionId::Version18 && self < &ProtocolVersionId::Version20 } @@ -278,6 +284,7 @@ impl From for VmVersion { ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version27 => VmVersion::VmGateway, } } } diff --git a/core/lib/basic_types/src/vm.rs b/core/lib/basic_types/src/vm.rs index c753bbfc818..f11f98596f1 100644 --- a/core/lib/basic_types/src/vm.rs +++ b/core/lib/basic_types/src/vm.rs @@ -16,6 +16,7 @@ pub enum VmVersion { Vm1_4_2, Vm1_5_0SmallBootloaderMemory, Vm1_5_0IncreasedBootloaderMemory, + VmGateway, } impl VmVersion { diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index b68720ebaef..0bf7aab3bca 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -29,7 +29,13 @@ pub struct ContractsConfig { pub diamond_proxy_addr: Address, pub validator_timelock_addr: Address, pub l1_shared_bridge_proxy_addr: Option
, + /// Contract address that serves as a shared bridge on L2. + /// It is expected that `L2SharedBridge` is used before gateway upgrade, and `L2AssetRouter` is used after. pub l2_shared_bridge_addr: Option
, + /// Address of `L2SharedBridge` that was used before gateway upgrade. + /// `None` if chain genesis used post-gateway protocol version. + /// If present it will be used as L2 token deployer address. + pub l2_legacy_shared_bridge_addr: Option
, pub l1_erc20_bridge_proxy_addr: Option
, pub l2_erc20_bridge_addr: Option
, pub l1_weth_bridge_proxy_addr: Option
, @@ -40,6 +46,7 @@ pub struct ContractsConfig { // Used by the RPC API and by the node builder in wiring the BaseTokenRatioProvider layer. pub base_token_addr: Option
, pub chain_admin_addr: Option
, + pub l2_da_validator_addr: Option
, } impl ContractsConfig { @@ -53,6 +60,7 @@ impl ContractsConfig { l2_erc20_bridge_addr: Some(Address::repeat_byte(0x0c)), l1_shared_bridge_proxy_addr: Some(Address::repeat_byte(0x0e)), l2_shared_bridge_addr: Some(Address::repeat_byte(0x0f)), + l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(0x19)), l1_weth_bridge_proxy_addr: Some(Address::repeat_byte(0x0b)), l2_weth_bridge_addr: Some(Address::repeat_byte(0x0c)), l2_testnet_paymaster_addr: Some(Address::repeat_byte(0x11)), @@ -61,6 +69,7 @@ impl ContractsConfig { base_token_addr: Some(Address::repeat_byte(0x14)), ecosystem_contracts: Some(EcosystemContracts::for_tests()), chain_admin_addr: Some(Address::repeat_byte(0x18)), + l2_da_validator_addr: Some(Address::repeat_byte(0x1a)), } } } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 880bc5aa98d..ce681cc0cc4 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -262,6 +262,7 @@ impl Distribution for EncodeDist { l2_erc20_bridge_addr: self.sample_opt(|| rng.gen()), l1_shared_bridge_proxy_addr: self.sample_opt(|| rng.gen()), l2_shared_bridge_addr: self.sample_opt(|| rng.gen()), + l2_legacy_shared_bridge_addr: self.sample_opt(|| rng.gen()), l1_weth_bridge_proxy_addr: self.sample_opt(|| rng.gen()), l2_weth_bridge_addr: self.sample_opt(|| rng.gen()), l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()), @@ -269,6 +270,7 @@ impl Distribution for EncodeDist { ecosystem_contracts: self.sample(rng), base_token_addr: self.sample_opt(|| rng.gen()), chain_admin_addr: self.sample_opt(|| rng.gen()), + l2_da_validator_addr: self.sample_opt(|| rng.gen()), } } } diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index fe37ef6c69f..4f0f362d914 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -135,12 +135,36 @@ pub const EVM_GAS_MANAGER_ADDRESS: Address = H160([ 0x00, 0x00, 0x80, 0x13, ]); -/// Note, that the `Create2Factory` is explicitly deployed on a non-system-contract address. 
pub const CREATE2_FACTORY_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, ]); +pub const L2_GENESIS_UPGRADE_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x01, +]); + +pub const L2_BRIDGEHUB_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x02, +]); + +pub const L2_ASSET_ROUTER_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x03, +]); + +pub const L2_NATIVE_TOKEN_VAULT_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x04, +]); + +pub const L2_MESSAGE_ROOT_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x05, +]); + pub const ERC20_TRANSFER_TOPIC: H256 = H256([ 221, 242, 82, 173, 27, 226, 200, 155, 105, 194, 176, 104, 252, 55, 141, 170, 149, 43, 167, 241, 99, 196, 161, 22, 40, 245, 90, 77, 245, 35, 179, 239, diff --git a/core/lib/constants/src/lib.rs b/core/lib/constants/src/lib.rs index 6aab79ad71f..30ae6a7b582 100644 --- a/core/lib/constants/src/lib.rs +++ b/core/lib/constants/src/lib.rs @@ -3,6 +3,7 @@ pub mod contracts; pub mod crypto; pub mod ethereum; pub mod fees; +pub mod message_root; pub mod system_context; pub mod system_logs; pub mod trusted_slots; diff --git a/core/lib/constants/src/message_root.rs b/core/lib/constants/src/message_root.rs new file mode 100644 index 00000000000..a8f4a034fb9 --- /dev/null +++ b/core/lib/constants/src/message_root.rs @@ -0,0 +1,5 @@ +// Position of `FullTree::_height` in `MessageRoot`'s storage layout. +pub const AGG_TREE_HEIGHT_KEY: usize = 3; + +// Position of `FullTree::nodes` in `MessageRoot`'s storage layout. 
+pub const AGG_TREE_NODES_KEY: usize = 5; diff --git a/core/lib/constants/src/system_logs.rs b/core/lib/constants/src/system_logs.rs index bd4167b3d02..aa2c2cc156c 100644 --- a/core/lib/constants/src/system_logs.rs +++ b/core/lib/constants/src/system_logs.rs @@ -1,11 +1,8 @@ /// The key of the system log with value of the L2->L1 logs tree root hash pub const L2_TO_L1_LOGS_TREE_ROOT_KEY: u32 = 0; -/// The key of the system log with value of the state diff hash -pub const STATE_DIFF_HASH_KEY: u32 = 2; +/// The key of the system log with value of the state diff hash for pre-gateway protocol versions +pub const STATE_DIFF_HASH_KEY_PRE_GATEWAY: u32 = 2; -/// The key of the system log with value of the first blob linear hash -pub const BLOB1_LINEAR_HASH_KEY: u32 = 7; - -/// The key of the system log with value of the second blob linear hash -pub const BLOB2_LINEAR_HASH_KEY: u32 = 8; +/// The key of the system log with value of the first blob linear hash for pre-gateway protocol versions +pub const BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY: u32 = 7; diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 0ee773abcd4..cb5be504c8a 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -516,6 +516,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn playground_gateway() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_gateway/playground_batch.yul/playground_batch.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn estimate_gas_pre_virtual_blocks() -> Self { let bootloader_bytecode = read_zbin_bytecode( "etc/multivm_bootloaders/vm_1_3_2/fee_estimate.yul/fee_estimate.yul.zbin", @@ -586,6 +593,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn estimate_gas_gateway() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn hashes(&self) -> BaseSystemContractsHashes { BaseSystemContractsHashes { bootloader: self.bootloader.hash, diff --git a/core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json b/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json similarity index 78% rename from core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json rename to core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json index dffd3ed8f9d..48adcd41267 100644 --- a/core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json +++ b/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n 
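The new constants above (`L2_GENESIS_UPGRADE_ADDRESS`, `L2_BRIDGEHUB_ADDRESS`, `L2_ASSET_ROUTER_ADDRESS`, `L2_NATIVE_TOKEN_VAULT_ADDRESS`, `L2_MESSAGE_ROOT_ADDRESS`) all share the `0x…0001_00xx` address layout. Purely as an illustration of that layout, the helper below derives such an address from its low 16-bit suffix; it is a hypothetical sketch and does not exist in the repository:

```rust
use zksync_basic_types::H160;

/// Hypothetical helper: builds an address of the form 0x00..00_0001_00xx from its
/// 16-bit suffix, the pattern followed by the new L2 contract constants above.
fn l2_system_contract_address(suffix: u16) -> H160 {
    let mut bytes = [0u8; 20];
    bytes[17] = 0x01;
    bytes[18..20].copy_from_slice(&suffix.to_be_bytes());
    H160(bytes)
}

fn main() {
    // 0x…0001_0002 is the value of `L2_BRIDGEHUB_ADDRESS` above.
    assert_eq!(
        l2_system_contract_address(0x0002),
        H160([
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x01, 0x00, 0x02,
        ])
    );
}
```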
pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -175,8 +195,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8" + "hash": "1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7" } diff --git a/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json b/core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json similarity index 83% rename from core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json rename to core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json index c8c438295e4..5c4ce3d6a4e 100644 --- a/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json +++ b/core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type\n FROM\n miniblocks\n ORDER 
BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -92,6 +92,16 @@ "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "l2_da_validator_address", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pubdata_type", + "type_info": "Text" } ], "parameters": { @@ -115,8 +125,10 @@ false, true, true, - true + true, + false, + false ] }, - "hash": "a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756" + "hash": "250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8" } diff --git a/core/lib/dal/.sqlx/query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json b/core/lib/dal/.sqlx/query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json similarity index 65% rename from core/lib/dal/.sqlx/query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json rename to core/lib/dal/.sqlx/query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json index ecf54f0417b..ffe785d754c 100644 --- a/core/lib/dal/.sqlx/query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json +++ b/core/lib/dal/.sqlx/query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE l1_batches\n SET\n commitment = $1,\n aux_data_hash = $2,\n pass_through_data_hash = $3,\n meta_parameters_hash = $4,\n l2_l1_merkle_root = $5,\n zkporter_is_available = $6,\n compressed_state_diffs = $7,\n compressed_initial_writes = $8,\n compressed_repeated_writes = $9,\n updated_at = NOW()\n WHERE\n number = $10\n AND commitment IS NULL\n ", + "query": "\n UPDATE l1_batches\n SET\n commitment = $1,\n aux_data_hash = $2,\n pass_through_data_hash = $3,\n meta_parameters_hash = $4,\n l2_l1_merkle_root = $5,\n zkporter_is_available = $6,\n compressed_state_diffs = $7,\n compressed_initial_writes = $8,\n compressed_repeated_writes = $9,\n state_diff_hash = $10,\n aggregation_root = $11,\n local_root = $12,\n updated_at = NOW()\n WHERE\n number = $13\n AND commitment IS NULL\n ", "describe": { "columns": [], "parameters": { @@ -14,10 +14,13 @@ "Bytea", "Bytea", "Bytea", + "Bytea", + "Bytea", + "Bytea", "Int8" ] }, "nullable": [] }, - "hash": "55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285" + "hash": "398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388" } diff --git a/core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json b/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json similarity index 70% rename from core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json rename to core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json index 8c22b4f92c4..11bff110293 100644 --- a/core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json +++ b/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n 
aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -181,8 +201,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf" + "hash": "45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746" } diff --git a/core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json b/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json similarity index 79% rename from core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json rename to 
core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json index e55d10d6f9a..66d3e18075b 100644 --- a/core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json +++ b/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -179,8 +199,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7" + "hash": "4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970" } diff --git a/core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json b/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json similarity index 79% rename from core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json rename to core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json index 84f677a36c8..dfdb4b6c82e 100644 --- a/core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json +++ b/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n 
number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -177,8 +197,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e" + "hash": "62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37" } diff --git a/core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json b/core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json similarity index 83% rename from core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json rename to core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json index b8f8db874b6..6cc2e22382d 100644 --- a/core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json +++ b/core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n 
miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\",\n miniblocks.l2_da_validator_address AS \"l2_da_validator_address!\",\n miniblocks.pubdata_type AS \"pubdata_type!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", "describe": { "columns": [ { @@ -72,6 +72,16 @@ "ordinal": 13, "name": "fee_account_address!", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "l2_da_validator_address!", + "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "pubdata_type!", + "type_info": "Text" } ], "parameters": { @@ -94,8 +104,10 @@ false, false, true, + false, + false, false ] }, - "hash": "2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f" + "hash": "7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2" } diff --git a/core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json b/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json similarity index 80% rename from core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json rename to core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json index 80a6946026b..f4e08abe31c 100644 --- a/core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json +++ b/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n 
compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -177,8 +197,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960" + "hash": "77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9" } diff --git a/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json b/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json similarity index 55% rename from core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json rename to core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json index 35c606bf22b..f89f531c446 100644 --- a/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json +++ b/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n NOW(),\n NOW()\n )\n ", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", "describe": { "columns": [], "parameters": { @@ -22,10 +22,12 @@ "Int8", "Int8", "Int8", - "Bytea" + "Bytea", + "Bytea", + "Text" ] }, "nullable": [] }, - "hash": "34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04" + "hash": 
"7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba" } diff --git a/core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json b/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json similarity index 73% rename from core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json rename to core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json index 4f138822ad1..9a93ba45978 100644 --- a/core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json +++ b/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -178,8 +198,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8" + "hash": 
"a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789" } diff --git a/core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json b/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json similarity index 73% rename from core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json rename to core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json index afac14e6d5c..8a68b1a9b9b 100644 --- a/core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json +++ b/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": 
"state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -180,8 +200,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b" + "hash": "b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd" } diff --git a/core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json b/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json similarity index 77% rename from core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json rename to core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json index 4eae4f778ce..f97ea8a6ccd 100644 --- a/core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json +++ b/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -177,8 +197,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2" + "hash": 
"c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b" } diff --git a/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json b/core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json similarity index 84% rename from core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json rename to core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json index 700352c1a8b..111234e02b7 100644 --- a/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json +++ b/core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -92,6 +92,16 @@ "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "l2_da_validator_address", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pubdata_type", + "type_info": "Text" } ], "parameters": { @@ -117,8 +127,10 @@ false, true, true, - true + true, + false, + false ] }, - "hash": "f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8" + "hash": "d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9" } diff --git a/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql b/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql new file mode 100644 index 00000000000..9af34d7dc8e --- /dev/null +++ b/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql @@ -0,0 +1,8 @@ +ALTER TABLE l1_batches DROP COLUMN IF EXISTS state_diff_hash BYTEA; + +ALTER TABLE l1_batches DROP COLUMN IF EXISTS aggregation_root; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS local_root; + +ALTER TABLE miniblocks + DROP COLUMN IF EXISTS l2_da_validator_address, + DROP COLUMN IF EXISTS pubdata_type; diff --git a/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql b/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql new file mode 100644 index 00000000000..a58464f6ebb --- /dev/null +++ b/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql @@ -0,0 +1,11 @@ +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS state_diff_hash BYTEA; + +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS aggregation_root BYTEA; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS local_root BYTEA; + +ALTER TABLE miniblocks + ADD COLUMN IF NOT EXISTS l2_da_validator_address BYTEA NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea, + -- There are miniblocks that used the `Rollup' type, but were actually used on a Validium 
chain. + -- This is okay, since this field represents how the VM works with the DA, rather what is committed on L1. + ADD COLUMN IF NOT EXISTS pubdata_type TEXT NOT NULL DEFAULT 'Rollup'; +-- ^ Add a default value so that DB queries don't fail even if the DB migration is not completed. diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index f71dc68ce75..943aa12caf7 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -344,10 +344,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE is_sealed AND number = $1 @@ -841,6 +848,8 @@ impl BlocksDal<'_, '_> { fair_pubdata_price, gas_limit, logs_bloom, + l2_da_validator_address, + pubdata_type, created_at, updated_at ) @@ -864,6 +873,8 @@ impl BlocksDal<'_, '_> { $16, $17, $18, + $19, + $20, NOW(), NOW() ) @@ -896,6 +907,11 @@ impl BlocksDal<'_, '_> { l2_block_header.batch_fee_input.fair_pubdata_price() as i64, l2_block_header.gas_limit as i64, l2_block_header.logs_bloom.as_bytes(), + l2_block_header + .pubdata_params + .l2_da_validator_address + .as_bytes(), + l2_block_header.pubdata_params.pubdata_type.to_string(), ); instrumentation.with(query).execute(self.storage).await?; @@ -924,7 +940,9 @@ impl BlocksDal<'_, '_> { virtual_blocks, fair_pubdata_price, gas_limit, - logs_bloom + logs_bloom, + l2_da_validator_address, + pubdata_type FROM miniblocks ORDER BY @@ -965,7 +983,9 @@ impl BlocksDal<'_, '_> { virtual_blocks, fair_pubdata_price, gas_limit, - logs_bloom + logs_bloom, + l2_da_validator_address, + pubdata_type FROM miniblocks WHERE @@ -1062,9 +1082,12 @@ impl BlocksDal<'_, '_> { compressed_state_diffs = $7, compressed_initial_writes = $8, compressed_repeated_writes = $9, + state_diff_hash = $10, + aggregation_root = $11, + local_root = $12, updated_at = NOW() WHERE - number = $10 + number = $13 AND commitment IS NULL "#, commitment_artifacts.commitment_hash.commitment.as_bytes(), @@ -1082,6 +1105,9 @@ impl BlocksDal<'_, '_> { commitment_artifacts.compressed_state_diffs, commitment_artifacts.compressed_initial_writes, commitment_artifacts.compressed_repeated_writes, + commitment_artifacts.state_diff_hash.as_bytes(), + commitment_artifacts.aggregation_root.as_bytes(), + commitment_artifacts.local_root.as_bytes(), i64::from(number.0), ) .instrument("save_l1_batch_commitment_artifacts") @@ -1189,10 +1215,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE number = 0 OR eth_commit_tx_id IS NOT NULL @@ -1377,10 +1410,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON 
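The migration above stores the new per-miniblock fields as a BYTEA address and a TEXT enum defaulting to `Rollup`. Below is a minimal sketch of the round trip the DAL performs (write side as in the miniblock insert query above, read side as in the `StorageL2BlockHeader` conversion later in this patch). It is illustrative only and assumes the `zksync_types` paths used elsewhere in this diff:

```rust
use std::str::FromStr;

use zksync_types::{
    commitment::{L1BatchCommitmentMode, PubdataParams},
    Address,
};

// Write side: the validator address becomes raw bytes and the mode its
// `Display` string ("Rollup" / "Validium"), matching the TEXT column default.
fn to_db(params: PubdataParams) -> (Vec<u8>, String) {
    (
        params.l2_da_validator_address.as_bytes().to_vec(),
        params.pubdata_type.to_string(),
    )
}

// Read side: parse the text back via the new `FromStr` impl and rebuild the
// address from its 20 bytes.
fn from_db(addr: &[u8], pubdata_type: &str) -> PubdataParams {
    PubdataParams {
        l2_da_validator_address: Address::from_slice(addr),
        pubdata_type: L1BatchCommitmentMode::from_str(pubdata_type).unwrap(),
    }
}

fn main() {
    let params = PubdataParams::default();
    let (addr, kind) = to_db(params);
    assert_eq!(from_db(&addr, &kind), params);
}
```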
data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL @@ -1459,7 +1499,11 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM ( SELECT @@ -1480,6 +1524,7 @@ impl BlocksDal<'_, '_> { $2 ) inn LEFT JOIN commitments ON commitments.l1_batch_number = inn.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number WHERE number - row_number = $1 "#, @@ -1534,10 +1579,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL @@ -1663,10 +1715,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE number BETWEEN $1 AND $2 ORDER BY @@ -1729,11 +1788,18 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NULL AND number != 0 @@ -1809,7 +1875,11 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number diff --git a/core/lib/dal/src/consensus/conv.rs b/core/lib/dal/src/consensus/conv.rs index 269c47fa2dd..2b8488dd0c2 100644 --- a/core/lib/dal/src/consensus/conv.rs +++ b/core/lib/dal/src/consensus/conv.rs @@ -4,7 +4,9 @@ use zksync_concurrency::net; use zksync_consensus_roles::{attester, node}; use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; use zksync_types::{ - abi, ethabi, + abi, + commitment::{L1BatchCommitmentMode, PubdataParams}, + ethabi, fee::Fee, l1::{OpProcessingType, PriorityQueueType}, l2::TransactionType, @@ -135,6 +137,20 @@ impl ProtoFmt for Payload { } } + let pubdata_params = if let Some(pubdata_params) = &r.pubdata_params { + Some(PubdataParams { + l2_da_validator_address: required(&pubdata_params.l2_da_validator_address) + .and_then(|a| parse_h160(a)) + .context("l2_da_validator_address")?, + pubdata_type: required(&pubdata_params.pubdata_type) + .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?)) + .context("pubdata_type")? 
+ .parse(), + }) + } else { + None + }; + Ok(Self { protocol_version, hash: required(&r.hash) @@ -153,6 +169,7 @@ impl ProtoFmt for Payload { .context("operator_address")?, transactions, last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, + pubdata_params, }) } @@ -171,6 +188,16 @@ impl ProtoFmt for Payload { transactions: vec![], transactions_v25: vec![], last_in_batch: Some(self.last_in_batch), + pubdata_params: self + .pubdata_params + .map(|pubdata_params| proto::PubdataParams { + l2_da_validator_address: Some( + pubdata_params.l2_da_validator_address.as_bytes().into(), + ), + pubdata_type: Some(proto::L1BatchCommitDataGeneratorMode::new( + &pubdata_params.pubdata_type, + ) as i32), + }), }; match self.protocol_version { v if v >= ProtocolVersionId::Version25 => { @@ -517,3 +544,19 @@ impl ProtoRepr for proto::AttesterCommittee { } } } + +impl proto::L1BatchCommitDataGeneratorMode { + pub(crate) fn new(n: &L1BatchCommitmentMode) -> Self { + match n { + L1BatchCommitmentMode::Rollup => Self::Rollup, + L1BatchCommitmentMode::Validium => Self::Validium, + } + } + + pub(crate) fn parse(&self) -> L1BatchCommitmentMode { + match self { + Self::Rollup => L1BatchCommitmentMode::Rollup, + Self::Validium => L1BatchCommitmentMode::Validium, + } + } +} diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 8e88265730e..c7e46b2cf1b 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -2,7 +2,9 @@ use std::collections::BTreeMap; use zksync_concurrency::net; use zksync_consensus_roles::{attester, node, validator}; -use zksync_types::{ethabi, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256}; +use zksync_types::{ + commitment::PubdataParams, ethabi, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256, +}; mod conv; pub mod proto; @@ -46,6 +48,7 @@ pub struct Payload { pub operator_address: Address, pub transactions: Vec, pub last_in_batch: bool, + pub pubdata_params: Option, } impl Payload { diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index 421904bf966..49a69e8a36e 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -26,6 +26,12 @@ message Payload { // Set for protocol_version >= 25. 
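In the consensus payload the new field is optional at both the Rust and protobuf level, and the sync DAL in this patch simply forwards the stored params. The sketch below only illustrates why `Option<PubdataParams>` is a natural shape, gating on the `is_pre_gateway()` helper added earlier; it is illustrative and not code from the patch:

```rust
use zksync_types::{commitment::PubdataParams, ProtocolVersionId};

/// Illustrative only: for payloads from pre-gateway protocol versions there is
/// nothing meaningful to attach, so the field can stay `None` (and the protobuf
/// field is declared `optional`).
fn payload_pubdata_params(
    version: ProtocolVersionId,
    params: PubdataParams,
) -> Option<PubdataParams> {
    if version.is_pre_gateway() {
        None
    } else {
        Some(params)
    }
}

fn main() {
    let params = PubdataParams::default();
    assert!(payload_pubdata_params(ProtocolVersionId::Version25, params).is_none());
    assert!(payload_pubdata_params(ProtocolVersionId::Version27, params).is_some());
}
```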
repeated TransactionV25 transactions_v25 = 12; optional bool last_in_batch = 10; // required + optional PubdataParams pubdata_params = 13; // optional +} + +message PubdataParams { + optional bytes l2_da_validator_address = 1; // required; H160 + optional L1BatchCommitDataGeneratorMode pubdata_type = 2; // required } message L1Transaction { @@ -142,3 +148,8 @@ message AttestationStatus { optional roles.validator.GenesisHash genesis = 1; // required optional uint64 next_batch_to_attest = 2; // required } + +enum L1BatchCommitDataGeneratorMode { + Rollup = 0; + Validium = 1; +} diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index e8342b7446c..c9fd91748b2 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -9,7 +9,9 @@ use zksync_protobuf::{ }; use zksync_test_account::Account; use zksync_types::{ - web3::Bytes, Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction, + commitment::{L1BatchCommitmentMode, PubdataParams}, + web3::Bytes, + Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction, }; use super::*; @@ -51,6 +53,13 @@ fn payload(rng: &mut impl Rng, protocol_version: ProtocolVersionId) -> Payload { }) .collect(), last_in_batch: rng.gen(), + pubdata_params: Some(PubdataParams { + pubdata_type: match rng.gen_range(0..2) { + 0 => L1BatchCommitmentMode::Rollup, + _ => L1BatchCommitmentMode::Validium, + }, + l2_da_validator_address: rng.gen(), + }), } } diff --git a/core/lib/dal/src/consensus_dal/tests.rs b/core/lib/dal/src/consensus_dal/tests.rs index 772e7b2bf5e..694abc8508b 100644 --- a/core/lib/dal/src/consensus_dal/tests.rs +++ b/core/lib/dal/src/consensus_dal/tests.rs @@ -131,6 +131,9 @@ async fn test_batch_certificate() { compressed_repeated_writes: None, zkporter_is_available: false, aux_commitments: None, + aggregation_root: rng.gen(), + local_root: rng.gen(), + state_diff_hash: rng.gen(), }, ) .await diff --git a/core/lib/dal/src/eth_watcher_dal.rs b/core/lib/dal/src/eth_watcher_dal.rs index bdfc7f24c7b..062ad47219d 100644 --- a/core/lib/dal/src/eth_watcher_dal.rs +++ b/core/lib/dal/src/eth_watcher_dal.rs @@ -107,7 +107,7 @@ mod tests { async fn test_get_or_set_next_block_to_process_with_different_event_types() { let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); - let mut dal = conn.processed_events_dal(); + let mut dal = conn.eth_watcher_dal(); // Test with ProtocolUpgrades let next_block = dal diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index fbe225beb90..20b428adec4 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -131,7 +131,7 @@ where fn base_token_dal(&mut self) -> BaseTokenDal<'_, 'a>; - fn processed_events_dal(&mut self) -> EthWatcherDal<'_, 'a>; + fn eth_watcher_dal(&mut self) -> EthWatcherDal<'_, 'a>; } #[derive(Clone, Debug)] @@ -255,7 +255,7 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { BaseTokenDal { storage: self } } - fn processed_events_dal(&mut self) -> EthWatcherDal<'_, 'a> { + fn eth_watcher_dal(&mut self) -> EthWatcherDal<'_, 'a> { EthWatcherDal { storage: self } } } diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 3bb433a05cf..159ed71cc3e 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -7,7 +7,7 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ api, block::{L1BatchHeader, L2BlockHeader, 
UnsealedL1BatchHeader}, - commitment::{L1BatchMetaParameters, L1BatchMetadata}, + commitment::{L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, PubdataParams}, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput}, l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, @@ -155,6 +155,10 @@ pub(crate) struct StorageL1Batch { pub bootloader_initial_content_commitment: Option>, pub pubdata_input: Option>, pub fee_address: Vec, + pub aggregation_root: Option>, + pub local_root: Option>, + pub state_diff_hash: Option>, + pub inclusion_data: Option>, } impl StorageL1Batch { @@ -263,6 +267,10 @@ impl TryFrom for L1BatchMetadata { bootloader_initial_content_commitment: batch .bootloader_initial_content_commitment .map(|v| H256::from_slice(&v)), + state_diff_hash: batch.state_diff_hash.map(|v| H256::from_slice(&v)), + local_root: batch.local_root.map(|v| H256::from_slice(&v)), + aggregation_root: batch.aggregation_root.map(|v| H256::from_slice(&v)), + da_inclusion_data: batch.inclusion_data, }) } } @@ -485,6 +493,8 @@ pub(crate) struct StorageL2BlockHeader { /// This value should bound the maximal amount of gas that can be spent by transactions in the miniblock. pub gas_limit: Option, pub logs_bloom: Option>, + pub l2_da_validator_address: Vec, + pub pubdata_type: String, } impl From for L2BlockHeader { @@ -532,6 +542,10 @@ impl From for L2BlockHeader { .logs_bloom .map(|b| Bloom::from_slice(&b)) .unwrap_or_default(), + pubdata_params: PubdataParams { + l2_da_validator_address: Address::from_slice(&row.l2_da_validator_address), + pubdata_type: L1BatchCommitmentMode::from_str(&row.pubdata_type).unwrap(), + }, } } } diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 7a4ebe074fe..0eb65a606d1 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -1,7 +1,11 @@ +use std::str::FromStr; + use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::error::SqlxContext; use zksync_types::{ - api::en, parse_h160, parse_h256, parse_h256_opt, Address, L1BatchNumber, L2BlockNumber, + api::en, + commitment::{L1BatchCommitmentMode, PubdataParams}, + parse_h160, parse_h256, parse_h256_opt, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256, }; @@ -25,6 +29,8 @@ pub(crate) struct StorageSyncBlock { pub protocol_version: i32, pub virtual_blocks: i64, pub hash: Vec, + pub l2_da_validator_address: Vec, + pub pubdata_type: String, } pub(crate) struct SyncBlock { @@ -40,6 +46,7 @@ pub(crate) struct SyncBlock { pub virtual_blocks: u32, pub hash: H256, pub protocol_version: ProtocolVersionId, + pub pubdata_params: PubdataParams, } impl TryFrom for SyncBlock { @@ -89,6 +96,12 @@ impl TryFrom for SyncBlock { .decode_column("virtual_blocks")?, hash: parse_h256(&block.hash).decode_column("hash")?, protocol_version: parse_protocol_version(block.protocol_version)?, + pubdata_params: PubdataParams { + pubdata_type: L1BatchCommitmentMode::from_str(&block.pubdata_type) + .decode_column("Invalid pubdata type")?, + l2_da_validator_address: parse_h160(&block.l2_da_validator_address) + .decode_column("l2_da_validator_address")?, + }, }) } } @@ -109,6 +122,7 @@ impl SyncBlock { virtual_blocks: Some(self.virtual_blocks), hash: Some(self.hash), protocol_version: self.protocol_version, + pubdata_params: Some(self.pubdata_params), } } @@ -125,6 +139,7 @@ impl SyncBlock { 
operator_address: self.fee_account_address, transactions, last_in_batch: self.last_in_batch, + pubdata_params: Some(self.pubdata_params), } } } diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 265c6135488..55e6543c028 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -56,7 +56,9 @@ impl SyncDal<'_, '_> { miniblocks.virtual_blocks, miniblocks.hash, miniblocks.protocol_version AS "protocol_version!", - miniblocks.fee_account_address AS "fee_account_address!" + miniblocks.fee_account_address AS "fee_account_address!", + miniblocks.l2_da_validator_address AS "l2_da_validator_address!", + miniblocks.pubdata_type AS "pubdata_type!" FROM miniblocks WHERE diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index bf85008f7b5..baa2ee58485 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -4,6 +4,7 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::connection_pool::ConnectionPool; use zksync_types::{ block::{L1BatchHeader, L2BlockHasher, L2BlockHeader}, + commitment::PubdataParams, fee::Fee, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, @@ -52,6 +53,7 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: PubdataParams::default(), } } diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 298c43b80cc..3792f356be4 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -63,6 +63,7 @@ mod tests { l2_weth_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l1_shared_bridge_proxy_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l2_shared_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), + l2_legacy_shared_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l2_testnet_paymaster_addr: Some(addr("FC073319977e314F251EAE6ae6bE76B0B3BAeeCF")), l1_multicall3_addr: addr("0xcA11bde05977b3631167028862bE2a173976CA11"), ecosystem_contracts: Some(EcosystemContracts { @@ -72,6 +73,7 @@ mod tests { }), base_token_addr: Some(SHARED_BRIDGE_ETHER_TOKEN_ADDRESS), chain_admin_addr: Some(addr("0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), + l2_da_validator_addr: Some(addr("0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), } } @@ -93,11 +95,13 @@ CONTRACTS_L2_CONSENSUS_REGISTRY_ADDR="D64e136566a9E04eb05B30184fF577F52682D182" CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" CONTRACTS_L1_SHARED_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" +CONTRACTS_L2_LEGACY_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_BRIDGEHUB_PROXY_ADDR="0x35ea7f92f4c5f433efe15284e99c040110cf6297" CONTRACTS_STATE_TRANSITION_PROXY_ADDR="0xd90f1c081c6117241624e97cb6147257c3cb2097" CONTRACTS_TRANSPARENT_PROXY_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347e5" CONTRACTS_BASE_TOKEN_ADDR="0x0000000000000000000000000000000000000001" CONTRACTS_CHAIN_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff" +CONTRACTS_L2_DA_VALIDATOR_ADDR="0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff" "#; lock.set_env(config); diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 5e788509461..de115cf6e7a 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ 
b/core/lib/eth_client/src/clients/http/query.rs @@ -15,7 +15,7 @@ use crate::{ BaseFees, EthFeeInterface, EthInterface, RawTransactionBytes, }; -const FEE_HISTORY_MAX_REQUEST_CHUNK: usize = 1024; +const FEE_HISTORY_MAX_REQUEST_CHUNK: usize = 1023; #[async_trait] impl EthInterface for T @@ -304,14 +304,14 @@ where COUNTERS.call[&(Method::BaseFeeHistory, client.component())].inc(); let latency = LATENCIES.direct[&Method::BaseFeeHistory].start(); let mut history = Vec::with_capacity(block_count); - let from_block = upto_block.saturating_sub(block_count); + let from_block = upto_block.saturating_sub(block_count - 1); // Here we are requesting `fee_history` from blocks // `(from_block; upto_block)` in chunks of size `MAX_REQUEST_CHUNK` // starting from the oldest block. for chunk_start in (from_block..=upto_block).step_by(FEE_HISTORY_MAX_REQUEST_CHUNK) { let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); - let chunk_size = chunk_end - chunk_start; + let chunk_size = chunk_end - chunk_start + 1; let fee_history = client .fee_history( @@ -324,22 +324,50 @@ where .with_arg("block", &chunk_end) .await?; - // Check that the lengths are the same. + if fee_history.oldest_block != web3::BlockNumber::Number(chunk_start.into()) { + let oldest_block = match fee_history.oldest_block { + web3::BlockNumber::Number(oldest_block) => oldest_block.to_string(), + _ => format!("{:?}", fee_history.oldest_block), + }; + let message = + format!("unexpected `oldest_block`, expected: {chunk_start}, got {oldest_block}"); + return Err(EnrichedClientError::custom(message, "l1_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); + } + + if fee_history.base_fee_per_gas.len() != chunk_size + 1 { + let message = format!( + "unexpected `base_fee_per_gas.len()`, expected: {}, got {}", + chunk_size + 1, + fee_history.base_fee_per_gas.len() + ); + return Err(EnrichedClientError::custom(message, "l1_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); + } + // Per specification, the values should always be provided, and must be 0 for blocks // prior to EIP-4844. // https://ethereum.github.io/execution-apis/api-documentation/ - if fee_history.base_fee_per_gas.len() != fee_history.base_fee_per_blob_gas.len() { - tracing::error!( - "base_fee_per_gas and base_fee_per_blob_gas have different lengths: {} and {}", - fee_history.base_fee_per_gas.len(), + if fee_history.base_fee_per_blob_gas.len() != chunk_size + 1 { + let message = format!( + "unexpected `base_fee_per_blob_gas.len()`, expected: {}, got {}", + chunk_size + 1, fee_history.base_fee_per_blob_gas.len() ); + return Err(EnrichedClientError::custom(message, "l1_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); } + // We take `chunk_size` entries for consistency with `l2_base_fee_history` which doesn't + // have correct data for block with number `upto_block + 1`. 
for (base, blob) in fee_history .base_fee_per_gas .into_iter() .zip(fee_history.base_fee_per_blob_gas) + .take(chunk_size) { let fees = BaseFees { base_fee_per_gas: cast_to_u64(base, "base_fee_per_gas")?, @@ -387,14 +415,14 @@ where COUNTERS.call[&(Method::L2FeeHistory, client.component())].inc(); let latency = LATENCIES.direct[&Method::BaseFeeHistory].start(); let mut history = Vec::with_capacity(block_count); - let from_block = upto_block.saturating_sub(block_count); + let from_block = upto_block.saturating_sub(block_count - 1); // Here we are requesting `fee_history` from blocks // `(from_block; upto_block)` in chunks of size `FEE_HISTORY_MAX_REQUEST_CHUNK` // starting from the oldest block. for chunk_start in (from_block..=upto_block).step_by(FEE_HISTORY_MAX_REQUEST_CHUNK) { let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); - let chunk_size = chunk_end - chunk_start; + let chunk_size = chunk_end - chunk_start + 1; let fee_history = client .fee_history(U64::from(chunk_size).into(), chunk_end.into(), vec![]) @@ -403,19 +431,46 @@ where .with_arg("block", &chunk_end) .await?; - // Check that the lengths are the same. - if fee_history.inner.base_fee_per_gas.len() != fee_history.l2_pubdata_price.len() { - tracing::error!( - "base_fee_per_gas and pubdata_price have different lengths: {} and {}", - fee_history.inner.base_fee_per_gas.len(), + if fee_history.inner.oldest_block != web3::BlockNumber::Number(chunk_start.into()) { + let oldest_block = match fee_history.inner.oldest_block { + web3::BlockNumber::Number(oldest_block) => oldest_block.to_string(), + _ => format!("{:?}", fee_history.inner.oldest_block), + }; + let message = + format!("unexpected `oldest_block`, expected: {chunk_start}, got {oldest_block}"); + return Err(EnrichedClientError::custom(message, "l2_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); + } + + if fee_history.inner.base_fee_per_gas.len() != chunk_size + 1 { + let message = format!( + "unexpected `base_fee_per_gas.len()`, expected: {}, got {}", + chunk_size + 1, + fee_history.inner.base_fee_per_gas.len() + ); + return Err(EnrichedClientError::custom(message, "l2_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); + } + + if fee_history.l2_pubdata_price.len() != chunk_size { + let message = format!( + "unexpected `l2_pubdata_price.len()`, expected: {}, got {}", + chunk_size + 1, fee_history.l2_pubdata_price.len() ); + return Err(EnrichedClientError::custom(message, "l2_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); } + // We take `chunk_size` entries because base fee for block `upto_block + 1` may change. 
for (base, l2_pubdata_price) in fee_history .inner .base_fee_per_gas .into_iter() + .take(chunk_size) .zip(fee_history.l2_pubdata_price) { let fees = BaseFees { diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index b33554b6292..8e81b6c6f20 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -415,25 +415,35 @@ fn l2_eth_fee_history( let from_block = from_block.as_usize(); let start_block = from_block.saturating_sub(block_count.as_usize() - 1); + // duplicates last value to follow `feeHistory` response format, it should return `block_count + 1` values + let base_fee_per_gas = base_fee_history[start_block..=from_block] + .iter() + .chain([&base_fee_history[from_block]]) + .map(|fee| U256::from(fee.base_fee_per_gas)) + .collect(); + + // duplicates last value to follow `feeHistory` response format, it should return `block_count + 1` values + let base_fee_per_blob_gas = base_fee_history[start_block..=from_block] + .iter() + .chain([&base_fee_history[from_block]]) // duplicate last value + .map(|fee| fee.base_fee_per_blob_gas) + .collect(); + + let l2_pubdata_price = base_fee_history[start_block..=from_block] + .iter() + .map(|fee| fee.l2_pubdata_price) + .collect(); + FeeHistory { inner: web3::FeeHistory { oldest_block: start_block.into(), - base_fee_per_gas: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| U256::from(fee.base_fee_per_gas)) - .collect(), - base_fee_per_blob_gas: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| fee.base_fee_per_blob_gas) - .collect(), + base_fee_per_gas, + base_fee_per_blob_gas, gas_used_ratio: vec![], // not used blob_gas_used_ratio: vec![], // not used reward: None, }, - l2_pubdata_price: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| fee.l2_pubdata_price) - .collect(), + l2_pubdata_price, } } diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index e49086a6b8b..eb770bf9b57 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -29,6 +29,7 @@ zksync_contracts.workspace = true zksync_utils.workspace = true zksync_system_constants.workspace = true zksync_vm_interface.workspace = true +zksync_mini_merkle_tree.workspace = true anyhow.workspace = true hex.workspace = true diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index 520274c14ae..1cba2c0fb92 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -20,6 +20,7 @@ pub use crate::{ }; mod glue; +pub mod pubdata_builders; pub mod tracers; pub mod utils; mod versions; diff --git a/core/lib/multivm/src/pubdata_builders/mod.rs b/core/lib/multivm/src/pubdata_builders/mod.rs new file mode 100644 index 00000000000..c52c4c70c86 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/mod.rs @@ -0,0 +1,24 @@ +use std::rc::Rc; + +pub use rollup::RollupPubdataBuilder; +pub use validium::ValidiumPubdataBuilder; +use zksync_types::commitment::{L1BatchCommitmentMode, PubdataParams}; + +use crate::interface::pubdata::PubdataBuilder; + +mod rollup; +#[cfg(test)] +mod tests; +mod utils; +mod validium; + +pub fn pubdata_params_to_builder(params: PubdataParams) -> Rc { + match params.pubdata_type { + L1BatchCommitmentMode::Rollup => { + Rc::new(RollupPubdataBuilder::new(params.l2_da_validator_address)) + } + L1BatchCommitmentMode::Validium => { + Rc::new(ValidiumPubdataBuilder::new(params.l2_da_validator_address)) + } + } +} diff --git a/core/lib/multivm/src/pubdata_builders/rollup.rs 
b/core/lib/multivm/src/pubdata_builders/rollup.rs new file mode 100644 index 00000000000..4a818dfe231 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/rollup.rs @@ -0,0 +1,128 @@ +use zksync_types::{ + ethabi, + ethabi::{ParamType, Token}, + l2_to_l1_log::l2_to_l1_logs_tree_size, + writes::compress_state_diffs, + Address, ProtocolVersionId, +}; + +use super::utils::{ + build_chained_bytecode_hash, build_chained_log_hash, build_chained_message_hash, + build_logs_root, encode_user_logs, +}; +use crate::interface::pubdata::{PubdataBuilder, PubdataInput}; + +#[derive(Debug, Clone, Copy)] +pub struct RollupPubdataBuilder { + pub l2_da_validator: Address, +} + +impl RollupPubdataBuilder { + pub fn new(l2_da_validator: Address) -> Self { + Self { l2_da_validator } + } +} + +impl PubdataBuilder for RollupPubdataBuilder { + fn l2_da_validator(&self) -> Address { + self.l2_da_validator + } + + fn l1_messenger_operator_input( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + if protocol_version.is_pre_gateway() { + let mut operator_input = vec![]; + extend_from_pubdata_input(&mut operator_input, input); + + // Extend with uncompressed state diffs. + operator_input.extend((input.state_diffs.len() as u32).to_be_bytes()); + for state_diff in &input.state_diffs { + operator_input.extend(state_diff.encode_padded()); + } + + operator_input + } else { + let mut pubdata = vec![]; + extend_from_pubdata_input(&mut pubdata, input); + + // Extend with uncompressed state diffs. + pubdata.extend((input.state_diffs.len() as u32).to_be_bytes()); + for state_diff in &input.state_diffs { + pubdata.extend(state_diff.encode_padded()); + } + + let chained_log_hash = build_chained_log_hash(&input.user_logs); + let log_root_hash = + build_logs_root(&input.user_logs, l2_to_l1_logs_tree_size(protocol_version)); + let chained_msg_hash = build_chained_message_hash(&input.l2_to_l1_messages); + let chained_bytecodes_hash = build_chained_bytecode_hash(&input.published_bytecodes); + + let l2_da_header = vec![ + Token::FixedBytes(chained_log_hash), + Token::FixedBytes(log_root_hash), + Token::FixedBytes(chained_msg_hash), + Token::FixedBytes(chained_bytecodes_hash), + Token::Bytes(pubdata), + ]; + + // Selector of `IL2DAValidator::validatePubdata`. + let func_selector = ethabi::short_signature( + "validatePubdata", + &[ + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::Bytes, + ], + ); + + [func_selector.to_vec(), ethabi::encode(&l2_da_header)].concat() + } + } + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + _protocol_version: ProtocolVersionId, + ) -> Vec { + let mut pubdata = vec![]; + extend_from_pubdata_input(&mut pubdata, input); + + pubdata + } +} + +fn extend_from_pubdata_input(buffer: &mut Vec, pubdata_input: &PubdataInput) { + let PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + } = pubdata_input; + + // Adding user L2->L1 logs. + buffer.extend(encode_user_logs(user_logs)); + + // Encoding L2->L1 messages + // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... 
|| (messages[n].len() as u32) || messages[n]]` + buffer.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); + for message in l2_to_l1_messages { + buffer.extend((message.len() as u32).to_be_bytes()); + buffer.extend(message); + } + // Encoding bytecodes + // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` + buffer.extend((published_bytecodes.len() as u32).to_be_bytes()); + for bytecode in published_bytecodes { + buffer.extend((bytecode.len() as u32).to_be_bytes()); + buffer.extend(bytecode); + } + // Encoding state diffs + // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: intial + repeated) as u32 || sorted state diffs by ]` + let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); + buffer.extend(state_diffs_compressed); +} diff --git a/core/lib/multivm/src/pubdata_builders/tests.rs b/core/lib/multivm/src/pubdata_builders/tests.rs new file mode 100644 index 00000000000..bc24b8e4734 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/tests.rs @@ -0,0 +1,123 @@ +use zksync_types::{ + writes::StateDiffRecord, Address, ProtocolVersionId, ACCOUNT_CODE_STORAGE_ADDRESS, + BOOTLOADER_ADDRESS, +}; +use zksync_utils::u256_to_h256; + +use super::{rollup::RollupPubdataBuilder, validium::ValidiumPubdataBuilder}; +use crate::interface::pubdata::{L1MessengerL2ToL1Log, PubdataBuilder, PubdataInput}; + +fn mock_input() -> PubdataInput { + // Just using some constant addresses for tests + let addr1 = BOOTLOADER_ADDRESS; + let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; + + let user_logs = vec![L1MessengerL2ToL1Log { + l2_shard_id: 0, + is_service: false, + tx_number_in_block: 0, + sender: addr1, + key: 1.into(), + value: 128.into(), + }]; + + let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; + + let published_bytecodes = vec![hex::decode("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb").unwrap()]; + + // For covering more cases, we have two state diffs: + // One with enumeration index present (and so it is a repeated write) and the one without it. 
+ let state_diffs = vec![ + StateDiffRecord { + address: addr2, + key: 155.into(), + derived_key: u256_to_h256(125.into()).0, + enumeration_index: 12, + initial_value: 11.into(), + final_value: 12.into(), + }, + StateDiffRecord { + address: addr2, + key: 156.into(), + derived_key: u256_to_h256(126.into()).0, + enumeration_index: 0, + initial_value: 0.into(), + final_value: 14.into(), + }, + ]; + + PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + } +} + +#[test] +fn test_rollup_pubdata_building() { + let input = mock_input(); + + let rollup_pubdata_builder = RollupPubdataBuilder::new(Address::zero()); + + let actual = + rollup_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version24); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input` (pre gateway)" + ); + + let actual = + rollup_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version24); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata` (pre gateway)" + ); + + let actual = + rollup_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version27); + let expected = 
"89f9a07233e608561d90f7c4e7bcea24d718e425a6bd6c8eefb48a334366143694c75fae278944d856d68e33bbd32937cb3a1ea35cbf7d6eeeb1150f500dd0d64d0efe420d6dafe5897eab2fc27b2e47af303397ed285ace146d836d042717b0a3dc4b28a603a33b28ce1d5c52c593a46a15a99f1afa1c1d92715284288958fd54a93de700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000032300000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input` (post gateway)" + ); + + let actual = + rollup_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version27); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata` (post gateway)" + ); +} + +#[test] +fn test_validium_pubdata_building() { + let input = mock_input(); + + let validium_pubdata_builder = ValidiumPubdataBuilder::new(Address::zero()); + + let actual = + validium_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version27); + let expected = 
"89f9a07233e608561d90f7c4e7bcea24d718e425a6bd6c8eefb48a334366143694c75fae278944d856d68e33bbd32937cb3a1ea35cbf7d6eeeb1150f500dd0d64d0efe420d6dafe5897eab2fc27b2e47af303397ed285ace146d836d042717b0a3dc4b28a603a33b28ce1d5c52c593a46a15a99f1afa1c1d92715284288958fd54a93de700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000005c000000010000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input`" + ); + + let actual = + validium_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version27); + let expected = "fa96e2436e6fb4d668f5a06681a7c53fcb199b2747ee624ee52a13e85aac5f1e"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata`" + ); +} diff --git a/core/lib/multivm/src/pubdata_builders/utils.rs b/core/lib/multivm/src/pubdata_builders/utils.rs new file mode 100644 index 00000000000..57361a674fb --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/utils.rs @@ -0,0 +1,70 @@ +use zksync_mini_merkle_tree::MiniMerkleTree; +use zksync_types::web3::keccak256; +use zksync_utils::bytecode::hash_bytecode; + +use crate::interface::pubdata::L1MessengerL2ToL1Log; + +pub(crate) fn build_chained_log_hash(user_logs: &[L1MessengerL2ToL1Log]) -> Vec { + let mut chained_log_hash = vec![0u8; 32]; + + for log in user_logs { + let log_bytes = log.packed_encoding(); + let hash = keccak256(&log_bytes); + + chained_log_hash = keccak256(&[chained_log_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_log_hash +} + +pub(crate) fn build_logs_root( + user_logs: &[L1MessengerL2ToL1Log], + l2_to_l1_logs_tree_size: usize, +) -> Vec { + let logs = user_logs.iter().map(|log| { + let encoded = log.packed_encoding(); + let mut slice = [0u8; 88]; + slice.copy_from_slice(&encoded); + slice + }); + MiniMerkleTree::new(logs, Some(l2_to_l1_logs_tree_size)) + .merkle_root() + .as_bytes() + .to_vec() +} + +pub(crate) fn build_chained_message_hash(l2_to_l1_messages: &[Vec]) -> Vec { + let mut chained_msg_hash = vec![0u8; 32]; + + for msg in l2_to_l1_messages { + let hash = keccak256(msg); + + chained_msg_hash = keccak256(&[chained_msg_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_msg_hash +} + +pub(crate) fn build_chained_bytecode_hash(published_bytecodes: &[Vec]) -> Vec { + let mut chained_bytecode_hash = vec![0u8; 32]; + + for bytecode in published_bytecodes { + let hash = hash_bytecode(bytecode).to_fixed_bytes(); + + chained_bytecode_hash = + keccak256(&[chained_bytecode_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_bytecode_hash +} + +pub(crate) fn encode_user_logs(user_logs: &[L1MessengerL2ToL1Log]) -> Vec { + // Encoding user L2->L1 logs. + // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... 
|| l2tol1logs[n]]` + let mut result = vec![]; + result.extend((user_logs.len() as u32).to_be_bytes()); + for l2tol1log in user_logs { + result.extend(l2tol1log.packed_encoding()); + } + result +} diff --git a/core/lib/multivm/src/pubdata_builders/validium.rs b/core/lib/multivm/src/pubdata_builders/validium.rs new file mode 100644 index 00000000000..a9156e970aa --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/validium.rs @@ -0,0 +1,93 @@ +use zksync_types::{ + ethabi, + ethabi::{ParamType, Token}, + l2_to_l1_log::l2_to_l1_logs_tree_size, + web3::keccak256, + Address, ProtocolVersionId, +}; + +use super::utils::{ + build_chained_bytecode_hash, build_chained_log_hash, build_chained_message_hash, + build_logs_root, encode_user_logs, +}; +use crate::interface::pubdata::{PubdataBuilder, PubdataInput}; + +#[derive(Debug, Clone, Copy)] +pub struct ValidiumPubdataBuilder { + pub l2_da_validator: Address, +} + +impl ValidiumPubdataBuilder { + pub fn new(l2_da_validator: Address) -> Self { + Self { l2_da_validator } + } +} + +impl PubdataBuilder for ValidiumPubdataBuilder { + fn l2_da_validator(&self) -> Address { + self.l2_da_validator + } + + fn l1_messenger_operator_input( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + assert!( + !protocol_version.is_pre_gateway(), + "ValidiumPubdataBuilder must not be called for pre gateway" + ); + + let mut pubdata = vec![]; + pubdata.extend(encode_user_logs(&input.user_logs)); + + let chained_log_hash = build_chained_log_hash(&input.user_logs); + let log_root_hash = + build_logs_root(&input.user_logs, l2_to_l1_logs_tree_size(protocol_version)); + let chained_msg_hash = build_chained_message_hash(&input.l2_to_l1_messages); + let chained_bytecodes_hash = build_chained_bytecode_hash(&input.published_bytecodes); + + let l2_da_header = vec![ + Token::FixedBytes(chained_log_hash), + Token::FixedBytes(log_root_hash), + Token::FixedBytes(chained_msg_hash), + Token::FixedBytes(chained_bytecodes_hash), + Token::Bytes(pubdata), + ]; + + // Selector of `IL2DAValidator::validatePubdata`. 
+ let func_selector = ethabi::short_signature( + "validatePubdata", + &[ + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::Bytes, + ], + ); + + [func_selector.to_vec(), ethabi::encode(&l2_da_header)] + .concat() + .to_vec() + } + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + assert!( + !protocol_version.is_pre_gateway(), + "ValidiumPubdataBuilder must not be called for pre gateway" + ); + + let state_diffs_packed = input + .state_diffs + .iter() + .flat_map(|diff| diff.encode_padded()) + .collect::>(); + + keccak256(&state_diffs_packed).to_vec() + } +} diff --git a/core/lib/multivm/src/utils/events.rs b/core/lib/multivm/src/utils/events.rs index 9720cb77914..d84651989e7 100644 --- a/core/lib/multivm/src/utils/events.rs +++ b/core/lib/multivm/src/utils/events.rs @@ -1,59 +1,10 @@ use zksync_system_constants::L1_MESSENGER_ADDRESS; use zksync_types::{ ethabi::{self, Token}, - l2_to_l1_log::L2ToL1Log, - Address, H256, U256, + H256, U256, }; -use zksync_utils::{u256_to_bytes_be, u256_to_h256}; -use crate::interface::VmEvent; - -/// Corresponds to the following solidity event: -/// ```solidity -/// struct L2ToL1Log { -/// uint8 l2ShardId; -/// bool isService; -/// uint16 txNumberInBlock; -/// address sender; -/// bytes32 key; -/// bytes32 value; -/// } -/// ``` -#[derive(Debug, Default, Clone, PartialEq)] -pub(crate) struct L1MessengerL2ToL1Log { - pub l2_shard_id: u8, - pub is_service: bool, - pub tx_number_in_block: u16, - pub sender: Address, - pub key: U256, - pub value: U256, -} - -impl L1MessengerL2ToL1Log { - pub fn packed_encoding(&self) -> Vec { - let mut res: Vec = vec![]; - res.push(self.l2_shard_id); - res.push(self.is_service as u8); - res.extend_from_slice(&self.tx_number_in_block.to_be_bytes()); - res.extend_from_slice(self.sender.as_bytes()); - res.extend(u256_to_bytes_be(&self.key)); - res.extend(u256_to_bytes_be(&self.value)); - res - } -} - -impl From for L2ToL1Log { - fn from(log: L1MessengerL2ToL1Log) -> Self { - L2ToL1Log { - shard_id: log.l2_shard_id, - is_service: log.is_service, - tx_number_in_block: log.tx_number_in_block, - sender: log.sender, - key: u256_to_h256(log.key), - value: u256_to_h256(log.value), - } - } -} +use crate::interface::{pubdata::L1MessengerL2ToL1Log, VmEvent}; #[derive(Debug, PartialEq)] pub(crate) struct L1MessengerBytecodePublicationRequest { @@ -142,7 +93,8 @@ mod tests { use zksync_system_constants::{ BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, }; - use zksync_types::L1BatchNumber; + use zksync_types::{Address, L1BatchNumber}; + use zksync_utils::u256_to_h256; use super::*; diff --git a/core/lib/multivm/src/utils/mod.rs b/core/lib/multivm/src/utils/mod.rs index 5d8fba7a2ac..a55adb16c85 100644 --- a/core/lib/multivm/src/utils/mod.rs +++ b/core/lib/multivm/src/utils/mod.rs @@ -53,7 +53,9 @@ pub fn derive_base_fee_and_gas_per_pubdata( VmVersion::Vm1_4_2 => crate::vm_1_4_2::utils::fee::derive_base_fee_and_gas_per_pubdata( batch_fee_input.into_pubdata_independent(), ), - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => { crate::vm_latest::utils::fee::derive_base_fee_and_gas_per_pubdata( batch_fee_input.into_pubdata_independent(), ) @@ -81,9 +83,9 @@ pub fn get_batch_base_fee(l1_batch_env: &L1BatchEnv, vm_version: 
VmVersion) -> u } VmVersion::Vm1_4_1 => crate::vm_1_4_1::utils::fee::get_batch_base_fee(l1_batch_env), VmVersion::Vm1_4_2 => crate::vm_1_4_2::utils::fee::get_batch_base_fee(l1_batch_env), - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::utils::fee::get_batch_base_fee(l1_batch_env) - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::utils::fee::get_batch_base_fee(l1_batch_env), } } @@ -209,9 +211,9 @@ pub fn derive_overhead( } VmVersion::Vm1_4_1 => crate::vm_1_4_1::utils::overhead::derive_overhead(encoded_len), VmVersion::Vm1_4_2 => crate::vm_1_4_2::utils::overhead::derive_overhead(encoded_len), - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::utils::overhead::derive_overhead(encoded_len) - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::utils::overhead::derive_overhead(encoded_len), } } @@ -245,6 +247,9 @@ pub fn get_bootloader_encoding_space(version: VmVersion) -> u32 { crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_bootloader_tx_encoding_space( + crate::vm_latest::MultiVMSubversion::Gateway, + ), } } @@ -264,9 +269,9 @@ pub fn get_bootloader_max_txs_in_batch(version: VmVersion) -> usize { VmVersion::VmBoojumIntegration => crate::vm_boojum_integration::constants::MAX_TXS_IN_BLOCK, VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::MAX_TXS_IN_BATCH, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::MAX_TXS_IN_BATCH, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::MAX_TXS_IN_BATCH - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::MAX_TXS_IN_BATCH, } } @@ -287,9 +292,9 @@ pub fn gas_bootloader_batch_tip_overhead(version: VmVersion) -> u32 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::BOOTLOADER_BATCH_TIP_OVERHEAD, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::BOOTLOADER_BATCH_TIP_OVERHEAD, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_OVERHEAD - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_OVERHEAD, } } @@ -310,7 +315,9 @@ pub fn circuit_statistics_bootloader_batch_tip_overhead(version: VmVersion) -> u VmVersion::Vm1_4_2 => { crate::vm_1_4_2::constants::BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as usize } - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => { crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as usize } } @@ -333,7 +340,9 @@ pub fn execution_metrics_bootloader_batch_tip_overhead(version: VmVersion) -> us VmVersion::Vm1_4_2 => { crate::vm_1_4_2::constants::BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as usize } - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | 
VmVersion::VmGateway => { crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as usize } } @@ -357,9 +366,9 @@ pub fn get_max_gas_per_pubdata_byte(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::MAX_GAS_PER_PUBDATA_BYTE, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::MAX_GAS_PER_PUBDATA_BYTE, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::MAX_GAS_PER_PUBDATA_BYTE - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::MAX_GAS_PER_PUBDATA_BYTE, } } @@ -393,6 +402,9 @@ pub fn get_used_bootloader_memory_bytes(version: VmVersion) -> usize { crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_used_bootloader_memory_bytes( + crate::vm_latest::MultiVMSubversion::Gateway, + ), } } @@ -426,6 +438,9 @@ pub fn get_used_bootloader_memory_words(version: VmVersion) -> usize { crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_used_bootloader_memory_bytes( + crate::vm_latest::MultiVMSubversion::Gateway, + ), } } @@ -447,9 +462,9 @@ pub fn get_max_batch_gas_limit(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::BLOCK_GAS_LIMIT as u64, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::BLOCK_GAS_LIMIT as u64, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::BATCH_GAS_LIMIT - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::BATCH_GAS_LIMIT, } } @@ -473,9 +488,9 @@ pub fn get_eth_call_gas_limit(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::ETH_CALL_GAS_LIMIT as u64, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::ETH_CALL_GAS_LIMIT as u64, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::ETH_CALL_GAS_LIMIT - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::ETH_CALL_GAS_LIMIT, } } @@ -496,9 +511,9 @@ pub fn get_max_batch_base_layer_circuits(version: VmVersion) -> usize { // We avoid providing `0` for the old versions to avoid potential errors when working with old versions. 
crate::vm_1_4_2::constants::MAX_BASE_LAYER_CIRCUITS } - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::MAX_BASE_LAYER_CIRCUITS - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::MAX_BASE_LAYER_CIRCUITS, } } diff --git a/core/lib/multivm/src/versions/shadow/mod.rs b/core/lib/multivm/src/versions/shadow/mod.rs index fe9ce8eefcb..42a0fbb1b8b 100644 --- a/core/lib/multivm/src/versions/shadow/mod.rs +++ b/core/lib/multivm/src/versions/shadow/mod.rs @@ -198,7 +198,6 @@ impl Harness { assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); self.new_block(vm, &[deploy_tx.tx.hash(), load_test_tx.hash()]); - vm.finish_batch(); } } diff --git a/core/lib/multivm/src/versions/shadow/tests.rs b/core/lib/multivm/src/versions/shadow/tests.rs index 64179f59be1..6a39a28f763 100644 --- a/core/lib/multivm/src/versions/shadow/tests.rs +++ b/core/lib/multivm/src/versions/shadow/tests.rs @@ -1,14 +1,15 @@ //! Unit tests from the `testonly` test suite. -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H256, U256}; +use zksync_vm_interface::pubdata::PubdataBuilder; use super::ShadowedFastVm; use crate::{ interface::{ utils::{ShadowMut, ShadowRef}, - CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs, + CurrentExecutionState, L2BlockEnv, VmExecutionResultAndLogs, }, versions::testonly::TestedVm, }; @@ -41,14 +42,25 @@ impl TestedVm for ShadowedFastVm { }) } - fn execute_with_state_diffs( + fn finish_batch_with_state_diffs( &mut self, diffs: Vec, - mode: VmExecutionMode, + pubdata_builder: Rc, ) -> VmExecutionResultAndLogs { - self.get_custom_mut("execute_with_state_diffs", |r| match r { - ShadowMut::Main(vm) => vm.execute_with_state_diffs(diffs.clone(), mode), - ShadowMut::Shadow(vm) => vm.execute_with_state_diffs(diffs.clone(), mode), + self.get_custom_mut("finish_batch_with_state_diffs", |r| match r { + ShadowMut::Main(vm) => { + vm.finish_batch_with_state_diffs(diffs.clone(), pubdata_builder.clone()) + } + ShadowMut::Shadow(vm) => { + vm.finish_batch_with_state_diffs(diffs.clone(), pubdata_builder.clone()) + } + }) + } + + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs { + self.get_custom_mut("finish_batch_without_pubdata", |r| match r { + ShadowMut::Main(vm) => vm.finish_batch_without_pubdata(), + ShadowMut::Shadow(vm) => vm.finish_batch_without_pubdata(), }) } diff --git a/core/lib/multivm/src/versions/testonly/block_tip.rs b/core/lib/multivm/src/versions/testonly/block_tip.rs index 7700f347ca6..220653308a7 100644 --- a/core/lib/multivm/src/versions/testonly/block_tip.rs +++ b/core/lib/multivm/src/versions/testonly/block_tip.rs @@ -11,11 +11,11 @@ use zksync_types::{ use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; use super::{ - get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, + default_pubdata_builder, get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, tester::{TestedVm, VmTesterBuilder}, }; use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, L1BatchEnv, TxExecutionMode, VmInterfaceExt}, versions::testonly::default_l1_batch, vm_latest::constants::{ BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, @@ -156,7 +156,7 @@ fn execute_test(test_data: L1MessengerTestData) -> 
TestStatistics vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction {i} wasn't successful for input: {:#?}", @@ -169,7 +169,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics let gas_before = vm.vm.gas_remaining(); let result = vm .vm - .execute_with_state_diffs(test_data.state_diffs.clone(), VmExecutionMode::Batch); + .finish_batch_with_state_diffs(test_data.state_diffs.clone(), default_pubdata_builder()); assert!( !result.result.is_failed(), "Batch wasn't successful for input: {test_data:?}" diff --git a/core/lib/multivm/src/versions/testonly/bootloader.rs b/core/lib/multivm/src/versions/testonly/bootloader.rs index e3177e07851..4b9b63252d6 100644 --- a/core/lib/multivm/src/versions/testonly/bootloader.rs +++ b/core/lib/multivm/src/versions/testonly/bootloader.rs @@ -2,7 +2,7 @@ use assert_matches::assert_matches; use zksync_types::U256; use super::{get_bootloader, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{ExecutionResult, Halt, TxExecutionMode}; pub(crate) fn test_dummy_bootloader() { let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); @@ -14,7 +14,7 @@ pub(crate) fn test_dummy_bootloader() { .with_execution_mode(TxExecutionMode::VerifyExecute) .build::(); - let result = vm.vm.execute(VmExecutionMode::Batch); + let result = vm.vm.finish_batch_without_pubdata(); assert!(!result.result.is_failed()); let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); @@ -33,7 +33,7 @@ pub(crate) fn test_bootloader_out_of_gas() { .with_execution_mode(TxExecutionMode::VerifyExecute) .build::(); - let res = vm.vm.execute(VmExecutionMode::Batch); + let res = vm.vm.finish_batch_without_pubdata(); assert_matches!( res.result, diff --git a/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs index 346241a9624..9da005b995d 100644 --- a/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs @@ -1,8 +1,8 @@ use zksync_test_account::TxType; -use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; +use super::{default_pubdata_builder, read_test_contract, tester::VmTesterBuilder, TestedVm}; use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmEvent, VmInterfaceExt}, utils::bytecode, }; @@ -30,10 +30,10 @@ pub(crate) fn test_bytecode_publishing() { compressed_bytecode ); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "Transaction wasn't successful"); - vm.vm.execute(VmExecutionMode::Batch); + vm.vm.finish_batch(default_pubdata_builder()); let state = vm.vm.get_current_execution_state(); let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); diff --git a/core/lib/multivm/src/versions/testonly/circuits.rs b/core/lib/multivm/src/versions/testonly/circuits.rs index 9503efe9208..de987a8912d 100644 --- a/core/lib/multivm/src/versions/testonly/circuits.rs +++ b/core/lib/multivm/src/versions/testonly/circuits.rs @@ -2,7 +2,7 @@ use zksync_types::{Address, Execute, U256}; use super::tester::VmTesterBuilder; use crate::{ - 
interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, versions::testonly::TestedVm, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -28,7 +28,7 @@ pub(crate) fn test_circuits() { None, ); vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!res.result.is_failed(), "{res:#?}"); let s = res.statistics.circuit_statistic; diff --git a/core/lib/multivm/src/versions/testonly/code_oracle.rs b/core/lib/multivm/src/versions/testonly/code_oracle.rs index b786539329b..767a294f44a 100644 --- a/core/lib/multivm/src/versions/testonly/code_oracle.rs +++ b/core/lib/multivm/src/versions/testonly/code_oracle.rs @@ -9,7 +9,7 @@ use super::{ tester::VmTesterBuilder, TestedVm, }; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, versions::testonly::ContractToDeploy, }; @@ -68,7 +68,7 @@ pub(crate) fn test_code_oracle() { ); vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction wasn't successful: {result:#?}" @@ -91,7 +91,7 @@ pub(crate) fn test_code_oracle() { None, ); vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction wasn't successful: {result:#?}" @@ -160,7 +160,7 @@ pub(crate) fn test_code_oracle_big_bytecode() { ); vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction wasn't successful: {result:#?}" @@ -222,7 +222,7 @@ pub(crate) fn test_refunds_in_code_oracle() { ); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction wasn't successful: {result:#?}" diff --git a/core/lib/multivm/src/versions/testonly/default_aa.rs b/core/lib/multivm/src/versions/testonly/default_aa.rs index 3f121dcf7e6..c69c00de450 100644 --- a/core/lib/multivm/src/versions/testonly/default_aa.rs +++ b/core/lib/multivm/src/versions/testonly/default_aa.rs @@ -7,9 +7,9 @@ use zksync_types::{ }; use zksync_utils::h256_to_u256; -use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; +use super::{default_pubdata_builder, read_test_contract, tester::VmTesterBuilder, TestedVm}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, vm_latest::utils::fee::get_batch_base_fee, }; @@ -32,10 +32,10 @@ pub(crate) fn test_default_aa_interaction() { let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.l1_batch_env); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "Transaction wasn't successful"); - vm.vm.execute(VmExecutionMode::Batch); + vm.vm.finish_batch(default_pubdata_builder()); vm.vm.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/testonly/get_used_contracts.rs b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs index d3ffee20c34..9d0908807e2 100644 --- 
a/core/lib/multivm/src/versions/testonly/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs @@ -15,7 +15,8 @@ use super::{ }; use crate::{ interface::{ - ExecutionResult, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterfaceExt, + ExecutionResult, InspectExecutionMode, TxExecutionMode, VmExecutionResultAndLogs, + VmInterfaceExt, }, versions::testonly::ContractToDeploy, }; @@ -35,7 +36,7 @@ pub(crate) fn test_get_used_contracts() { let account = &mut vm.rich_accounts[0]; let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); assert!(vm @@ -70,7 +71,7 @@ pub(crate) fn test_get_used_contracts() { vm.vm.push_transaction(tx2.clone()); - let res2 = vm.vm.execute(VmExecutionMode::OneTx); + let res2 = vm.vm.execute(InspectExecutionMode::OneTx); assert!(res2.result.is_failed()); diff --git a/core/lib/multivm/src/versions/testonly/is_write_initial.rs b/core/lib/multivm/src/versions/testonly/is_write_initial.rs index ef1fe2088c1..cac9be17363 100644 --- a/core/lib/multivm/src/versions/testonly/is_write_initial.rs +++ b/core/lib/multivm/src/versions/testonly/is_write_initial.rs @@ -2,7 +2,9 @@ use zksync_test_account::TxType; use zksync_types::get_nonce_key; use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; -use crate::interface::{storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{ + storage::ReadStorage, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, +}; pub(crate) fn test_is_write_initial_behaviour() { // In this test, we check result of `is_write_initial` at different stages. @@ -27,7 +29,7 @@ pub(crate) fn test_is_write_initial_behaviour() { let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); // Check that `is_write_initial` still returns true for the nonce key. assert!(vm diff --git a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs index 212b1f16f20..e98a8385f02 100644 --- a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs @@ -11,7 +11,7 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use super::{read_test_contract, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, utils::StorageWritesDeduplicator, }; @@ -60,7 +60,7 @@ pub(crate) fn test_l1_tx_execution() { vm.vm.push_transaction(deploy_tx.tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); // The code hash of the deployed contract should be marked as republished. 
let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); @@ -84,7 +84,7 @@ pub(crate) fn test_l1_tx_execution() { TxType::L1 { serial_id: 0 }, ); vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); @@ -99,7 +99,7 @@ pub(crate) fn test_l1_tx_execution() { TxType::L1 { serial_id: 0 }, ); vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); // We changed one slot inside contract. @@ -110,7 +110,7 @@ pub(crate) fn test_l1_tx_execution() { assert_eq!(res.repeated_storage_writes, 0); vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; + let storage_logs = vm.vm.execute(InspectExecutionMode::OneTx).logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. // But now the base pubdata spent has changed too. @@ -125,7 +125,7 @@ pub(crate) fn test_l1_tx_execution() { TxType::L1 { serial_id: 1 }, ); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); // Method is not payable tx should fail assert!(result.result.is_failed(), "The transaction should fail"); @@ -176,7 +176,7 @@ pub(crate) fn test_l1_tx_execution_high_gas_limit() { vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(res.result.is_failed(), "The transaction should've failed"); } diff --git a/core/lib/multivm/src/versions/testonly/l2_blocks.rs b/core/lib/multivm/src/versions/testonly/l2_blocks.rs index 634a9b34bf6..947d8b5859f 100644 --- a/core/lib/multivm/src/versions/testonly/l2_blocks.rs +++ b/core/lib/multivm/src/versions/testonly/l2_blocks.rs @@ -17,8 +17,8 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use super::{default_l1_batch, get_empty_storage, tester::VmTesterBuilder, TestedVm}; use crate::{ interface::{ - storage::StorageView, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterfaceExt, + storage::StorageView, ExecutionResult, Halt, InspectExecutionMode, L2BlockEnv, + TxExecutionMode, VmInterfaceExt, }, vm_latest::{ constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, @@ -66,7 +66,7 @@ pub(crate) fn test_l2_block_initialization_timestamp() { let l1_tx = get_l1_noop(); vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert_matches!( res.result, @@ -100,7 +100,7 @@ pub(crate) fn test_l2_block_initialization_number_non_zero() { set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( res.result, @@ -128,7 +128,7 @@ fn test_same_l2_block( let l1_tx = get_l1_noop(); vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!res.result.is_failed()); let mut current_l2_block = 
vm.l1_batch_env.first_l2_block; @@ -147,7 +147,7 @@ fn test_same_l2_block( vm.vm.push_transaction(l1_tx); set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); if let Some(err) = expected_error { assert_eq!(result.result, ExecutionResult::Halt { reason: err }); @@ -203,7 +203,7 @@ fn test_new_l2_block( // Firstly we execute the first transaction vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); let mut second_l2_block = vm.l1_batch_env.first_l2_block; second_l2_block.number += 1; @@ -223,7 +223,7 @@ fn test_new_l2_block( vm.vm.push_l2_block_unchecked(second_l2_block); vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); if let Some(err) = expected_error { assert_eq!(result.result, ExecutionResult::Halt { reason: err }); } else { @@ -350,7 +350,7 @@ fn test_first_in_batch( vm.vm.push_transaction(l1_tx); set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); if let Some(err) = expected_error { assert_eq!(result.result, ExecutionResult::Halt { reason: err }); } else { diff --git a/core/lib/multivm/src/versions/testonly/mod.rs b/core/lib/multivm/src/versions/testonly/mod.rs index 74cda6a9522..eece1d475bb 100644 --- a/core/lib/multivm/src/versions/testonly/mod.rs +++ b/core/lib/multivm/src/versions/testonly/mod.rs @@ -9,7 +9,7 @@ //! - Tests use [`VmTester`] built using [`VmTesterBuilder`] to create a VM instance. This allows to set up storage for the VM, //! custom [`SystemEnv`] / [`L1BatchEnv`], deployed contracts, pre-funded accounts etc. 
-use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use ethabi::Contract; use once_cell::sync::Lazy; @@ -23,11 +23,14 @@ use zksync_types::{ ProtocolVersionId, U256, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; +use zksync_vm_interface::{ + pubdata::PubdataBuilder, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, +}; pub(super) use self::tester::{TestedVm, VmTester, VmTesterBuilder}; use crate::{ - interface::storage::InMemoryStorage, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + interface::storage::InMemoryStorage, pubdata_builders::RollupPubdataBuilder, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; pub(super) mod block_tip; @@ -175,6 +178,10 @@ pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { } } +pub(super) fn default_pubdata_builder() -> Rc { + Rc::new(RollupPubdataBuilder::new(Address::zero())) +} + pub(super) fn make_address_rich(storage: &mut InMemoryStorage, address: Address) { let key = storage_key_for_eth_balance(&address); storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); diff --git a/core/lib/multivm/src/versions/testonly/nonce_holder.rs b/core/lib/multivm/src/versions/testonly/nonce_holder.rs index 8ef120c693c..36f736c0bbe 100644 --- a/core/lib/multivm/src/versions/testonly/nonce_holder.rs +++ b/core/lib/multivm/src/versions/testonly/nonce_holder.rs @@ -3,7 +3,7 @@ use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; use super::{read_nonce_holder_tester, tester::VmTesterBuilder, ContractToDeploy, TestedVm}; use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, + ExecutionResult, Halt, InspectExecutionMode, TxExecutionMode, TxRevertReason, VmInterfaceExt, VmRevertReason, }; @@ -53,7 +53,7 @@ fn run_nonce_test( }; tx_data.signature = vec![test_mode.into()]; vm.push_transaction(transaction); - let result = vm.execute(VmExecutionMode::OneTx); + let result = vm.execute(InspectExecutionMode::OneTx); if let Some(msg) = error_message { let expected_error = diff --git a/core/lib/multivm/src/versions/testonly/precompiles.rs b/core/lib/multivm/src/versions/testonly/precompiles.rs index 270afab0731..2e26dc134b0 100644 --- a/core/lib/multivm/src/versions/testonly/precompiles.rs +++ b/core/lib/multivm/src/versions/testonly/precompiles.rs @@ -3,7 +3,7 @@ use zksync_types::{Address, Execute}; use super::{read_precompiles_contract, tester::VmTesterBuilder, TestedVm}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, versions::testonly::ContractToDeploy, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -36,7 +36,7 @@ pub(crate) fn test_keccak() { ); vm.vm.push_transaction(tx); - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let keccak_count = exec_result.statistics.circuit_statistic.keccak256 @@ -72,7 +72,7 @@ pub(crate) fn test_sha256() { ); vm.vm.push_transaction(tx); - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let sha_count = exec_result.statistics.circuit_statistic.sha256 @@ -101,7 +101,7 @@ pub(crate) fn test_ecrecover() { ); 
vm.vm.push_transaction(tx); - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover diff --git a/core/lib/multivm/src/versions/testonly/refunds.rs b/core/lib/multivm/src/versions/testonly/refunds.rs index 565607dff10..874425fc435 100644 --- a/core/lib/multivm/src/versions/testonly/refunds.rs +++ b/core/lib/multivm/src/versions/testonly/refunds.rs @@ -3,10 +3,10 @@ use zksync_test_account::TxType; use zksync_types::{Address, Execute, U256}; use super::{ - read_expensive_contract, read_test_contract, tester::VmTesterBuilder, ContractToDeploy, - TestedVm, + default_pubdata_builder, read_expensive_contract, read_test_contract, tester::VmTesterBuilder, + ContractToDeploy, TestedVm, }; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; pub(crate) fn test_predetermined_refunded_gas() { // In this test, we compare the execution of the bootloader with the predefined @@ -24,7 +24,7 @@ pub(crate) fn test_predetermined_refunded_gas() { let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx; vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); @@ -37,7 +37,10 @@ pub(crate) fn test_predetermined_refunded_gas() { ); assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let result_without_predefined_refunds = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); assert!(!result_without_predefined_refunds.result.is_failed(),); @@ -56,7 +59,10 @@ pub(crate) fn test_predetermined_refunded_gas() { vm.vm .push_transaction_with_refund(tx.clone(), result.refunds.gas_refunded); - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let result_with_predefined_refunds = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); assert!(!result_with_predefined_refunds.result.is_failed()); @@ -107,7 +113,10 @@ pub(crate) fn test_predetermined_refunded_gas() { let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; vm.vm .push_transaction_with_refund(tx, changed_operator_suggested_refund); - let result = vm.vm.execute(VmExecutionMode::Batch); + let result = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); assert!(!result.result.is_failed()); @@ -185,7 +194,7 @@ pub(crate) fn test_negative_pubdata_for_transaction() { None, ); vm.vm.push_transaction(expensive_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "Transaction wasn't successful: {result:#?}" @@ -202,7 +211,7 @@ pub(crate) fn test_negative_pubdata_for_transaction() { None, ); vm.vm.push_transaction(clean_up_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( 
!result.result.is_failed(), "Transaction wasn't successful: {result:#?}" diff --git a/core/lib/multivm/src/versions/testonly/require_eip712.rs b/core/lib/multivm/src/versions/testonly/require_eip712.rs index 1ea3964d7cd..e789fbda290 100644 --- a/core/lib/multivm/src/versions/testonly/require_eip712.rs +++ b/core/lib/multivm/src/versions/testonly/require_eip712.rs @@ -8,7 +8,7 @@ use zksync_types::{ use super::{ read_many_owners_custom_account_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm, }; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy /// and EIP712 transactions. @@ -52,7 +52,7 @@ pub(crate) fn test_require_eip712() { ); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); let private_account_balance = vm.get_eth_balance(private_account.address); @@ -85,7 +85,7 @@ pub(crate) fn test_require_eip712() { let transaction: Transaction = l2_tx.into(); vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); assert_eq!( @@ -133,7 +133,7 @@ pub(crate) fn test_require_eip712() { let transaction: Transaction = l2_tx.into(); vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( vm.get_eth_balance(beneficiary_address), diff --git a/core/lib/multivm/src/versions/testonly/secp256r1.rs b/core/lib/multivm/src/versions/testonly/secp256r1.rs index 60197913601..37d428f8210 100644 --- a/core/lib/multivm/src/versions/testonly/secp256r1.rs +++ b/core/lib/multivm/src/versions/testonly/secp256r1.rs @@ -4,7 +4,7 @@ use zksync_types::{web3::keccak256, Execute, H256, U256}; use zksync_utils::h256_to_u256; use super::{tester::VmTesterBuilder, TestedVm}; -use crate::interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{ExecutionResult, InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; pub(crate) fn test_secp256r1() { // In this test, we aim to test whether a simple account interaction (without any fee logic) @@ -55,7 +55,7 @@ pub(crate) fn test_secp256r1() { vm.vm.push_transaction(tx); - let execution_result = vm.vm.execute(VmExecutionMode::Batch); + let execution_result = vm.vm.execute(InspectExecutionMode::OneTx); let ExecutionResult::Success { output } = execution_result.result else { panic!("batch failed") diff --git a/core/lib/multivm/src/versions/testonly/simple_execution.rs b/core/lib/multivm/src/versions/testonly/simple_execution.rs index fcd7a144ab1..96239fb362d 100644 --- a/core/lib/multivm/src/versions/testonly/simple_execution.rs +++ b/core/lib/multivm/src/versions/testonly/simple_execution.rs @@ -1,8 +1,8 @@ use assert_matches::assert_matches; use zksync_test_account::TxType; -use super::{tester::VmTesterBuilder, TestedVm}; -use crate::interface::{ExecutionResult, VmExecutionMode, VmInterfaceExt}; +use super::{default_pubdata_builder, tester::VmTesterBuilder, TestedVm}; +use crate::interface::{ExecutionResult, InspectExecutionMode, VmInterfaceExt}; pub(crate) fn test_estimate_fee() { let mut vm_tester = VmTesterBuilder::new() @@ -23,7 +23,7 @@ pub(crate) fn test_estimate_fee() { 
vm_tester.vm.push_transaction(tx); - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); + let result = vm_tester.vm.execute(InspectExecutionMode::OneTx); assert_matches!(result.result, ExecutionResult::Success { .. }); } @@ -64,12 +64,14 @@ pub(crate) fn test_simple_execute() { vm.push_transaction(tx1); vm.push_transaction(tx2); vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); + let tx = vm.execute(InspectExecutionMode::OneTx); assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); + let tx = vm.execute(InspectExecutionMode::OneTx); assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); + let tx = vm.execute(InspectExecutionMode::OneTx); assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); + let block_tip = vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; assert_matches!(block_tip.result, ExecutionResult::Success { .. }); } diff --git a/core/lib/multivm/src/versions/testonly/storage.rs b/core/lib/multivm/src/versions/testonly/storage.rs index 4951272a60c..efe7be1edbd 100644 --- a/core/lib/multivm/src/versions/testonly/storage.rs +++ b/core/lib/multivm/src/versions/testonly/storage.rs @@ -3,7 +3,7 @@ use zksync_contracts::{load_contract, read_bytecode}; use zksync_types::{Address, Execute, U256}; use super::{tester::VmTesterBuilder, ContractToDeploy, TestedVm}; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 { let bytecode = read_bytecode( @@ -45,20 +45,20 @@ fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Ve vm.vm.make_snapshot(); vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "First tx failed"); vm.vm.pop_snapshot_no_rollback(); // We rollback once because transient storage and rollbacks are a tricky combination. 
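// Editor's aside: an illustrative sketch, not part of this patch, spelling out the
// snapshot discipline used in the surrounding storage test. The method names
// (`make_snapshot`, `pop_snapshot_no_rollback`, `rollback_to_the_latest_snapshot`,
// `execute`) are the ones used above; the helper name and its `keep` flag are
// hypothetical.
fn run_and_maybe_rollback<VM: TestedVm>(vm: &mut VM, tx: Transaction, keep: bool) -> bool {
    // Mirror the ordering above: take the snapshot first, then push and execute.
    vm.make_snapshot();
    vm.push_transaction(tx);
    let result = vm.execute(InspectExecutionMode::OneTx);
    if keep {
        // Keep the effects: drop the snapshot without reverting anything.
        vm.pop_snapshot_no_rollback();
    } else {
        // Discard the effects: revert the VM to the snapshot.
        vm.rollback_to_the_latest_snapshot();
    }
    !result.result.is_failed()
}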
vm.vm.make_snapshot(); vm.vm.push_transaction(tx2.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "Second tx failed"); vm.vm.rollback_to_the_latest_snapshot(); vm.vm.make_snapshot(); vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "Second tx failed on second run"); result.statistics.pubdata_published diff --git a/core/lib/multivm/src/versions/testonly/tester/mod.rs b/core/lib/multivm/src/versions/testonly/tester/mod.rs index 7432322e0c8..716b9386235 100644 --- a/core/lib/multivm/src/versions/testonly/tester/mod.rs +++ b/core/lib/multivm/src/versions/testonly/tester/mod.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, fmt}; +use std::{collections::HashSet, fmt, rc::Rc}; use zksync_contracts::BaseSystemContracts; use zksync_test_account::{Account, TxType}; @@ -8,7 +8,8 @@ use zksync_types::{ Address, L1BatchNumber, StorageKey, Transaction, H256, U256, }; use zksync_vm_interface::{ - CurrentExecutionState, VmExecutionResultAndLogs, VmInterfaceHistoryEnabled, + pubdata::PubdataBuilder, CurrentExecutionState, InspectExecutionMode, VmExecutionResultAndLogs, + VmInterfaceHistoryEnabled, }; pub(crate) use self::transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; @@ -16,8 +17,7 @@ use super::{get_empty_storage, read_test_contract}; use crate::{ interface::{ storage::{InMemoryStorage, StoragePtr, StorageView}, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, - VmInterfaceExt, + L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterfaceExt, }, versions::testonly::{ default_l1_batch, default_system_env, make_address_rich, ContractToDeploy, @@ -44,7 +44,7 @@ impl VmTester { let tx = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce = tx.nonce().unwrap().0.into(); self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); + self.vm.execute(InspectExecutionMode::OneTx); let deployed_address = deployed_address_create(account.address, nonce); self.test_contract = Some(deployed_address); } @@ -187,12 +187,14 @@ pub(crate) trait TestedVm: /// Unlike [`Self::known_bytecode_hashes()`], the output should only include successfully decommitted bytecodes. fn decommitted_hashes(&self) -> HashSet; - fn execute_with_state_diffs( + fn finish_batch_with_state_diffs( &mut self, diffs: Vec, - mode: VmExecutionMode, + pubdata_builder: Rc, ) -> VmExecutionResultAndLogs; + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs; + fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]); /// Includes bytecodes that have failed to decommit. Should exclude base system contract bytecodes (default AA / EVM emulator). 
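// Editor's aside: an illustrative sketch, not part of this patch, showing how the renamed
// `TestedVm` hooks above are meant to be driven. The generic parameters
// (`Vec<StateDiffRecord>`, `Rc<dyn PubdataBuilder>`) are assumptions reconstructed from
// the imports in this hunk; the helper names are hypothetical.
fn seal_with_enforced_diffs<VM: TestedVm>(
    vm: &mut VM,
    diffs: Vec<StateDiffRecord>,
) -> VmExecutionResultAndLogs {
    // Replaces the old `execute_with_state_diffs(diffs, VmExecutionMode::Batch)`:
    // the mode argument is gone and a pubdata builder is supplied instead.
    vm.finish_batch_with_state_diffs(diffs, default_pubdata_builder())
}

fn seal_without_pubdata<VM: TestedVm>(vm: &mut VM) -> VmExecutionResultAndLogs {
    // For tests that do not care about pubdata at all, the second new hook runs the
    // batch tip without requiring a builder.
    vm.finish_batch_without_pubdata()
}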
diff --git a/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs index 87468d3e4d5..b9373e331c3 100644 --- a/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs @@ -1,9 +1,12 @@ use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160}; use super::{TestedVm, VmTester}; -use crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterfaceExt, VmRevertReason, +use crate::{ + interface::{ + CurrentExecutionState, ExecutionResult, Halt, InspectExecutionMode, TxRevertReason, + VmExecutionResultAndLogs, VmInterfaceExt, VmRevertReason, + }, + versions::testonly::default_pubdata_builder, }; #[derive(Debug, Clone)] @@ -181,7 +184,7 @@ impl VmTester { for tx_test_info in txs { self.execute_tx_and_verify(tx_test_info.clone()); } - self.vm.execute(VmExecutionMode::Batch); + self.vm.finish_batch(default_pubdata_builder()); let mut state = self.vm.get_current_execution_state(); state.used_contract_hashes.sort(); state @@ -202,7 +205,7 @@ fn execute_tx_and_verify( let inner_state_before = vm.dump_state(); vm.make_snapshot(); vm.push_transaction(tx_test_info.tx.clone()); - let result = vm.execute(VmExecutionMode::OneTx); + let result = vm.execute(InspectExecutionMode::OneTx); tx_test_info.verify_result(&result); if tx_test_info.should_rollback() { vm.rollback_to_the_latest_snapshot(); diff --git a/core/lib/multivm/src/versions/testonly/transfer.rs b/core/lib/multivm/src/versions/testonly/transfer.rs index 051826a64f2..3572adba147 100644 --- a/core/lib/multivm/src/versions/testonly/transfer.rs +++ b/core/lib/multivm/src/versions/testonly/transfer.rs @@ -3,8 +3,10 @@ use zksync_contracts::{load_contract, read_bytecode}; use zksync_types::{utils::storage_key_for_eth_balance, Address, Execute, U256}; use zksync_utils::u256_to_h256; -use super::{get_empty_storage, tester::VmTesterBuilder, ContractToDeploy, TestedVm}; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use super::{ + default_pubdata_builder, get_empty_storage, tester::VmTesterBuilder, ContractToDeploy, TestedVm, +}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; enum TestOptions { Send(U256), @@ -72,13 +74,16 @@ fn test_send_or_transfer(test_option: TestOptions) { ); vm.vm.push_transaction(tx); - let tx_result = vm.vm.execute(VmExecutionMode::OneTx); + let tx_result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !tx_result.result.is_failed(), "Transaction wasn't successful" ); - let batch_result = vm.vm.execute(VmExecutionMode::Batch); + let batch_result = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); let new_recipient_balance = vm.get_eth_balance(recipient_address); @@ -161,7 +166,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOp ); vm.vm.push_transaction(tx1); - let tx1_result = vm.vm.execute(VmExecutionMode::OneTx); + let tx1_result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !tx1_result.result.is_failed(), "Transaction 1 wasn't successful" @@ -178,13 +183,16 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOp ); vm.vm.push_transaction(tx2); - let tx2_result = vm.vm.execute(VmExecutionMode::OneTx); + let tx2_result = 
vm.vm.execute(InspectExecutionMode::OneTx); assert!( tx2_result.result.is_failed(), "Transaction 2 should have failed, but it succeeded" ); - let batch_result = vm.vm.execute(VmExecutionMode::Batch); + let batch_result = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); } diff --git a/core/lib/multivm/src/versions/testonly/upgrade.rs b/core/lib/multivm/src/versions/testonly/upgrade.rs index 9401cbb4ba8..359f19faedb 100644 --- a/core/lib/multivm/src/versions/testonly/upgrade.rs +++ b/core/lib/multivm/src/versions/testonly/upgrade.rs @@ -14,7 +14,9 @@ use super::{ get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, read_test_contract, tester::VmTesterBuilder, TestedVm, }; -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}; +use crate::interface::{ + ExecutionResult, Halt, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, +}; /// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: /// - This transaction must be the only one in block @@ -71,9 +73,9 @@ pub(crate) fn test_protocol_upgrade_is_first() { vm.vm.push_transaction(normal_l1_transaction.clone()); vm.vm.push_transaction(another_protocol_upgrade_transaction); - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( result.result, ExecutionResult::Halt { @@ -87,8 +89,8 @@ pub(crate) fn test_protocol_upgrade_is_first() { vm.vm.push_transaction(normal_l1_transaction.clone()); vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( result.result, ExecutionResult::Halt { @@ -101,8 +103,8 @@ pub(crate) fn test_protocol_upgrade_is_first() { vm.vm.push_transaction(protocol_upgrade_transaction); vm.vm.push_transaction(normal_l1_transaction); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); } @@ -137,7 +139,7 @@ pub(crate) fn test_force_deploy_upgrade() { vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "The force upgrade was not successful" @@ -186,7 +188,7 @@ pub(crate) fn test_complex_upgrader() { ); vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "The force upgrade was not successful" diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 31457fc9676..d9768652c2f 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -1,16 +1,16 @@ -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use zksync_types::Transaction; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use 
zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, - L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, + VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::old::TracerDispatcher, utils::bytecode, @@ -61,7 +61,7 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { if let Some(storage_invocations) = tracer.storage_invocations { self.vm @@ -70,7 +70,7 @@ impl VmInterface for Vm { } match execution_mode { - VmExecutionMode::OneTx => { + InspectExecutionMode::OneTx => { match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => { let enable_call_tracer = tracer @@ -93,8 +93,7 @@ impl VmInterface for Vm { .glue_into(), } } - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -184,7 +183,7 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_1_3_2::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs index 238804bc7fc..6f927c5c99a 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_1::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_1_4_1::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs index d07732ae435..c1ca93152a0 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 1c38958bb31..af483feedd7 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -1,8 +1,11 @@ +use 
std::rc::Rc; + use circuit_sequencer_api_1_4_1::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -95,9 +98,9 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(tracer, execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -128,8 +131,12 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + None, + ); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs index ffe65b5e050..6c4f737f9e9 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_1::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_1_4_2::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs index d07732ae435..c1ca93152a0 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index ca69a191e26..e7c8e7acdd9 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -1,10 +1,11 @@ -use std::mem; +use std::{mem, rc::Rc}; use circuit_sequencer_api_1_4_2::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -97,9 +98,9 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(mem::take(tracer), 
execution_mode, None) + self.inspect_inner(mem::take(tracer), execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -130,8 +131,8 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(TracerDispatcher::default(), VmExecutionMode::Batch, None); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs index 326a5789612..2f7d141cb0a 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_0::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_boojum_integration::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs index 9df9009831f..152ccad2fbc 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index bfd055a5cc8..43c9900486d 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -1,8 +1,11 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_4_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -95,9 +98,9 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -129,8 +132,8 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = 
self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_fast/pubdata.rs b/core/lib/multivm/src/versions/vm_fast/pubdata.rs index d07732ae435..c1ca93152a0 100644 --- a/core/lib/multivm/src/versions/vm_fast/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_fast/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs index f385ca2a438..2b4665f8224 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ -1,11 +1,11 @@ -use std::{any::Any, collections::HashSet, fmt}; +use std::{any::Any, collections::HashSet, fmt, rc::Rc}; use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H160, H256, U256}; use zksync_utils::h256_to_u256; use zksync_vm2::interface::{Event, HeapId, StateInterface}; use zksync_vm_interface::{ - storage::ReadStorage, CurrentExecutionState, L2BlockEnv, VmExecutionMode, - VmExecutionResultAndLogs, VmInterfaceExt, + pubdata::PubdataBuilder, storage::ReadStorage, CurrentExecutionState, L2BlockEnv, + VmExecutionMode, VmExecutionResultAndLogs, VmInterface, }; use super::Vm; @@ -99,13 +99,18 @@ impl TestedVm for Vm> { self.decommitted_hashes().collect() } - fn execute_with_state_diffs( + fn finish_batch_with_state_diffs( &mut self, diffs: Vec, - mode: VmExecutionMode, + pubdata_builder: Rc, ) -> VmExecutionResultAndLogs { self.enforce_state_diffs(diffs); - self.execute(mode) + self.finish_batch(pubdata_builder) + .block_tip_execution_result + } + + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs { + self.inspect_inner(&mut Default::default(), VmExecutionMode::Batch) } fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 88e0b10b5ea..a2114a33948 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt, mem}; +use std::{collections::HashMap, fmt, mem, rc::Rc}; use zk_evm_1_5_0::{ aux_structures::LogQuery, zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION, @@ -21,6 +21,7 @@ use zksync_vm2::{ interface::{CallframeInterface, HeapId, StateInterface, Tracer}, ExecutionEnd, FatPointer, Program, Settings, StorageSlot, VirtualMachine, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use super::{ bootloader_state::{BootloaderState, BootloaderStateSnapshot}, @@ -103,7 +104,7 @@ pub struct Vm { enforced_state_diffs: Option>, } -impl Vm { +impl Vm { pub fn custom(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { assert!( is_supported_by_fast_vm(system_env.version), @@ -533,39 +534,10 @@ impl Vm { pubdata_costs: world_diff.pubdata_costs().to_vec(), } } -} -impl VmFactory> for Vm, Tr> -where - S: ReadStorage, - Tr: Tracer + Default + 'static, -{ - fn new( - batch_env: L1BatchEnv, - system_env: SystemEnv, - storage: StoragePtr>, - ) -> Self { - let storage = 
ImmutableStorageView::new(storage); - Self::custom(batch_env, system_env, storage) - } -} - -impl VmInterface for Vm { - type TracerDispatcher = Tr; - - fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { - self.push_transaction_inner(tx, 0, true); - PushTransactionResult { - compressed_bytecodes: self - .bootloader_state - .get_last_tx_compressed_bytecodes() - .into(), - } - } - - fn inspect( + pub(crate) fn inspect_inner( &mut self, - tracer: &mut Self::TracerDispatcher, + tracer: &mut Tr, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { let mut track_refunds = false; @@ -655,6 +627,43 @@ impl VmInterface for Vm { new_known_factory_deps: None, } } +} + +impl VmFactory> for Vm, Tr> +where + S: ReadStorage, + Tr: Tracer + Default + 'static, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let storage = ImmutableStorageView::new(storage); + Self::custom(batch_env, system_env, storage) + } +} + +impl VmInterface for Vm { + type TracerDispatcher = Tr; + + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_inner(tx, 0, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } + } + + fn inspect( + &mut self, + tracer: &mut Self::TracerDispatcher, + execution_mode: InspectExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(tracer, execution_mode.into()) + } fn inspect_transaction_with_bytecode_compression( &mut self, @@ -663,7 +672,7 @@ impl VmInterface for Vm { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_inner(tx, 0, with_compression); - let result = self.inspect(tracer, VmExecutionMode::OneTx); + let result = self.inspect(tracer, InspectExecutionMode::OneTx); let compression_result = if self.has_unpublished_bytecodes() { Err(BytecodeCompressionError::BytecodeCompressionFailed) @@ -680,8 +689,8 @@ impl VmInterface for Vm { self.bootloader_state.start_new_l2_block(l2_block_env) } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut Tr::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut Tr::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index 4ba27b14bad..2085bbaba31 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -1,11 +1,15 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; -use zksync_types::{L2ChainId, U256}; +use zksync_types::{L2ChainId, ProtocolVersionId, U256}; +use zksync_vm_interface::pubdata::PubdataBuilder; use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, + interface::{ + pubdata::PubdataInput, BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, + TxExecutionMode, + }, vm_latest::{ bootloader_state::{ l2_block::BootloaderL2Block, @@ -13,7 +17,7 @@ use crate::{ utils::{apply_l2_block, apply_tx_to_memory}, }, constants::TX_DESCRIPTION_OFFSET, - 
types::internals::{PubdataInput, TransactionData}, + types::internals::TransactionData, utils::l2_blocks::assert_next_block, }, }; @@ -45,6 +49,8 @@ pub struct BootloaderState { free_tx_offset: usize, /// Information about the the pubdata that will be needed to supply to the L1Messenger pubdata_information: OnceCell, + /// Protocol version. + protocol_version: ProtocolVersionId, } impl BootloaderState { @@ -52,6 +58,7 @@ impl BootloaderState { execution_mode: TxExecutionMode, initial_memory: BootloaderMemory, first_l2_block: L2BlockEnv, + protocol_version: ProtocolVersionId, ) -> Self { let l2_block = BootloaderL2Block::new(first_l2_block, 0); Self { @@ -62,6 +69,7 @@ impl BootloaderState { execution_mode, free_tx_offset: 0, pubdata_information: Default::default(), + protocol_version, } } @@ -135,18 +143,31 @@ impl BootloaderState { pub(crate) fn last_l2_block(&self) -> &BootloaderL2Block { self.l2_blocks.last().unwrap() } + pub(crate) fn get_pubdata_information(&self) -> &PubdataInput { self.pubdata_information .get() .expect("Pubdata information is not set") } + pub(crate) fn settlement_layer_pubdata(&self, pubdata_builder: &dyn PubdataBuilder) -> Vec { + let pubdata_information = self + .pubdata_information + .get() + .expect("Pubdata information is not set"); + + pubdata_builder.settlement_layer_pubdata(pubdata_information, self.protocol_version) + } + fn last_mut_l2_block(&mut self) -> &mut BootloaderL2Block { self.l2_blocks.last_mut().unwrap() } /// Apply all bootloader transaction to the initial memory - pub(crate) fn bootloader_memory(&self) -> BootloaderMemory { + pub(crate) fn bootloader_memory( + &self, + pubdata_builder: &dyn PubdataBuilder, + ) -> BootloaderMemory { let mut initial_memory = self.initial_memory.clone(); let mut offset = 0; let mut compressed_bytecodes_offset = 0; @@ -174,11 +195,15 @@ impl BootloaderState { let pubdata_information = self .pubdata_information - .clone() - .into_inner() + .get() .expect("Empty pubdata information"); - apply_pubdata_to_memory(&mut initial_memory, pubdata_information); + apply_pubdata_to_memory( + &mut initial_memory, + pubdata_builder, + pubdata_information, + self.protocol_version, + ); initial_memory } @@ -291,4 +316,8 @@ impl BootloaderState { ); } } + + pub(crate) fn protocol_version(&self) -> ProtocolVersionId { + self.protocol_version + } } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index 23c079202c1..c409bda35c1 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -1,9 +1,12 @@ -use zksync_types::{ethabi, U256}; +use zksync_types::{ethabi, ProtocolVersionId, U256}; use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + interface::{ + pubdata::{PubdataBuilder, PubdataInput}, + BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode, + }, utils::bytecode, vm_latest::{ bootloader_state::l2_block::BootloaderL2Block, @@ -14,7 +17,6 @@ use crate::{ TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, }, - types::internals::PubdataInput, }, }; @@ -124,26 +126,61 @@ fn apply_l2_block_inner( ]) } +fn bootloader_memory_input( + pubdata_builder: &dyn PubdataBuilder, + input: &PubdataInput, + protocol_version: ProtocolVersionId, 
+) -> Vec { + let l2_da_validator_address = pubdata_builder.l2_da_validator(); + let operator_input = pubdata_builder.l1_messenger_operator_input(input, protocol_version); + + ethabi::encode(&[ + ethabi::Token::Address(l2_da_validator_address), + ethabi::Token::Bytes(operator_input), + ]) +} + pub(crate) fn apply_pubdata_to_memory( memory: &mut BootloaderMemory, - pubdata_information: PubdataInput, + pubdata_builder: &dyn PubdataBuilder, + pubdata_information: &PubdataInput, + protocol_version: ProtocolVersionId, ) { - // Skipping two slots as they will be filled by the bootloader itself: - // - One slot is for the selector of the call to the L1Messenger. - // - The other slot is for the 0x20 offset for the calldata. - let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; - - // Need to skip first word as it represents array offset - // while bootloader expects only [len || data] - let pubdata = ethabi::encode(&[ethabi::Token::Bytes( - pubdata_information.build_pubdata(true), - )])[32..] - .to_vec(); - - assert!( - pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, - "The encoded pubdata is too big" - ); + let (l1_messenger_pubdata_start_slot, pubdata) = if protocol_version.is_pre_gateway() { + // Skipping two slots as they will be filled by the bootloader itself: + // - One slot is for the selector of the call to the L1Messenger. + // - The other slot is for the 0x20 offset for the calldata. + let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; + + // Need to skip first word as it represents array offset + // while bootloader expects only [len || data] + let pubdata = ethabi::encode(&[ethabi::Token::Bytes( + pubdata_builder.l1_messenger_operator_input(pubdata_information, protocol_version), + )])[32..] + .to_vec(); + + assert!( + pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, + "The encoded pubdata is too big" + ); + + (l1_messenger_pubdata_start_slot, pubdata) + } else { + // Skipping the first slot as it will be filled by the bootloader itself: + // It is for the selector of the call to the L1Messenger. + let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 1; + + let pubdata = + bootloader_memory_input(pubdata_builder, pubdata_information, protocol_version); + + assert!( + // Note that unlike the previous version, the difference is `1`, since now it also includes the offset + pubdata.len() / 32 < OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS, + "The encoded pubdata is too big" + ); + + (l1_messenger_pubdata_start_slot, pubdata) + }; pubdata .chunks(32) diff --git a/core/lib/multivm/src/versions/vm_latest/constants.rs b/core/lib/multivm/src/versions/vm_latest/constants.rs index 01f697ec91a..c047e6ffa3b 100644 --- a/core/lib/multivm/src/versions/vm_latest/constants.rs +++ b/core/lib/multivm/src/versions/vm_latest/constants.rs @@ -26,6 +26,7 @@ pub(crate) const fn get_used_bootloader_memory_bytes(subversion: MultiVMSubversi match subversion { MultiVMSubversion::SmallBootloaderMemory => 59_000_000, MultiVMSubversion::IncreasedBootloaderMemory => 63_800_000, + MultiVMSubversion::Gateway => 63_800_000, } } @@ -201,6 +202,6 @@ pub(crate) const TX_SLOT_OVERHEAD_GAS: u32 = 10_000; /// getting often sealed due to the memory limit being reached, the L2 fair gas price will be increased. 
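// Editor's aside (illustrative, not part of this patch): the two branches added to
// `apply_pubdata_to_memory` above boil down to a different starting slot and payload
// shape depending on the protocol version:
//
//   - pre-gateway: start at `OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2`
//     (the bootloader fills the L1Messenger selector slot and the 0x20 calldata-offset
//     slot itself), and the payload is `abi.encode(bytes operatorInput)` with the leading
//     offset word stripped, i.e. only `[len || data]`;
//   - gateway: start at `OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 1`
//     (only the selector slot is pre-filled), and the payload is the full
//     `abi.encode(address l2DaValidator, bytes operatorInput)` produced by
//     `bootloader_memory_input`, so the size bound differs by one slot because the
//     payload now keeps its offset word.
//
// In both cases the operator input itself comes from the injected `PubdataBuilder`
// (`l1_messenger_operator_input`) rather than from the removed hard-coded
// `PubdataInput::build_pubdata` encoding.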
pub(crate) const TX_MEMORY_OVERHEAD_GAS: u32 = 10; -const ZK_SYNC_BYTES_PER_BLOB: usize = BLOB_CHUNK_SIZE * ELEMENTS_PER_4844_BLOCK; +pub(crate) const ZK_SYNC_BYTES_PER_BLOB: usize = BLOB_CHUNK_SIZE * ELEMENTS_PER_4844_BLOCK; pub const MAX_BLOBS_PER_BATCH: usize = 6; pub const MAX_VM_PUBDATA_PER_BATCH: usize = MAX_BLOBS_PER_BATCH * ZK_SYNC_BYTES_PER_BLOB; diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index e70f05f85ef..d9331720ce2 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -69,6 +69,7 @@ impl Vm { self.batch_env.clone(), execution_mode, self.subversion, + None, )) }), self.subversion, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs index e1dfdc7e68c..b502ea50b1a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs @@ -5,7 +5,7 @@ use zksync_types::{Address, Execute}; use super::TestedLatestVm; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterface}, tracers::CallTracer, versions::testonly::{ read_max_depth_contract, read_test_contract, ContractToDeploy, VmTesterBuilder, @@ -43,7 +43,7 @@ fn test_max_depth() { vm.vm.push_transaction(tx); let res = vm .vm - .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); + .inspect(&mut call_tracer.into(), InspectExecutionMode::OneTx); assert!(result.get().is_some()); assert!(res.result.is_failed()); } @@ -79,7 +79,7 @@ fn test_basic_behavior() { vm.vm.push_transaction(tx); let res = vm .vm - .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); + .inspect(&mut call_tracer.into(), InspectExecutionMode::OneTx); let call_tracer_result = result.get().unwrap(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index 6f748d543d3..96d59f208b0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -1,4 +1,7 @@ -use std::collections::{HashMap, HashSet}; +use std::{ + collections::{HashMap, HashSet}, + rc::Rc, +}; use zk_evm_1_5_0::{ aux_structures::{MemoryPage, Timestamp}, @@ -7,6 +10,7 @@ use zk_evm_1_5_0::{ }; use zksync_types::{writes::StateDiffRecord, StorageKey, StorageValue, Transaction, H256, U256}; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; +use zksync_vm_interface::pubdata::PubdataBuilder; use super::{HistoryEnabled, Vm}; use crate::{ @@ -75,18 +79,31 @@ impl TestedVm for TestedLatestVm { self.get_used_contracts().into_iter().collect() } - fn execute_with_state_diffs( + fn finish_batch_with_state_diffs( &mut self, diffs: Vec, - mode: VmExecutionMode, + pubdata_builder: Rc, ) -> VmExecutionResultAndLogs { let pubdata_tracer = PubdataTracer::new_with_forced_state_diffs( self.batch_env.clone(), VmExecutionMode::Batch, diffs, crate::vm_latest::MultiVMSubversion::latest(), + Some(pubdata_builder), ); - self.inspect_inner(&mut TracerDispatcher::default(), mode, Some(pubdata_tracer)) + self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + Some(pubdata_tracer), + ) + } + + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs { + self.inspect_inner( + 
&mut TracerDispatcher::default(), + VmExecutionMode::Batch, + None, + ) } fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs index 838c4e342dc..7028f7a8971 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs @@ -6,7 +6,7 @@ use zksync_types::{utils::deployed_address_create, Execute, U256}; use super::TestedLatestVm; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterface, VmInterfaceExt}, tracers::PrestateTracer, versions::testonly::{read_simple_transfer_contract, VmTesterBuilder}, vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, ToTracerPointer}, @@ -38,7 +38,7 @@ fn test_prestate_tracer() { let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); let tracer_ptr = prestate_tracer.into_tracer_pointer(); vm.vm - .inspect(&mut tracer_ptr.into(), VmExecutionMode::Batch); + .inspect(&mut tracer_ptr.into(), InspectExecutionMode::OneTx); let prestate_result = Arc::try_unwrap(prestate_tracer_result) .unwrap() @@ -61,7 +61,7 @@ fn test_prestate_tracer_diff_mode() { let tx = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce = tx.nonce().unwrap().0.into(); vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); let deployed_address = deployed_address_create(account.address, nonce); vm.test_contract = Some(deployed_address); @@ -69,7 +69,7 @@ fn test_prestate_tracer_diff_mode() { let tx2 = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce2 = tx2.nonce().unwrap().0.into(); vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); let deployed_address2 = deployed_address_create(account.address, nonce2); let account = &mut vm.rich_accounts[0]; @@ -98,7 +98,7 @@ fn test_prestate_tracer_diff_mode() { let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); let tracer_ptr = prestate_tracer.into_tracer_pointer(); vm.vm - .inspect(&mut tracer_ptr.into(), VmExecutionMode::Bootloader); + .inspect(&mut tracer_ptr.into(), InspectExecutionMode::Bootloader); let prestate_result = Arc::try_unwrap(prestate_tracer_result) .unwrap() diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index c948315266a..de674498427 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -2,13 +2,14 @@ use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; use zksync_test_account::{DeployContractsTx, TxType}; use zksync_types::{get_nonce_key, U256}; +use zksync_vm_interface::InspectExecutionMode; use super::TestedLatestVm; use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + TxExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, tracers::dynamic::vm_1_5_0::DynTracer, versions::testonly::{ @@ -80,7 +81,7 @@ fn test_layered_rollback() { TxType::L2, ); vm.vm.push_transaction(deploy_tx); - let deployment_res = 
vm.vm.execute(VmExecutionMode::OneTx); + let deployment_res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!deployment_res.result.is_failed(), "transaction failed"); let loadnext_transaction = account.get_loadnext_transaction( @@ -107,7 +108,8 @@ fn test_layered_rollback() { max_recursion_depth: 15, } .into_tracer_pointer(); - vm.vm.inspect(&mut tracer.into(), VmExecutionMode::OneTx); + vm.vm + .inspect(&mut tracer.into(), InspectExecutionMode::OneTx); let nonce_val2 = vm .vm @@ -134,7 +136,7 @@ fn test_layered_rollback() { ); vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "transaction must not fail"); } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs index 32f3984834c..998e8a13ad2 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs @@ -1,4 +1,4 @@ -use std::marker::PhantomData; +use std::{marker::PhantomData, rc::Rc}; use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zk_evm_1_5_0::{ @@ -7,9 +7,11 @@ use zk_evm_1_5_0::{ }; use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; +use zksync_vm_interface::pubdata::PubdataBuilder; use crate::{ interface::{ + pubdata::{L1MessengerL2ToL1Log, PubdataInput}, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,14 +19,14 @@ use crate::{ tracers::dynamic::vm_1_5_0::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_latest::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, constants::BOOTLOADER_HEAP_PAGE, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, tracers::{traits::VmTracer, utils::VmHook}, - types::internals::{PubdataInput, ZkSyncVmState}, + types::internals::ZkSyncVmState, utils::logs::collect_events_and_l1_system_logs_after_timestamp, vm::MultiVMSubversion, StorageOracle, @@ -41,6 +43,7 @@ pub(crate) struct PubdataTracer { // to the L1Messenger. 
enforced_state_diffs: Option>, subversion: MultiVMSubversion, + pubdata_builder: Option>, _phantom_data: PhantomData, } @@ -49,6 +52,7 @@ impl PubdataTracer { l1_batch_env: L1BatchEnv, execution_mode: VmExecutionMode, subversion: MultiVMSubversion, + pubdata_builder: Option>, ) -> Self { Self { l1_batch_env, @@ -56,6 +60,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: None, subversion, + pubdata_builder, _phantom_data: Default::default(), } } @@ -68,6 +73,7 @@ impl PubdataTracer { execution_mode: VmExecutionMode, forced_state_diffs: Vec, subversion: MultiVMSubversion, + pubdata_builder: Option>, ) -> Self { Self { l1_batch_env, @@ -75,6 +81,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: Some(forced_state_diffs), subversion, + pubdata_builder, _phantom_data: Default::default(), } } @@ -221,13 +228,22 @@ impl VmTracer for PubdataTracer { if self.pubdata_info_requested { let pubdata_input = self.build_pubdata_input(state); - // Save the pubdata for the future initial bootloader memory building - bootloader_state.set_pubdata_input(pubdata_input.clone()); - // Apply the pubdata to the current memory let mut memory_to_apply = vec![]; - apply_pubdata_to_memory(&mut memory_to_apply, pubdata_input); + apply_pubdata_to_memory( + &mut memory_to_apply, + self.pubdata_builder + .as_ref() + .expect("`pubdata_builder` is required to finish batch") + .as_ref(), + &pubdata_input, + bootloader_state.protocol_version(), + ); + + // Save the pubdata for the future initial bootloader memory building + bootloader_state.set_pubdata_input(pubdata_input); + state.memory.populate_page( BOOTLOADER_HEAP_PAGE as usize, memory_to_apply, diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs index 7dc60ec5b0f..601b7b8bd01 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs @@ -1,9 +1,7 @@ -pub(crate) use pubdata::PubdataInput; pub(crate) use snapshot::VmSnapshot; pub(crate) use transaction_data::TransactionData; pub(crate) use vm_state::new_vm_state; pub use vm_state::ZkSyncVmState; -mod pubdata; mod snapshot; mod transaction_data; mod vm_state; diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs deleted file mode 100644 index d07732ae435..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs +++ /dev/null @@ -1,123 +0,0 @@ -use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; - -use crate::utils::events::L1MessengerL2ToL1Log; - -/// Struct based on which the pubdata blob is formed -#[derive(Debug, Clone, Default)] -pub(crate) struct PubdataInput { - pub(crate) user_logs: Vec, - pub(crate) l2_to_l1_messages: Vec>, - pub(crate) published_bytecodes: Vec>, - pub(crate) state_diffs: Vec, -} - -impl PubdataInput { - pub(crate) fn build_pubdata(self, with_uncompressed_state_diffs: bool) -> Vec { - let mut l1_messenger_pubdata = vec![]; - - let PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - } = self; - - // Encoding user L2->L1 logs. - // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... 
|| l2tol1logs[n]]` - l1_messenger_pubdata.extend((user_logs.len() as u32).to_be_bytes()); - for l2tol1log in user_logs { - l1_messenger_pubdata.extend(l2tol1log.packed_encoding()); - } - - // Encoding L2->L1 messages - // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` - l1_messenger_pubdata.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); - for message in l2_to_l1_messages { - l1_messenger_pubdata.extend((message.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(message); - } - - // Encoding bytecodes - // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` - l1_messenger_pubdata.extend((published_bytecodes.len() as u32).to_be_bytes()); - for bytecode in published_bytecodes { - l1_messenger_pubdata.extend((bytecode.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(bytecode); - } - - // Encoding state diffs - // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: intial + repeated) as u32 || sorted state diffs by ]` - let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); - l1_messenger_pubdata.extend(state_diffs_compressed); - - if with_uncompressed_state_diffs { - l1_messenger_pubdata.extend((state_diffs.len() as u32).to_be_bytes()); - for state_diff in state_diffs { - l1_messenger_pubdata.extend(state_diff.encode_padded()); - } - } - - l1_messenger_pubdata - } -} - -#[cfg(test)] -mod tests { - use zksync_system_constants::{ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS}; - use zksync_utils::u256_to_h256; - - use super::*; - - #[test] - fn test_basic_pubdata_building() { - // Just using some constant addresses for tests - let addr1 = BOOTLOADER_ADDRESS; - let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; - - let user_logs = vec![L1MessengerL2ToL1Log { - l2_shard_id: 0, - is_service: false, - tx_number_in_block: 0, - sender: addr1, - key: 1.into(), - value: 128.into(), - }]; - - let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; - - let published_bytecodes = vec![hex::decode("aaaabbbb").unwrap()]; - - // For covering more cases, we have two state diffs: - // One with enumeration index present (and so it is a repeated write) and the one without it. 
- let state_diffs = vec![ - StateDiffRecord { - address: addr2, - key: 155.into(), - derived_key: u256_to_h256(125.into()).0, - enumeration_index: 12, - initial_value: 11.into(), - final_value: 12.into(), - }, - StateDiffRecord { - address: addr2, - key: 156.into(), - derived_key: u256_to_h256(126.into()).0, - enumeration_index: 0, - initial_value: 0.into(), - final_value: 14.into(), - }, - ]; - - let input = PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - }; - - let pubdata = - ethabi::encode(&[ethabi::Token::Bytes(input.build_pubdata(true))])[32..].to_vec(); - - assert_eq!(hex::encode(pubdata), "00000000000000000000000000000000000000000000000000000000000002c700000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000004aaaabbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); - } -} diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs index cb4b13eecdf..d25f66361f1 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs @@ -191,6 +191,7 @@ pub(crate) fn new_vm_state( system_env.execution_mode, bootloader_initial_memory, first_l2_block, + system_env.version, ); (vm, bootloader_state) diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 3a36b008e88..ef6cee454a8 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::{collections::HashMap, rc::Rc}; use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ @@ -7,6 +7,7 @@ use zksync_types::{ Transaction, H256, }; use zksync_utils::{be_words_to_bytes, h256_to_u256, u256_to_h256}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -21,7 +22,7 @@ use crate::{ vm_latest::{ bootloader_state::BootloaderState, old_vm::{events::merge_events, history_recorder::HistoryEnabled}, - tracers::dispatcher::TracerDispatcher, + 
tracers::{dispatcher::TracerDispatcher, PubdataTracer}, types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}, }, HistoryMode, @@ -38,6 +39,8 @@ pub(crate) enum MultiVMSubversion { SmallBootloaderMemory, /// The final correct version of v1.5.0 IncreasedBootloaderMemory, + /// VM for post-gateway versions. + Gateway, } impl MultiVMSubversion { @@ -55,6 +58,7 @@ impl TryFrom for MultiVMSubversion { match value { VmVersion::Vm1_5_0SmallBootloaderMemory => Ok(Self::SmallBootloaderMemory), VmVersion::Vm1_5_0IncreasedBootloaderMemory => Ok(Self::IncreasedBootloaderMemory), + VmVersion::VmGateway => Ok(Self::Gateway), _ => Err(VmVersionIsNotVm150Error), } } @@ -148,9 +152,9 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(tracer, execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -182,19 +186,30 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + let pubdata_tracer = Some(PubdataTracer::new( + self.batch_env.clone(), + VmExecutionMode::Batch, + self.subversion, + Some(pubdata_builder.clone()), + )); + + let result = self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + pubdata_tracer, + ); let execution_state = self.get_current_execution_state(); - let bootloader_memory = self.bootloader_state.bootloader_memory(); + let bootloader_memory = self + .bootloader_state + .bootloader_memory(pubdata_builder.as_ref()); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, final_bootloader_memory: Some(bootloader_memory), pubdata_input: Some( self.bootloader_state - .get_pubdata_information() - .clone() - .build_pubdata(false), + .settlement_layer_pubdata(pubdata_builder.as_ref()), ), state_diffs: Some( self.bootloader_state diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 3d57d1cd543..55afeed17cd 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -1,13 +1,15 @@ +use std::rc::Rc; + use zksync_types::{vm::VmVersion, Transaction}; use zksync_utils::h256_to_u256; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_m5::{ storage::Storage, @@ -75,10 +77,10 @@ impl VmInterface for Vm { fn inspect( &mut self, _tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { match execution_mode { - VmExecutionMode::OneTx => match self.system_env.execution_mode { + InspectExecutionMode::OneTx => match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => self.vm.execute_next_tx().glue_into(), 
TxExecutionMode::EstimateFee | TxExecutionMode::EthCall => self .vm @@ -87,8 +89,7 @@ impl VmInterface for Vm { ) .glue_into(), }, - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -110,11 +111,11 @@ impl VmInterface for Vm { // Bytecode compression isn't supported ( Ok(vec![].into()), - self.inspect(&mut (), VmExecutionMode::OneTx), + self.inspect(&mut (), InspectExecutionMode::OneTx), ) } - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_m5::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 1ee6aa61822..4c67a218418 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -1,13 +1,14 @@ -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use zksync_types::{vm::VmVersion, Transaction}; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, @@ -88,7 +89,7 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { if let Some(storage_invocations) = tracer.storage_invocations { self.vm @@ -97,7 +98,7 @@ impl VmInterface for Vm { } match execution_mode { - VmExecutionMode::OneTx => match self.system_env.execution_mode { + InspectExecutionMode::OneTx => match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => { let enable_call_tracer = tracer.call_tracer.is_some(); let result = self.vm.execute_next_tx( @@ -116,8 +117,7 @@ impl VmInterface for Vm { ) .glue_into(), }, - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -207,7 +207,7 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_m6::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 2bcd68bec04..81b0c52cce5 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -1,5 +1,8 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -88,9 +91,9 @@ impl 
VmInterface for Vm { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(dispatcher, execution_mode) + self.inspect_inner(dispatcher, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -105,7 +108,7 @@ impl VmInterface for Vm { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); - let result = self.inspect(dispatcher, VmExecutionMode::OneTx); + let result = self.inspect(dispatcher, InspectExecutionMode::OneTx); if self.has_unpublished_bytecodes() { ( Err(BytecodeCompressionError::BytecodeCompressionFailed), @@ -122,8 +125,8 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 497128c64bd..a2d18e10de4 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -1,5 +1,8 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, @@ -88,9 +91,9 @@ impl VmInterface for Vm { fn inspect( &mut self, tracer: &mut TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -122,8 +125,8 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 43a6c48aa9c..5ff27046377 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,7 +1,8 @@ -use std::mem; +use std::{mem, rc::Rc}; use zksync_types::{vm::VmVersion, ProtocolVersionId, Transaction}; use zksync_vm2::interface::Tracer; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::history_mode::HistoryMode, @@ -9,8 +10,8 @@ use crate::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, utils::ShadowVm, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + SystemEnv, 
VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::TracerDispatcher, vm_latest::HistoryEnabled, @@ -63,7 +64,7 @@ impl VmInterface for LegacyVmInstance { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { dispatch_legacy_vm!(self.inspect(&mut mem::take(dispatcher).into(), execution_mode)) } @@ -87,8 +88,8 @@ impl VmInterface for LegacyVmInstance { } /// Return the results of execution of all batch - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_legacy_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_legacy_vm!(self.finish_batch(pubdata_builder)) } } @@ -206,6 +207,15 @@ impl LegacyVmInstance { ); Self::Vm1_5_0(vm) } + VmVersion::VmGateway => { + let vm = crate::vm_latest::Vm::new_with_subversion( + l1_batch_env, + system_env, + storage_view, + crate::vm_latest::MultiVMSubversion::Gateway, + ); + Self::Vm1_5_0(vm) + } } } @@ -253,7 +263,7 @@ impl VmInterface for FastVmInsta fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { match self { Self::Fast(vm) => vm.inspect(&mut tracer.1, execution_mode), @@ -283,8 +293,8 @@ impl VmInterface for FastVmInsta } } - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_fast_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_fast_vm!(self.finish_batch(pubdata_builder)) } } diff --git a/core/lib/protobuf_config/src/contracts.rs b/core/lib/protobuf_config/src/contracts.rs index 84c40436750..84f03c5afe3 100644 --- a/core/lib/protobuf_config/src/contracts.rs +++ b/core/lib/protobuf_config/src/contracts.rs @@ -76,6 +76,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("l2_shared_bridge_addr")?, + l2_legacy_shared_bridge_addr: l2 + .legacy_shared_bridge_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_legacy_shared_bridge_addr")?, l1_weth_bridge_proxy_addr: weth_bridge .as_ref() .and_then(|bridge| bridge.l1_address.as_ref().map(|x| parse_h160(x))) @@ -107,6 +113,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("chain_admin_addr")?, + l2_da_validator_addr: l2 + .da_validator_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_da_validator_addr")?, }) } @@ -142,6 +154,10 @@ impl ProtoRepr for proto::Contracts { }), l2: Some(proto::L2 { testnet_paymaster_addr: this.l2_testnet_paymaster_addr.map(|a| format!("{:?}", a)), + da_validator_addr: this.l2_da_validator_addr.map(|a| format!("{:?}", a)), + legacy_shared_bridge_addr: this + .l2_legacy_shared_bridge_addr + .map(|a| format!("{:?}", a)), }), bridges: Some(proto::Bridges { shared: Some(proto::Bridge { diff --git a/core/lib/protobuf_config/src/proto/config/contracts.proto b/core/lib/protobuf_config/src/proto/config/contracts.proto index f4488c7901a..6ab03e6aa11 100644 --- a/core/lib/protobuf_config/src/proto/config/contracts.proto +++ b/core/lib/protobuf_config/src/proto/config/contracts.proto @@ -21,6 +21,8 @@ message L1 { message L2 { optional string testnet_paymaster_addr = 1; // optional; H160 + optional string da_validator_addr = 2; // optional; H160 + optional string legacy_shared_bridge_addr = 3; // optional; H160 } message Bridge { diff --git 
a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 97de24f42da..cfc1d4a0d55 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -5,7 +5,7 @@ use serde_with::{serde_as, Bytes}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_object_store::{_reexports::BoxedError, serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ - basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, + basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, commitment::PubdataParams, witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, }; @@ -269,6 +269,7 @@ pub struct V1TeeVerifierInput { pub l2_blocks_execution_data: Vec, pub l1_batch_env: L1BatchEnv, pub system_env: SystemEnv, + pub pubdata_params: PubdataParams, } impl V1TeeVerifierInput { @@ -278,6 +279,7 @@ impl V1TeeVerifierInput { l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Self { V1TeeVerifierInput { vm_run_data, @@ -285,6 +287,7 @@ impl V1TeeVerifierInput { l2_blocks_execution_data, l1_batch_env, system_env, + pubdata_params, } } } diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs index 2c9b1440af2..cf68d2e181a 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -182,6 +182,7 @@ pub(super) fn mock_l2_block_header(l2_block_number: L2BlockNumber) -> L2BlockHea virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), } } diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index decb2a0f403..a12508f615f 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -88,6 +88,7 @@ pub(crate) async fn create_l2_block( virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index ffe3a548a02..140085dbb9f 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -15,6 +15,7 @@ use zksync_multivm::{ FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, + pubdata_builders::pubdata_params_to_builder, vm_latest::HistoryEnabled, LegacyVmInstance, }; @@ -22,7 +23,8 @@ use zksync_prover_interface::inputs::{ StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; use zksync_types::{ - block::L2BlockExecutionData, L1BatchNumber, StorageLog, StorageValue, Transaction, H256, + block::L2BlockExecutionData, commitment::PubdataParams, L1BatchNumber, StorageLog, + StorageValue, Transaction, H256, }; use zksync_utils::u256_to_h256; @@ -88,7 +90,7 @@ impl Verify for V1TeeVerifierInput { let storage_snapshot = StorageSnapshot::new(storage, factory_deps); let storage_view = StorageView::new(storage_snapshot).to_rc_ptr(); let vm = LegacyVmInstance::new(self.l1_batch_env, self.system_env, storage_view); - let vm_out = execute_vm(self.l2_blocks_execution_data, vm)?; + let vm_out = execute_vm(self.l2_blocks_execution_data, vm, self.pubdata_params)?; let block_output_with_proofs = get_bowp(self.merkle_paths)?; @@ -178,6 +180,7 @@ fn get_bowp(witness_input_merkle_paths: WitnessInputMerklePaths) -> Result( l2_blocks_execution_data: Vec, mut vm: LegacyVmInstance, + pubdata_params: PubdataParams, ) -> 
anyhow::Result { let next_l2_blocks_data = l2_blocks_execution_data.iter().skip(1); @@ -206,7 +209,7 @@ fn execute_vm( tracing::trace!("about to vm.finish_batch()"); - Ok(vm.finish_batch()) + Ok(vm.finish_batch(pubdata_params_to_builder(pubdata_params))) } /// Map `LogQuery` and `TreeLogEntry` to a `TreeInstruction` @@ -356,6 +359,7 @@ mod tests { default_validation_computational_gas_limit: 0, chain_id: Default::default(), }, + Default::default(), ); let tvi = TeeVerifierInput::new(tvi); let serialized = bincode::serialize(&tvi).expect("Failed to serialize TeeVerifierInput."); diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index 209ab7c24f9..daaa5651a03 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -1,7 +1,7 @@ //! API types related to the External Node specific methods. use serde::{Deserialize, Serialize}; -use zksync_basic_types::{Address, L1BatchNumber, L2BlockNumber, H256}; +use zksync_basic_types::{commitment::PubdataParams, Address, L1BatchNumber, L2BlockNumber, H256}; use zksync_contracts::BaseSystemContractsHashes; use crate::ProtocolVersionId; @@ -42,6 +42,8 @@ pub struct SyncBlock { pub hash: Option, /// Version of the protocol used for this block. pub protocol_version: ProtocolVersionId, + /// Pubdata params used for this batch + pub pubdata_params: Option, } /// Global configuration of the consensus served by the main node to the external nodes. diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index b8f8a2f0584..a4eb6460553 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -206,6 +206,7 @@ pub struct BridgeAddresses { pub l2_erc20_default_bridge: Option
<Address>, pub l1_weth_bridge: Option<Address>
<Address>, pub l2_weth_bridge: Option<Address>
<Address>, + pub l2_legacy_shared_bridge: Option<Address>
, } #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 9211a6f1d8c..310e3a73b8e 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -1,7 +1,7 @@ use std::{fmt, ops}; use serde::{Deserialize, Serialize}; -use zksync_basic_types::{Address, Bloom, BloomInput, H256, U256}; +use zksync_basic_types::{commitment::PubdataParams, Address, Bloom, BloomInput, H256, U256}; use zksync_contracts::BaseSystemContractsHashes; use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER; use zksync_utils::concat_and_hash; @@ -113,6 +113,7 @@ pub struct L2BlockHeader { /// amount of gas can be spent on pubdata. pub gas_limit: u64, pub logs_bloom: Bloom, + pub pubdata_params: PubdataParams, } /// Structure that represents the data is returned by the storage oracle during batch execution. diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 759ee8947ba..40532a1e589 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -9,11 +9,12 @@ use std::{collections::HashMap, convert::TryFrom}; use serde::{Deserialize, Serialize}; -pub use zksync_basic_types::commitment::L1BatchCommitmentMode; +pub use zksync_basic_types::commitment::{L1BatchCommitmentMode, PubdataParams}; use zksync_contracts::BaseSystemContractsHashes; +use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher}; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_system_constants::{ - KNOWN_CODES_STORAGE_ADDRESS, L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY, + KNOWN_CODES_STORAGE_ADDRESS, L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY_PRE_GATEWAY, ZKPORTER_IS_AVAILABLE, }; use zksync_utils::u256_to_h256; @@ -22,8 +23,8 @@ use crate::{ blob::num_blobs_required, block::{L1BatchHeader, L1BatchTreeData}, l2_to_l1_log::{ - l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes, L2ToL1Log, SystemL2ToL1Log, - UserL2ToL1Log, + l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes_pre_gateway, L2ToL1Log, + SystemL2ToL1Log, UserL2ToL1Log, }, web3::keccak256, writes::{ @@ -92,6 +93,16 @@ pub struct L1BatchMetadata { /// commitment to the transactions in the batch. pub bootloader_initial_content_commitment: Option, pub state_diffs_compressed: Vec, + /// Hash of packed state diffs. It's present only for post-gateway batches. + pub state_diff_hash: Option, + /// Root hash of the local logs tree. Tree contains logs that were produced on this chain. + /// It's present only for post-gateway batches. + pub local_root: Option, + /// Root hash of the aggregated logs tree. Tree aggregates `local_root`s of chains that settle on this chain. + /// It's present only for post-gateway batches. + pub aggregation_root: Option, + /// Data Availability inclusion proof, that has to be verified on the settlement layer. 
+ pub da_inclusion_data: Option>, } impl L1BatchMetadata { @@ -265,6 +276,13 @@ pub struct L1BatchAuxiliaryCommonOutput { protocol_version: ProtocolVersionId, } +#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)] +#[cfg_attr(test, derive(Serialize, Deserialize))] +pub struct BlobHash { + pub commitment: H256, + pub linear_hash: H256, +} + /// Block Output produced by Virtual Machine #[derive(Debug, Clone, Eq, PartialEq)] #[cfg_attr(test, derive(Serialize, Deserialize))] @@ -283,8 +301,9 @@ pub enum L1BatchAuxiliaryOutput { state_diffs_compressed: Vec, state_diffs_hash: H256, aux_commitments: AuxCommitments, - blob_linear_hashes: Vec, - blob_commitments: Vec, + blob_hashes: Vec, + aggregation_root: H256, + local_root: H256, }, } @@ -333,17 +352,23 @@ impl L1BatchAuxiliaryOutput { system_logs, state_diffs, aux_commitments, - blob_commitments, + blob_hashes, + aggregation_root, } => { let l2_l1_logs_compressed = serialize_commitments(&common_input.l2_to_l1_logs); let merkle_tree_leaves = l2_l1_logs_compressed .chunks(UserL2ToL1Log::SERIALIZED_SIZE) .map(|chunk| <[u8; UserL2ToL1Log::SERIALIZED_SIZE]>::try_from(chunk).unwrap()); - let l2_l1_logs_merkle_root = MiniMerkleTree::new( + let local_root = MiniMerkleTree::new( merkle_tree_leaves, Some(l2_to_l1_logs_tree_size(common_input.protocol_version)), ) .merkle_root(); + let l2_l1_logs_merkle_root = if common_input.protocol_version.is_pre_gateway() { + local_root + } else { + KeccakHasher.compress(&local_root, &aggregation_root) + }; let common_output = L1BatchAuxiliaryCommonOutput { l2_l1_logs_merkle_root, @@ -357,22 +382,33 @@ impl L1BatchAuxiliaryOutput { let state_diffs_hash = H256::from(keccak256(&(state_diffs_packed))); let state_diffs_compressed = compress_state_diffs(state_diffs); - let blob_linear_hashes = - parse_system_logs_for_blob_hashes(&common_input.protocol_version, &system_logs); - // Sanity checks. System logs are empty for the genesis batch, so we can't do checks for it. 
if !system_logs.is_empty() { - let state_diff_hash_from_logs = system_logs - .iter() - .find_map(|log| { - (log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY.into())) - .then_some(log.0.value) - }) - .expect("Failed to find state diff hash in system logs"); - assert_eq!( - state_diffs_hash, state_diff_hash_from_logs, - "State diff hash mismatch" - ); + if common_input.protocol_version.is_pre_gateway() { + let state_diff_hash_from_logs = system_logs + .iter() + .find_map(|log| { + (log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY_PRE_GATEWAY.into())) + .then_some(log.0.value) + }) + .expect("Failed to find state diff hash in system logs"); + assert_eq!( + state_diffs_hash, state_diff_hash_from_logs, + "State diff hash mismatch" + ); + + let blob_linear_hashes_from_logs = + parse_system_logs_for_blob_hashes_pre_gateway( + &common_input.protocol_version, + &system_logs, + ); + let blob_linear_hashes: Vec<_> = + blob_hashes.iter().map(|b| b.linear_hash).collect(); + assert_eq!( + blob_linear_hashes, blob_linear_hashes_from_logs, + "Blob linear hashes mismatch" + ); + } let l2_to_l1_logs_tree_root_from_logs = system_logs .iter() @@ -387,25 +423,45 @@ impl L1BatchAuxiliaryOutput { ); } - assert_eq!( - blob_linear_hashes.len(), - blob_commitments.len(), - "Blob linear hashes and commitments have different lengths" - ); - Self::PostBoojum { common: common_output, system_logs_linear_hash, state_diffs_compressed, state_diffs_hash, aux_commitments, - blob_linear_hashes, - blob_commitments, + blob_hashes, + local_root, + aggregation_root, } } } } + pub fn local_root(&self) -> H256 { + match self { + Self::PreBoojum { common, .. } => common.l2_l1_logs_merkle_root, + Self::PostBoojum { local_root, .. } => *local_root, + } + } + + pub fn aggregation_root(&self) -> H256 { + match self { + Self::PreBoojum { .. } => H256::zero(), + Self::PostBoojum { + aggregation_root, .. + } => *aggregation_root, + } + } + + pub fn state_diff_hash(&self) -> H256 { + match self { + Self::PreBoojum { .. } => H256::zero(), + Self::PostBoojum { + state_diffs_hash, .. + } => *state_diffs_hash, + } + } + pub fn to_bytes(&self) -> Vec { let mut result = Vec::new(); @@ -426,8 +482,7 @@ impl L1BatchAuxiliaryOutput { system_logs_linear_hash, state_diffs_hash, aux_commitments, - blob_linear_hashes, - blob_commitments, + blob_hashes, .. 
} => { result.extend(system_logs_linear_hash.as_bytes()); @@ -439,9 +494,9 @@ impl L1BatchAuxiliaryOutput { ); result.extend(aux_commitments.events_queue_commitment.as_bytes()); - for i in 0..blob_commitments.len() { - result.extend(blob_linear_hashes[i].as_bytes()); - result.extend(blob_commitments[i].as_bytes()); + for b in blob_hashes { + result.extend(b.linear_hash.as_bytes()); + result.extend(b.commitment.as_bytes()); } } } @@ -637,6 +692,9 @@ impl L1BatchCommitment { aux_commitments: self.aux_commitments(), compressed_initial_writes, compressed_repeated_writes, + local_root: self.auxiliary_output.local_root(), + aggregation_root: self.auxiliary_output.aggregation_root(), + state_diff_hash: self.auxiliary_output.state_diff_hash(), } } } @@ -673,7 +731,8 @@ pub enum CommitmentInput { system_logs: Vec, state_diffs: Vec, aux_commitments: AuxCommitments, - blob_commitments: Vec, + blob_hashes: Vec, + aggregation_root: H256, }, } @@ -715,11 +774,11 @@ impl CommitmentInput { events_queue_commitment: H256::zero(), bootloader_initial_content_commitment: H256::zero(), }, - blob_commitments: { + blob_hashes: { let num_blobs = num_blobs_required(&protocol_version); - - vec![H256::zero(); num_blobs] + vec![Default::default(); num_blobs] }, + aggregation_root: H256::zero(), } } } @@ -734,4 +793,7 @@ pub struct L1BatchCommitmentArtifacts { pub compressed_repeated_writes: Option>, pub zkporter_is_available: bool, pub aux_commitments: Option, + pub aggregation_root: H256, + pub local_root: H256, + pub state_diff_hash: H256, } diff --git a/core/lib/types/src/commitment/tests/mod.rs b/core/lib/types/src/commitment/tests/mod.rs index 33fb0142b04..a95318309a2 100644 --- a/core/lib/types/src/commitment/tests/mod.rs +++ b/core/lib/types/src/commitment/tests/mod.rs @@ -55,3 +55,8 @@ fn post_boojum_1_5_0() { fn post_boojum_1_5_0_with_evm() { run_test("post_boojum_1_5_0_test_with_evm"); } + +#[test] +fn post_gateway() { + run_test("post_gateway_test"); +} diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json index c5eccbce038..c854a6e77d8 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json @@ -190,10 +190,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -248,14 +255,18 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - 
"0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0xe52d57bd64cabf6c588b30365512da2bf10912c106e7a06483b236d05ac4037e" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json index 4983bbeca14..96aa8ab842c 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json @@ -206,10 +206,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -264,14 +271,18 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x0b6e1ad4643cc2bee06b5e173184ec822d80826e5720f5715172898350433299" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json index 59a24b7c90c..ed61ea67cef 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json @@ -238,24 +238,73 @@ 
"events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -310,42 +359,74 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000007", - "0x0000000000000000000000000000000000000000000000000000000000000008", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json index 4e8c0e0814a..a41aa33c04a 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json @@ -239,24 +239,73 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", 
"bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -312,42 +361,74 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000007", - "0x0000000000000000000000000000000000000000000000000000000000000008", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_gateway_test.json b/core/lib/types/src/commitment/tests/post_gateway_test.json new file mode 100644 index 00000000000..4b598ff59f4 --- /dev/null +++ b/core/lib/types/src/commitment/tests/post_gateway_test.json @@ -0,0 +1,1977 @@ +{ + "hashes": { + "pass_through_data": "0x756c1660f611302295f6a56a8f4b9d68f2ebf51f8278f225d6b7e64bb9364be0", + "aux_output": "0xcccf1ef8192054cb1b5fb668868ce4e069a695a1394b9486ebd3031cec12fe12", + "meta_parameters": "0xdb298fa55c75b134333cee0b39f77aea956553a1eb861a5777dc7a66ad7a55b9", 
+ "commitment": "0xd6615c5447c817a320c69c6a5af12c472fd4d5bc2ef4de7806d40afe384ddc27" + }, + "auxiliary_output": { + "PostBoojum": { + "common": { + "l2_l1_logs_merkle_root": "0x38eaeef3afe69b6f6b2fa22c92da8137f1e405a1e1861b7de7cfa30c7d7462dd", + "protocol_version": "Version27" + }, + "system_logs_linear_hash": "0xe8460ce1ed47b77cfee3cadf803aa089c144c506ea2bdd358a6a38ff2c7bc8e3", + "state_diffs_compressed": [ + 1,0,27,89,4,0,148,112,120,89,162,183,230,11,175,17,100,223,232,175,83,47,195,198,157,29,129,145,197,186,61,127,17,109,250,141,181,206,45,0,1,0,1,67,6,51,197,115,134,143,51,94,49,6,252,85,139,173,197,6,118,46,184,24,78,249,206,120,93,239,110,206,130,208,215,121,46,249,196,126,160,123,216,26,86,45,8,246,35,74,8,171,141,141,223,145,137,150,142,180,236,158,154,37,0,1,0,14,207,174,184,55,189,9,139,207,155,222,111,194,204,216,232,169,53,90,27,112,230,1,172,24,205,8,158,179,8,246,11,47,22,184,171,230,29,125,57,179,213,44,191,157,128,184,167,253,5,55,217,60,33,8,75,147,188,5,4,171,60,0,1,0,0,195,40,243,40,221,130,10,29,214,152,4,122,127,125,73,135,77,130,89,25,110,39,53,23,67,10,248,244,128,203,204,98,199,195,136,172,152,215,47,208,131,209,215,32,206,186,255,203,162,198,108,114,94,200,185,197,197,240,116,111,138,0,1,0,0,91,142,88,121,116,4,61,89,191,251,246,50,208,32,231,100,149,154,190,98,228,194,56,216,223,46,98,178,181,235,143,74,199,189,78,241,151,159,154,102,86,114,178,92,208,123,30,61,99,122,89,162,199,107,26,34,232,91,117,146,65,0,1,0,1,67,6,51,197,115,134,143,51,94,49,6,252,85,139,173,197,6,118,46,184,24,78,249,206,120,93,239,110,206,66,202,106,148,168,163,117,186,10,227,150,70,185,29,164,88,23,175,73,33,116,119,174,107,73,193,3,53,191,78,11,115,0,1,0,1,179,126,95,140,175,172,146,75,62,102,55,121,225,44,128,218,206,138,2,177,210,115,174,112,143,39,90,214,42,71,164,52,102,233,54,181,135,193,191,158,16,243,13,105,123,17,66,228,89,233,6,51,219,18,27,114,127,245,180,121,43,0,1,0,0,103,85,79,83,198,102,14,83,189,151,41,240,10,238,1,137,1,13,254,184,104,250,60,189,72,18,50,72,12,95,233,138,250,154,14,193,124,34,99,2,123,232,159,70,229,238,83,58,30,33,169,31,255,76,177,68,204,74,239,33,188,0,1,0,5,181,151,235,2,5,139,35,12,238,106,156,223,71,5,65,13,21,26,209,234,106,211,226,222,119,132,26,207,194,142,107,134,4,159,177,116,20,22,26,103,250,177,44,27,34,144,68,85,154,16,112,202,176,23,66,216,18,18,139,156,190,0,1,0,0,103,85,79,83,198,102,14,83,189,151,41,240,10,238,1,137,1,13,254,184,104,250,60,189,72,18,50,72,12,140,78,248,13,214,150,89,51,34,40,248,136,164,249,180,131,12,12,50,27,140,170,45,67,53,35,250,200,97,150,181,207,0,1,0,4,139,241,177,101,244,200,80,208,30,184,30,92,4,15,112,234,1,220,160,25,231,128,168,127,128,236,120,151,195,235,96,109,232,32,223,19,108,89,177,255,121,225,223,146,96,66,108,164,143,175,146,74,70,53,36,33,31,183,195,161,165,0,1,0,2,1,233,35,243,228,122,223,167,196,240,246,30,168,221,66,86,246,109,16,153,151,11,205,49,72,146,134,73,23,95,127,60,252,81,90,137,254,204,8,97,6,46,254,57,252,48,104,36,27,116,128,26,163,170,147,245,100,172,98,242,54,0,1,0,6,29,196,13,11,194,203,129,144,94,107,193,66,30,189,75,216,183,154,172,184,218,79,157,113,69,178,136,103,79,207,14,96,187,125,119,111,198,230,184,241,1,19,161,190,119,25,192,44,34,151,163,108,216,124,11,59,35,121,140,74,95,0,1,0,7,15,28,212,226,131,6,7,175,48,25,39,67,145,15,223,171,241,221,0,74,128,115,148,66,117,129,157,29,254,53,89,195,90,167,22,152,246,194,202,70,67,239,232,80,69,169,73,79,38,45,119,238,103,193,61,215,52,230,38,48,90,0,1,0,1,67,6,51,197,115,134,143,51,94,49,6,252,85,139,173,197,6,118,46,184,24,78,249,2
06,120,93,239,110,206,186,93,97,46,65,80,43,253,205,126,211,179,176,210,212,177,245,200,248,185,15,209,21,42,187,224,222,192,14,162,61,7,0,1,0,4,139,241,177,101,244,200,80,208,30,184,30,92,4,15,112,234,1,220,160,25,231,128,168,127,128,236,120,151,195,44,61,141,160,220,54,28,84,148,218,146,175,212,98,94,116,25,190,241,121,131,189,209,145,214,33,89,62,212,173,57,47,0,1,0,3,35,183,38,99,20,185,211,236,56,125,91,205,149,144,165,11,224,14,120,136,55,202,90,136,13,151,227,238,131,48,100,113,86,255,138,164,229,100,8,99,14,34,251,194,115,119,250,250,242,7,188,204,248,210,254,18,115,9,165,229,233,0,1,0,2,21,205,111,48,109,117,125,93,47,219,180,96,198,15,41,133,132,23,158,236,192,113,150,199,174,142,79,141,100,200,51,40,178,38,74,180,112,167,221,220,163,38,200,255,61,159,78,76,252,60,226,78,168,221,216,201,180,12,20,188,185,0,1,0,2,1,233,35,243,228,122,223,167,196,240,246,30,168,221,66,86,246,109,16,153,151,11,205,49,72,146,134,73,23,229,176,77,5,169,51,88,54,33,49,122,209,137,227,159,45,116,33,7,146,238,29,46,153,91,171,175,162,128,71,14,27,0,1,0,1,83,6,251,15,11,95,222,33,153,150,85,28,128,114,198,113,27,186,83,0,178,102,154,235,15,14,76,116,69,250,253,202,115,87,157,171,40,23,48,73,193,157,78,81,69,162,232,29,120,68,42,125,135,121,254,156,149,143,198,173,119,0,1,0,0,53,186,49,237,22,194,17,96,120,10,192,151,99,191,213,147,177,116,143,105,230,131,169,251,16,146,26,164,157,188,67,139,78,87,51,241,113,6,164,3,59,144,48,243,76,127,49,1,147,79,102,218,253,36,37,149,91,92,247,35,64,0,1,0,0,237,48,107,41,18,142,6,90,253,206,115,235,200,224,236,208,140,217,24,253,192,247,76,3,246,77,81,94,72,6,115,118,133,155,130,106,66,115,187,68,69,230,28,222,77,91,95,90,23,4,86,255,161,95,247,195,108,233,152,241,190,0,1,0,1,63,74,44,150,76,113,212,159,45,136,118,161,30,238,75,244,232,209,146,49,101,47,199,117,178,202,228,58,33,129,4,114,76,225,212,61,128,125,223,69,48,213,107,167,249,183,181,194,21,67,99,215,247,166,215,108,189,158,61,249,130,0,1,0,0,103,33,171,145,26,65,216,181,37,2,234,76,223,66,236,153,229,229,41,190,106,62,102,243,173,178,20,60,90,51,232,118,240,225,158,242,19,13,216,95,254,79,35,196,212,101,148,164,24,219,221,10,181,111,253,164,76,93,72,246,206,0,1,0,5,187,116,113,160,1,107,28,51,15,118,122,94,115,173,12,51,9,52,105,76,173,0,124,90,163,249,38,190,101,25,143,99,94,50,239,35,14,215,12,184,219,25,32,81,51,246,142,27,126,246,157,133,33,13,119,172,197,111,163,43,234,0,1,0,5,21,101,126,190,1,77,145,81,72,7,18,222,122,16,141,155,14,26,122,121,141,61,162,148,91,165,60,209,3,90,251,120,52,143,255,91,253,53,60,239,129,160,65,213,230,214,195,241,114,123,145,145,220,232,75,132,91,7,118,101,237,0,1,0,6,49,18,87,196,3,187,186,126,42,239,10,162,8,34,98,124,130,236,116,132,252,179,27,135,221,140,88,42,169,240,163,222,200,2,37,101,9,35,172,42,74,77,142,96,167,8,137,208,171,61,234,142,107,218,41,37,203,138,127,216,252,137,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,174,229,34,198,20,187,1,37,21,66,226,45,128,16,30,45,151,85,103,77,143,214,69,38,254,154,44,77,223,171,97,143,137,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,159,186,133,75,226,253,235,173,50,111,19,111,136,219,244,177,114,214,77,28,237,51,180,171,99,164,148,28,226,73,151,137,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,201,172,113,10,243,67,127,194,244,249,48,131,50,164,72,10,88,81,76,45,149,28,73,119,114,174,142,141,132,8,175,27,137,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,182,81,125,81,147,30,201,86,98,178,2,213,133,189,82,214,234,207,27,118,113,82,28,46,150,32,45,104,62,223,226,99,137,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,28,170,140,159,117,250,84,163,177,210,240,18,225,217,234,99,118,79,
112,157,28,25,151,121,72,28,143,77,92,237,107,62,9,1,45,86,211,71,72,250,222,240,196,161,223,115,65,15,173,85,177,255,211,89,90,168,146,255,238,205,12,128,137,196,203,27,9,1,12,138,2,247,115,213,230,144,244,253,93,195,182,20,84,243,71,244,71,24,244,103,128,231,65,233,198,173,128,126,246,169,9,1,185,67,192,97,81,170,106,240,157,23,26,106,216,228,65,120,68,165,135,110,2,31,216,158,187,67,79,105,151,157,234,15,9,1,12,91,247,177,168,165,63,93,186,29,121,106,121,167,27,50,198,22,230,125,252,159,77,132,92,155,115,251,100,87,112,147,9,1,127,216,219,101,110,162,74,65,114,24,131,123,143,204,15,44,36,72,8,136,170,255,39,231,108,143,128,71,65,95,117,4,9,1,4,27,221,133,245,254,194,84,195,88,19,141,109,233,58,225,116,116,251,225,170,44,159,23,28,181,85,238,6,151,63,144,9,1,130,114,163,11,234,79,36,77,175,198,107,147,58,183,234,134,122,178,61,205,225,34,184,146,138,50,221,70,198,19,2,191,9,1,41,151,245,131,127,195,211,240,254,148,60,106,169,97,173,173,118,116,195,243,213,115,169,17,155,83,25,181,108,68,48,51,9,1,236,254,112,114,24,207,172,186,132,163,119,198,20,66,226,51,51,142,39,212,88,198,68,118,92,136,138,204,26,155,11,165,9,1,215,116,3,65,140,246,136,209,81,15,184,214,188,49,94,63,81,57,12,135,108,143,41,137,113,88,11,84,15,30,158,184,9,1,50,203,140,60,152,140,103,117,103,130,42,29,70,236,110,49,211,18,247,40,117,35,54,107,171,190,233,18,117,69,68,43,9,1,95,50,92,228,81,22,16,190,182,42,66,158,131,165,204,25,25,20,143,210,29,170,143,129,94,111,129,132,227,28,102,180,9,1,96,153,155,64,152,142,147,161,102,88,190,194,238,147,14,243,71,26,33,184,193,50,249,29,88,2,52,157,179,7,69,77,9,1,212,255,51,143,196,70,53,156,98,221,171,235,82,21,252,198,242,28,2,246,195,67,6,91,2,240,95,173,200,49,66,89,9,1,58,212,15,123,117,242,193,79,217,63,177,112,56,232,153,140,93,188,111,168,108,138,82,113,212,107,209,150,246,205,191,157,9,1,204,180,39,152,129,136,139,125,156,240,127,28,205,2,65,12,140,132,177,76,5,4,95,204,205,9,179,77,57,148,6,231,9,1,249,145,142,225,129,214,86,160,12,71,51,28,109,238,246,115,57,184,6,234,138,46,107,81,103,128,201,242,101,51,179,68,9,1,17,109,102,169,143,117,42,93,149,160,20,188,122,34,0,140,248,73,206,232,146,65,183,250,61,35,40,54,167,63,173,215,9,1,58,197,183,31,149,121,187,250,193,140,202,222,69,149,235,105,78,113,59,213,78,241,15,40,62,137,46,19,193,78,85,31,9,1,209,81,122,212,165,252,102,254,115,58,127,209,26,21,188,113,69,3,30,255,154,72,181,219,97,7,227,96,209,19,138,181,9,1,151,245,134,14,100,30,161,175,227,142,158,71,197,157,213,103,198,28,241,51,173,107,242,84,76,53,176,101,132,26,29,60,9,1,72,239,59,253,40,148,227,213,236,98,100,14,198,212,71,148,180,209,64,152,228,196,11,209,109,231,183,97,135,156,172,241,9,1,205,107,120,198,53,118,206,64,8,204,37,15,58,43,95,189,38,38,203,212,73,105,50,160,21,160,38,124,10,233,46,22,17,1,14,204,0,103,138,56,182,245,161,43,137,56,202,232,138,228,30,242,80,214,237,253,8,17,251,148,203,85,106,127,162,114,20,161,98,175,120,161,168,109,95,168,170,123,117,238,24,132,211,108,163,195,25,58,89,16,198,41,13,94,89,2,119,169,28,80,179,104,66,21,38,252,16,146,163,159,122,68,234,161,165,150,251,139,57,4,0,82,244,49,170,53,221,128,152,46,60,102,97,65,18,80,60,162,198,227,68,116,95,74,43,207,201,189,126,9,199,85,132,67,87,214,19,120,116,147,47,78,236,178,95,23,23,1,171,197,181,63,197,52,162,224,221,93,223,35,243,248,138,100,215,25,1,0,2,94,249,142,243,210,28,27,218,191,10,139,245,66,107,2,111,153,125,18,238,76,249,208,69,34,173,165,21,177,18,82,239,17,1,14,217,248,73,34,237,151,158,186,178,226,225,234,58,186,218,7,175,174,60,185,248,248,25,28,51,154,61,168,2
13,77,242,169,0,82,244,49,170,53,221,128,152,46,60,102,97,65,18,80,60,162,198,227,68,116,95,74,43,207,201,189,126,9,199,85,132,194,179,227,24,162,84,191,59,100,89,207,244,98,199,135,44,91,35,210,22,182,249,66,219,89,32,250,61,112,54,16,141,161,98,175,120,161,168,109,95,168,170,123,117,238,24,132,211,108,163,195,25,58,60,170,232,127,53,17,58,173,142,247,89,247,207,149,119,134,64,14,158,82,18,231,188,179,163,89,11,174,81,43,46,153,9,9,132,119,216,135,171,185,255,65,210,32,46,77,152,229,32,71,244,141,39,140,188,245,22,25,184,28,198,202,132,222,174,160,25,1,0,2,119,228,107,44,53,217,61,182,86,125,189,169,81,109,32,249,139,212,234,72,144,24,135,118,89,121,216,219,24,207,66,168,17,1,14,213,5,71,76,80,42,16,77,105,27,101,50,79,76,38,232,167,55,134,79,128,251,113,33,35,116,77,254,28,6,176,0,0,82,244,49,170,53,221,128,152,46,60,102,97,65,18,80,60,162,198,227,68,116,95,74,43,207,201,189,126,9,199,85,132,110,177,116,132,158,116,7,177,77,240,138,82,212,212,241,43,54,8,1,75,42,104,243,5,241,73,226,60,72,169,2,41,25,1,0,3,73,127,150,33,212,30,131,171,90,28,221,170,53,22,176,210,81,154,146,160,81,67,188,184,7,13,240,169,97,51,230,181,9,100,71,111,167,179,223,229,107,45,223,184,100,207,103,16,106,234,217,25,120,51,156,12,142,28,186,4,134,110,182,28,191,11,9,9,62,169,255,238,205,94,99,210,162,31,213,85,158,233,223,231,174,18,241,77,26,133,255,75,40,190,65,163,26,48,53,196,25,1,0,2,232,83,248,233,232,89,11,170,74,117,125,224,222,189,198,137,244,49,205,228,155,200,97,42,160,89,8,63,109,25,91,168,9,9,255,134,239,235,78,5,0,110,98,20,109,14,192,231,250,72,49,145,191,114,177,51,38,242,67,121,217,71,114,50,124,171,9,9,209,167,69,145,2,139,203,92,187,46,4,30,218,0,85,77,176,3,253,201,73,229,148,92,229,57,32,59,244,12,109,96,9,9,113,233,23,33,249,145,133,118,215,96,240,47,3,202,196,124,111,64,3,49,96,49,132,142,60,29,153,230,232,58,71,67,65,14,41,230,74,233,195,128,0,49,87,111,239,58,195,179,2,237,163,15,66,168,74,199,52,200,236,175,1,55,3,126,248,127,239,193,246,133,27,151,79,57,134,3,21,27,16,164,160,185,211,150,83,253,116,26,253,56,22,83,204,70,30,122,203,221,134,84,251,39,141,138,17,246,159,212,31,236,239,75,201,65,5,60,24,80,250,182,152,192,250,91,168,183,69,6,78,180,185,147,215,10,134,34,96,243,26,77,158,213,121,211,188,200,73,204,177,205,8,52,178,106,57,74,136,235,186,254,43,32,141,97,126,192,90,203,191,95,226,69,41,166,75,35,133,169,106,173,67,240,155,225,173,169,44,112,64,49,220,193,72,27,65,8,29,65,249,24,254,23,128,162,84,32,193,217,215,5,53,140,19,76,198,1,217,209,132,203,77,253,222,126,28,172,43,195,212,211,139,249,236,68,230,33,5,245,225,0,18,59,175,197,134,247,119,100,72,140,210,76,106,119,84,110,90,15,232,189,251,79,162,3,207,175,252,54,204,228,221,91,137,1,0,0,0,0,0,0,0,0,0,0,0,0,102,252,2,65,142,125,208,106,197,183,59,71,59,230,188,90,81,3,15,76,116,55,101,124,183,178,155,243,118,197,100,184,209,103,90,94,137,2,0,0,0,0,0,0,0,0,0,0,0,0,102,252,2,66,75,168,78,31,55,208,65,188,110,85,186,57,104,38,204,73,78,132,212,129,91,109,181,38,144,66,46,234,115,134,49,79,0,232,231,118,38,88,111,115,185,85,54,76,123,75,191,11,183,247,104,94,189,64,232,82,177,100,99,58,74,203,211,36,76,61,226,32,44,203,98,106,211,135,215,7,34,230,79,190,68,86,46,47,35,26,41,12,8,83,43,141,106,186,64,47,245,0,242,170,235,6,192,229,86,67,74,201,60,35,47,55,221,139,224,167,191,159,67,15,118,235,86,77,249,252,183,112,196,95,121,9,53,136,208,232,71,239,167,58,16,206,32,228,121,159,177,228,102,66,214,86,23,199,229,33,63,160,73,137,217,45,137,2,0,0,0,0,0,0,0,0,0,0,0,0,102,252,2,66,135,222,210,71,225,102,15,130,112,113,199,241,55,25,52,8
8,151,81,8,83,132,252,159,68,98,193,241,137,124,92,62,239,137,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,134,36,129,147,235,77,210,168,206,129,95,135,108,18,77,72,53,149,34,240,133,77,149,216,7,46,175,240,211,125,85,189,17,3,32,62,137,13,108,44,59,173,166,238,204,150,3,169,156,28,98,89,237,90,100,2,241,199,108,193,139,86,140,58,239,186,15,17,34,169,145,29,210,173,116,63,242,55,212,17,100,138,15,227,44,109,116,238,192,96,113,106,42,116,53,47,107,28,67,91,93,103,0,249,3,11,120,197,191,90,201,151,167,105,98,170,50,201,10,109,142,142,188,233,131,140,142,235,56,141,115,225,247,101,154,214,116,246,54,163,90,111,26,81,86,78,195,55,27,156,77,163,18,109,90,208,186,227,80,207,199,250,234,199,99,99,184,9,255,104,98,246,102,108,85,7,252,51,21,214,132,35,158,2,38,112,107,69,195,65,114,145,245,183,172,194,211,57,80,82,17,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,180,68,244,119,54,206,136,162,78,107,80,251,5,29,192,174,93,179,175,68,217,8,246,220,217,160,21,208,74,126,225,227,25,1,0,3,59,172,224,22,174,10,65,231,169,237,9,168,91,33,85,109,38,187,242,242,75,76,32,165,75,187,165,27,95,83,162,158,25,1,0,5,237,36,132,158,202,168,131,171,106,32,214,79,172,224,148,150,15,71,73,102,217,162,19,183,2,117,192,112,196,76,181,34,161,61,185,77,114,162,95,100,135,66,67,175,119,110,6,244,73,213,91,169,221,83,157,81,206,111,89,151,62,178,167,63,16,226,11,189,169,125,149,14,110,8,62,221,87,116,233,142,217,139,253,153,16,9,1,186,154,222,88,248,170,108,168,43,242,42,43,72,15,245,221,236,232,166,232,99,81,164,123,16,213,143,51,128,251,219,183,9,1,103,185,128,156,225,233,200,126,96,129,32,179,163,131,84,200,153,155,236,34,245,43,19,243,165,109,226,10,22,113,50,131,9,1,203,224,11,159,230,121,179,34,119,46,123,13,250,7,202,214,183,18,124,144,172,158,237,255,172,53,228,144,236,81,142,168,161,44,60,95,185,9,118,123,106,246,85,186,215,47,93,102,56,245,245,208,160,183,144,135,107,116,64,90,68,61,138,52,178,244,96,20,237,96,5,52,90,158,129,172,204,39,175,55,18,74,73,29,222,9,255,36,49,6,86,93,12,79,206,248,151,94,121,177,178,35,12,1,159,78,58,178,122,63,78,124,169,48,107,159,98,153,132,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,208,111,9,167,39,114,89,78,179,210,171,35,115,181,211,197,236,176,132,184,74,77,237,45,48,18,241,69,222,221,138,25,161,93,224,137,41,163,192,131,82,50,167,205,32,29,51,131,23,202,171,216,115,199,117,193,115,55,85,171,245,34,173,219,214,151,226,215,10,91,97,70,75,209,104,27,41,137,81,196,246,13,142,199,12,25,1,0,4,113,24,36,136,41,22,138,100,28,59,149,105,31,231,215,27,33,193,211,238,215,254,44,202,236,107,125,180,46,38,146,200,25,1,0,2,38,182,122,48,1,162,205,218,95,52,172,146,222,81,199,193,42,178,228,105,133,88,214,83,137,237,66,230,119,250,5,85,9,255,253,39,5,238,88,207,60,229,238,92,28,224,63,70,109,126,152,54,188,71,18,186,162,153,21,61,132,71,202,121,113,207,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,191,14,175,103,174,227,73,235,177,89,118,163,111,237,172,26,74,6,101,80,58,211,113,127,243,254,106,224,120,179,101,156,161,76,72,189,176,20,90,137,189,135,248,136,79,63,168,193,228,45,88,94,117,184,2,226,247,10,56,240,40,136,100,146,139,246,198,195,194,209,164,217,45,202,10,147,86,175,254,198,249,92,64,121,164,0,1,0,0,253,83,6,143,195,92,106,35,254,224,103,188,253,63,192,216,128,186,79,121,216,182,90,175,191,240,47,48,93,168,74,2,141,105,206,208,33,164,189,140,91,38,36,168,242,80,217,18,184,248,245,157,129,85,249,94,94,229,138,101,38,9,1,47,241,202,110,153,206,246,252,92,214,119,95,159,94,245,61,243,40,240,8,26,143,180,81,247,55,255,244,7
3,12,229,83,9,1,95,192,113,95,216,242,21,235,124,16,227,245,80,217,178,9,241,140,170,135,64,175,84,27,211,70,239,73,100,139,20,245,9,1,201,169,20,123,206,251,168,141,33,64,175,106,246,185,19,185,53,101,125,53,5,87,5,184,7,21,91,61,208,130,42,131,9,1,183,44,52,109,222,204,99,77,172,182,15,29,40,214,131,168,39,33,227,213,36,163,61,162,168,47,3,62,136,241,101,126,9,1,133,79,165,174,6,191,41,30,209,5,109,104,28,93,197,246,247,13,23,242,234,3,204,110,233,229,198,255,131,62,203,105,0,9,36,146,140,19,119,166,207,36,195,156,45,70,248,235,157,242,62,129,27,38,220,53,39,229,72,57,111,212,225,115,177,211,102,235,93,180,24,37,200,29,129,191,72,73,93,114,116,50,181,244,253,225,248,223,46,101,251,180,223,113,77,242,139,0,70,112,11,77,64,172,92,53,175,44,34,221,162,120,122,145,235,86,123,6,201,36,168,251,138,233,160,91,32,192,140,33,228,96,52,194,207,181,181,131,126,57,95,233,204,152,190,4,82,34,235,53,200,202,40,109,252,73,189,213,239,94,126,130,9,1,27,8,240,147,212,200,27,37,231,124,191,110,45,189,91,214,149,171,253,138,221,47,115,230,14,214,92,143,87,109,114,128,17,1,249,248,63,134,138,17,62,7,250,227,100,52,50,139,214,30,153,110,204,16,117,222,9,119,59,220,202,187,15,30,237,162,217,9,1,207,50,50,89,38,214,97,46,146,127,167,239,70,37,230,216,37,111,63,130,63,184,65,242,102,240,65,120,90,218,241,226,9,1,247,124,190,104,95,142,126,239,68,219,69,165,161,237,129,135,165,5,236,239,227,84,140,240,18,4,129,67,95,125,116,254,0,70,112,11,77,64,172,92,53,175,44,34,221,162,120,122,145,235,86,123,6,201,36,168,251,138,233,160,91,32,192,140,33,64,31,45,164,25,35,131,214,111,103,185,66,123,36,77,209,130,54,238,77,124,250,76,42,126,68,137,156,53,223,112,84,9,1,172,109,18,138,162,172,98,227,191,233,228,200,186,6,38,31,205,90,238,83,85,200,140,40,95,174,70,100,236,184,92,217,161,244,45,89,208,100,220,62,250,92,105,132,16,63,83,84,164,87,143,157,56,238,206,71,73,61,115,66,84,21,49,226,43,98,209,124,67,230,245,74,241,47,105,36,12,239,120,5,217,170,54,156,84,161,240,229,12,107,226,171,19,248,82,37,157,153,49,126,15,161,81,30,210,115,133,159,227,100,212,172,149,230,75,232,210,108,56,145,60,23,37,166,185,84,193,191,193,253,113,198,103,19,58,1,211,88,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,244,204,176,178,59,175,3,143,139,2,242,240,21,87,122,194,191,65,151,96,89,50,229,228,174,155,172,240,102,252,221,88,161,54,97,92,243,73,215,246,52,72,145,177,231,202,124,114,136,63,93,192,73,97,131,206,41,240,31,150,151,163,154,135,110,104,89,178,252,214,86,245,40,217,82,157,194,186,14,137,246,116,87,3,221,0,88,209,228,65,175,80,39,254,110,76,107,116,157,222,72,114,28,239,59,179,26,8,211,214,75,156,110,156,114,90,188,114,36,198,234,1,57,238,186,239,33,185,70,78,68,110,74,247,188,177,180,151,164,216,15,115,133,254,13,247,190,87,17,67,0,232,148,145,23,177,217,122,193,138,141,18,36,190,30,64,69,198,123,105,131,146,212,237,192,61,179,237,70,185,233,70,115,24,243,123,134,88,68,215,142,127,133,2,149,85,85,176,160,214,111,3,112,143,142,78,137,211,79,138,29,6,66,209,69,161,250,30,45,145,12,250,62,92,70,95,125,127,105,194,36,213,66,224,165,152,168,81,60,218,234,227,67,148,118,29,59,147,53,55,78,40,8,227,39,217,122,18,110,222,78,162,140,204,238,55,6,95,41,25,0,0,0,0,144,32,12,17,126,234,225,99,200,138,138,108,231,51,212,1,171,8,94,147,139,188,115,131,162,159,107,192,34,19,171,180,161,151,178,33,144,104,181,16,79,208,222,10,42,70,102,179,246,243,151,172,167,86,83,107,175,210,186,181,198,128,36,151,56,29,81,196,245,31,168,78,1,68,190,24,94,31,195,247,20,122,219,85,214,0,172,224,132,217,231,157,205,174,1,20,9,234,148,84,215,
130,24,9,46,11,24,156,214,165,23,59,68,102,116,0,213,17,230,13,150,23,44,152,198,242,109,118,74,176,93,184,102,158,85,104,138,31,78,160,214,75,29,223,239,114,74,97,156,59,161,94,69,18,60,117,174,107,34,34,42,46,244,10,252,240,128,232,79,90,245,105,207,185,63,169,21,50,218,22,157,164,50,155,208,136,29,218,73,246,12,13,209,254,95,239,141,35,221,253,207,221,212,9,1,248,135,166,243,99,75,238,65,244,69,142,255,92,110,93,81,203,0,116,149,135,131,96,149,14,49,60,161,204,107,128,214,10,1,164,102,179,128,123,95,250,209,85,215,47,223,202,234,9,179,137,135,46,66,252,162,20,55,210,106,243,173,46,40,178,89,161,54,97,92,243,73,215,246,52,72,145,177,231,202,124,114,136,63,93,192,73,156,179,78,101,253,32,134,94,34,150,203,47,196,201,136,120,12,142,64,149,165,101,204,29,186,80,109,39,8,84,79,232,161,128,2,205,152,207,181,99,73,42,111,179,231,200,36,59,123,154,212,204,146,140,190,129,206,40,197,26,241,53,110,14,80,134,5,37,204,178,45,166,159,129,221,136,86,105,96,102,217,243,16,89,249,161,104,60,252,156,182,35,11,128,137,157,188,182,24,21,145,214,144,137,216,164,177,52,180,226,24,99,117,67,64,64,241,12,41,231,167,74,209,204,218,129,255,34,102,39,251,93,142,41,145,92,203,50,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,164,143,172,103,50,238,187,229,211,197,117,198,212,234,78,82,14,197,48,46,66,117,89,74,193,245,113,83,209,177,183,217,161,81,125,159,102,197,100,210,237,244,60,182,87,66,79,114,241,194,13,59,255,230,154,81,238,249,118,81,47,67,167,51,81,39,137,145,76,187,230,155,155,70,102,153,242,27,165,84,224,218,253,106,44,9,255,60,175,19,171,24,127,90,244,102,215,109,52,163,108,87,248,78,75,87,18,254,157,225,108,251,34,166,46,135,226,155,114,9,1,137,66,124,98,185,206,118,62,215,166,225,249,150,252,161,203,0,217,115,193,56,251,207,17,2,102,50,45,221,187,226,50,177,93,224,137,41,163,192,131,82,50,167,205,32,29,51,131,23,202,171,216,115,0,2,86,146,226,162,251,7,31,172,102,81,196,200,22,126,55,247,235,106,113,202,76,139,128,246,85,114,151,178,101,95,159,191,161,51,139,210,222,212,86,156,86,143,33,218,23,74,206,199,11,130,110,85,12,238,250,55,221,42,57,86,177,201,103,75,239,3,21,25,161,11,112,13,117,46,113,1,71,100,17,102,72,32,125,98,78,0,1,0,0,253,83,6,143,195,92,106,35,254,224,103,188,253,63,192,216,128,186,79,121,216,182,90,175,191,240,47,48,93,22,155,114,188,54,101,62,87,11,121,52,68,25,197,106,219,1,155,20,238,224,223,45,229,125,209,123,117,113,166,132,12,161,220,14,214,252,191,229,28,159,132,166,98,230,79,209,52,119,54,170,116,134,81,122,205,57,136,220,234,247,156,218,204,189,130,97,114,48,48,160,227,48,48,64,28,58,212,244,195,137,255,228,64,41,161,128,2,205,152,207,181,99,73,42,111,179,231,200,36,59,123,154,212,204,146,237,236,215,194,80,85,137,94,24,27,243,41,116,140,187,70,33,135,209,48,214,17,9,198,53,117,79,21,244,235,240,208,9,255,219,215,135,89,179,97,218,60,174,210,247,236,175,60,97,114,55,143,26,104,199,6,53,175,153,170,254,4,26,49,33,168,161,54,97,92,243,73,215,246,52,72,145,177,231,202,124,114,136,63,93,192,73,131,75,158,95,145,124,241,215,162,81,17,8,190,214,110,90,15,123,1,214,244,31,200,40,196,119,19,72,17,44,27,219,161,44,60,95,185,9,118,123,106,246,85,186,215,47,93,102,56,245,245,208,160,62,110,21,209,63,190,73,44,77,121,157,143,198,176,46,157,199,11,251,128,18,248,171,99,94,148,201,218,67,21,70,232,9,255,16,46,124,130,188,155,165,96,66,61,124,176,157,94,180,222,164,199,68,147,148,121,54,59,60,181,162,4,74,28,114,103,9,1,0,0,0,60,9,235 + ], + "state_diffs_hash": "0xc83cac9cd98a4216cbc0d0830e63c4956e4a1c45c122ebbc88af7ea3b496c406", + "aux_commitments": { + 
"events_queue_commitment": "0xec82208c87a937d88768a0067b2a80f0525eca8288dad2cf96cf8bbe6a1aa565", + "bootloader_initial_content_commitment": "0x97df88dcecbcd29b49773c042cdee7a44c57a741e64913fff5aa1b3484232f28" + }, + "blob_hashes": [ + { + "commitment": "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", + "linear_hash": "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", + "local_root": "0xd4790efa9052ea67dcb473de870e3522e2fc340374e6293ad4646fde312c8c76" + } + }, + "meta_parameters": { + "zkporter_is_available": false, + "bootloader_code_hash": "0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf", + "default_aa_code_hash": 
"0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e", + "protocol_version": "Version27" + }, + "pass_through_data": { + "shared_states": [ + { + "last_leaf_index": 212, + "root_hash": "0x0332d2acc43785a44b2b84fc010372c8f3e4ff4d0ca5f312de142ffe74189500" + }, + { + "last_leaf_index": 0, + "root_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "input": { + "PostBoojum": { + "common": { + "l2_to_l1_logs": [ + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x91ac392a7af99b6df974efe2d6b40e35dc79156fa3b75ea257df4976da0c26e8", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xa088c0c1710f2244aad45e356742e7ac7773a153cf23db6cec4ded7e8da05d69", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 2, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xccdf8bf8f4bf596f8fbb7ed18d67ef6208919707de498d0cae4d2c01c50e2305", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 3, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xe43e076103a2a867921c20b43365e7729003f1a00558c3dc51b31b25c90b2b2a", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 4, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3eba0d3506eba94a8b328b82b3a32623c2ef77e253bfbb82d2667b163c8714c7", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 5, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xdaf8935b934fe9513d112e3ca61a728dbfae2fdb5ea1edf8e6f56b8283aa4cd8", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 6, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x5527767da575eb332ed996603486300af0f55116f2a177b0c05ed31518a23d77", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 7, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x15f4d69332763eaaf4d80e05e88b04d45d38d0854381f58e4c01688496e03f63", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 8, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x96a04ccc56dc1cea06a00fe97af3231766aee89c948c86f0c16eeebcdddc0aa3", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 9, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x886292e17714665013e6c7fc19b12b15a69676394ec88ceb5d1698a0b198a7dd", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 10, + "sender": "0x0000000000000000000000000000000000008001", + "key": 
"0x9e84bba4d8497ea90d8c5513063912bdbd9cc85ac68185ee1715a7b15ca01f17", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 11, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3795b197a06415b6b00d2bde238a25741ecc9791269d899c97ff983d88dcd5e6", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 12, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0e5be5348f9a9fd4e936c4fad6793e0e4395f5642d1b5f9a08e1a3703226f8ef", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 13, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xec3e691650319cdf9fbc5410f388ea24f2c9325b0d7b4ce37db2a1c5957bd86b", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 14, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xfa448e8ac5560693b992b70fae5e08f3e9cae510c8e1fa69d2c212dd1811bf05", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 15, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x6c5a74345d321eb4edebdf43f42a11bc409613a9b92cbfe96730498217b12d43", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 16, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x7912d592b280f7f7a5d04c68eaddae09b518816a0a6d97bc89b143ae3109e78f", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 17, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3c1fad3b48be6cb9503048860557f3ef99dccdf1f51dfbf26570f630469b1a98", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 18, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xb7e755892fbe6870e93cbd3c0945d772e141b94ee50aa75a2d7bb7219fb53266", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 19, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xb81f1f0fbe80e956956132771d1a99c35bd52856adbf932cc061d3980a79c124", + "value": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 20, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x8f5a7c0d48c9b82137c446c9db31ce5ef4e1a30166dd3ae09580c33595bbe2b7", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 21, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x2f6516d033cfa362a407a7d2d2279c62fa185eaae6742bc6f51fdcb51606094e", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 22, + "sender": "0x0000000000000000000000000000000000008001", + "key": 
"0x82eb8a4152ff724ef814c3ddacea2a65e6e6d09a00d72e57fff9e12b7857461d", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 23, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x08a95d3f4505e0e3fb90a2002a81750c0bae658a5d4a290acaeacdfc2691560a", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 24, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xb8c38e08db553411378fc77ca81f20da7d5b1be77fb316393e33bfe0c08565dd", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 25, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xd54d7593c4d133e4903becb318f109246537ddab2646148ac51ac7c94e25ef8c", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 26, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3e86b6ddb211d47e057c4e246810e2dbb10061c2679e52ae7e4b647c9c98bf08", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 27, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xe1cee6c0528143fa82ff667c9655d2d775dccdb4204791956096a6225059c9b8", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + ], + "rollup_last_leaf_index": 212, + "rollup_root_hash": "0x0332d2acc43785a44b2b84fc010372c8f3e4ff4d0ca5f312de142ffe74189500", + "bootloader_code_hash": "0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf", + "default_aa_code_hash": "0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e", + "protocol_version": "Version27" + }, + "system_logs": [ + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 0, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000002", + "value": "0xf9030b78c5bf5ac997a76962aa32c90a6d8e8ebce9838c8eeb388d73e1f7659a" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000007", + "value": "0x91ac392a7af99b6df974efe2d6b40e35dc79156fa3b75ea257df4976da0c26e8" + }, + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 28, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x00000000000000000000000066fc024100000000000000000000000066fc0242" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000003", + "value": "0x190bda1fde651ac21cf771cb9f125f486678abbab229cce182a7c9a07361afbe" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000004", + "value": "0x000000000000000000000000000000000000000000000000000000000000001b" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": 
"0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x38eaeef3afe69b6f6b2fa22c92da8137f1e405a1e1861b7de7cfa30c7d7462dd" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000006", + "value": "0x000000000000000000000000cc4b013229ffd6cb5eae5876251874172cafed0a" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000005", + "value": "0x335f4f11c3e55bb502bcbdedfd8e63b8e5c84bea465c984a5c664a8eca7d4a7a" + } + ], + "state_diffs": [ + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x3e013dc3eb10bbd48a8f9c94758f04d081563b6", + "derived_key": [ + 112,120,89,162,183,230,11,175,17,100,223,232,175,83,47,195,198,157,29,129,145,197,186,61,127,17,109,250,141,181,206,45 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x868a8819e738818dabe8bfb671ae8e027372dd7", + "derived_key": [ + 130,208,215,121,46,249,196,126,160,123,216,26,86,45,8,246,35,74,8,171,141,141,223,145,137,150,142,180,236,158,154,37 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000ecfaeb837bd098bcf9bde6fc2ccd8e8a9355a1b70e601ac18cd089eb308" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x9af32a0f1b0914742c84d68795a9be9abd6bbd5", + "derived_key": [ + 246,11,47,22,184,171,230,29,125,57,179,213,44,191,157,128,184,167,253,5,55,217,60,33,8,75,147,188,5,4,171,60 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000c328f328dd820a1dd698047a7f7d49874d8259196e273517430af8f480" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x22acca3c358a523c1ecbf1491d131a597aada298", + "derived_key": [ + 203,204,98,199,195,136,172,152,215,47,208,131,209,215,32,206,186,255,203,162,198,108,114,94,200,185,197,197,240,116,111,138 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100005b8e587974043d59bffbf632d020e764959abe62e4c238d8df2e62b2b5" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "derived_key": [ + 235,143,74,199,189,78,241,151,159,154,102,86,114,178,92,208,123,30,61,99,122,89,162,199,107,26,34,232,91,117,146,65 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x30439cdc8796fb3cecb53f4bf5b133f581b5b40f", + "derived_key": [ + 66,202,106,148,168,163,117,186,10,227,150,70,185,29,164,88,23,175,73,33,116,119,174,107,73,193,3,53,191,78,11,115 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001b37e5f8cafac924b3e663779e12c80dace8a02b1d273ae708f275ad62a" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x338bd2ded4569c568f21da174acec70b826e550c", + "derived_key": [ + 71,164,52,102,233,54,181,135,193,191,158,16,243,13,105,123,17,66,228,89,233,6,51,219,18,27,114,127,245,180,121,43 + ], + "enumeration_index": 0, + "initial_value": "0x0", + 
"final_value": "0x1000067554f53c6660e53bd9729f00aee0189010dfeb868fa3cbd481232480c" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x48a6f6788413af58f1bdf8c963cb67a4346f5fd8", + "derived_key": [ + 95,233,138,250,154,14,193,124,34,99,2,123,232,159,70,229,238,83,58,30,33,169,31,255,76,177,68,204,74,239,33,188 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10005b597eb02058b230cee6a9cdf4705410d151ad1ea6ad3e2de77841acfc2" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75", + "derived_key": [ + 142,107,134,4,159,177,116,20,22,26,103,250,177,44,27,34,144,68,85,154,16,112,202,176,23,66,216,18,18,139,156,190 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000067554f53c6660e53bd9729f00aee0189010dfeb868fa3cbd481232480c" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x517d9f66c564d2edf43cb657424f72f1c20d3bff", + "derived_key": [ + 140,78,248,13,214,150,89,51,34,40,248,136,164,249,180,131,12,12,50,27,140,170,45,67,53,35,250,200,97,150,181,207 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100048bf1b165f4c850d01eb81e5c040f70ea01dca019e780a87f80ec7897c3" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x55f6f01d04a21e76cbd2de9d4a9ff6ee9f8893a6", + "derived_key": [ + 235,96,109,232,32,223,19,108,89,177,255,121,225,223,146,96,66,108,164,143,175,146,74,70,53,36,33,31,183,195,161,165 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000201e923f3e47adfa7c4f0f61ea8dd4256f66d1099970bcd314892864917" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x5caf5f2b06ca757c7cebacdcd6f163af45a6bb83", + "derived_key": [ + 95,127,60,252,81,90,137,254,204,8,97,6,46,254,57,252,48,104,36,27,116,128,26,163,170,147,245,100,172,98,242,54 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100061dc40d0bc2cb81905e6bc1421ebd4bd8b79aacb8da4f9d7145b288674f" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x5e45123c75ae6b22222a2ef40afcf080e84f5af5", + "derived_key": [ + 207,14,96,187,125,119,111,198,230,184,241,1,19,161,190,119,25,192,44,34,151,163,108,216,124,11,59,35,121,140,74,95 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100070f1cd4e2830607af30192743910fdfabf1dd004a8073944275819d1dfe" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "derived_key": [ + 53,89,195,90,167,22,152,246,194,202,70,67,239,232,80,69,169,73,79,38,45,119,238,103,193,61,215,52,230,38,48,90 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x683cfc9cb6230b80899dbcb6181591d69089d8a4", + "derived_key": [ + 186,93,97,46,65,80,43,253,205,126,211,179,176,210,212,177,245,200,248,185,15,209,21,42,187,224,222,192,14,162,61,7 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100048bf1b165f4c850d01eb81e5c040f70ea01dca019e780a87f80ec7897c3" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x7cf7e4a85a7a677f6a5b2fe169e6d5eef29219c5", + "derived_key": [ + 44,61,141,160,220,54,28,84,148,218,146,175,212,98,94,116,25,190,241,121,131,189,209,145,214,33,89,62,212,173,57,47 + ], + 
"enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000323b7266314b9d3ec387d5bcd9590a50be00e788837ca5a880d97e3ee83" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7", + "derived_key": [ + 48,100,113,86,255,138,164,229,100,8,99,14,34,251,194,115,119,250,250,242,7,188,204,248,210,254,18,115,9,165,229,233 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000215cd6f306d757d5d2fdbb460c60f298584179eecc07196c7ae8e4f8d64" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x9961618bfad393730ea065a18399303330f1395f", + "derived_key": [ + 200,51,40,178,38,74,180,112,167,221,220,163,38,200,255,61,159,78,76,252,60,226,78,168,221,216,201,180,12,20,188,185 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000201e923f3e47adfa7c4f0f61ea8dd4256f66d1099970bcd314892864917" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xcc4b013229ffd6cb5eae5876251874172cafed0a", + "derived_key": [ + 229,176,77,5,169,51,88,54,33,49,122,209,137,227,159,45,116,33,7,146,238,29,46,153,91,171,175,162,128,71,14,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100015306fb0f0b5fde219996551c8072c6711bba5300b2669aeb0f0e4c7445" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xcc8e14c05825cde94522515a0303e4c2e07ca6f9", + "derived_key": [ + 250,253,202,115,87,157,171,40,23,48,73,193,157,78,81,69,162,232,29,120,68,42,125,135,121,254,156,149,143,198,173,119 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000035ba31ed16c21160780ac09763bfd593b1748f69e683a9fb10921aa49d" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xdb5b25a5c3ff135d39df0dd3417a6b26724d2b24", + "derived_key": [ + 188,67,139,78,87,51,241,113,6,164,3,59,144,48,243,76,127,49,1,147,79,102,218,253,36,37,149,91,92,247,35,64 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000ed306b29128e065afdce73ebc8e0ecd08cd918fdc0f74c03f64d515e48" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xdc0ed6fcbfe51c9f84a662e64fd1347736aa7486", + "derived_key": [ + 6,115,118,133,155,130,106,66,115,187,68,69,230,28,222,77,91,95,90,23,4,86,255,161,95,247,195,108,233,152,241,190 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100013f4a2c964c71d49f2d8876a11eee4bf4e8d19231652fc775b2cae43a21" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xe024f9e4e8fa2f08f768c1cb56bc4a6e3cbd8834", + "derived_key": [ + 129,4,114,76,225,212,61,128,125,223,69,48,213,107,167,249,183,181,194,21,67,99,215,247,166,215,108,189,158,61,249,130 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100006721ab911a41d8b52502ea4cdf42ec99e5e529be6a3e66f3adb2143c5a" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xe300eb4b0834a551cac3e93f30380643ce153408", + "derived_key": [ + 51,232,118,240,225,158,242,19,13,216,95,254,79,35,196,212,101,148,164,24,219,221,10,181,111,253,164,76,93,72,246,206 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10005bb7471a0016b1c330f767a5e73ad0c330934694cad007c5aa3f926be65" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xf0e50c6be2ab13f852259d99317e0fa1511ed273", + "derived_key": [ + 
25,143,99,94,50,239,35,14,215,12,184,219,25,32,81,51,246,142,27,126,246,157,133,33,13,119,172,197,111,163,43,234 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000515657ebe014d9151480712de7a108d9b0e1a7a798d3da2945ba53cd103" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xfa1e2d910cfa3e5c465f7d7f69c224d542e0a598", + "derived_key": [ + 90,251,120,52,143,255,91,253,53,60,239,129,160,65,213,230,214,195,241,114,123,145,145,220,232,75,132,91,7,118,101,237 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10006311257c403bbba7e2aef0aa20822627c82ec7484fcb31b87dd8c582aa9" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0x64534fbb7489d8b2e0974a2a70dee20ad40795d90f17c1a6d62ba36ea19e007", + "derived_key": [ + 240,163,222,200,2,37,101,9,35,172,42,74,77,142,96,167,8,137,208,171,61,234,142,107,218,41,37,203,138,127,216,252 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0x1b458e5ab877fea2e4abf98d12b31ec3f7c93fd4856e807f684322e8cf11fdf7", + "derived_key": [ + 174,229,34,198,20,187,1,37,21,66,226,45,128,16,30,45,151,85,103,77,143,214,69,38,254,154,44,77,223,171,97,143 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0x810ca1ae825b138452fb743e9948f909b6286cbfadd5a899190fcb21a75443ab", + "derived_key": [ + 0,159,186,133,75,226,253,235,173,50,111,19,111,136,219,244,177,114,214,77,28,237,51,180,171,99,164,148,28,226,73,151 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x700000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0xe6d904d46c5d8b2934bf40eee45740c707124a9797010ceae3f79534391b6de5", + "derived_key": [ + 201,172,113,10,243,67,127,194,244,249,48,131,50,164,72,10,88,81,76,45,149,28,73,119,114,174,142,141,132,8,175,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 182,81,125,81,147,30,201,86,98,178,2,213,133,189,82,214,234,207,27,118,113,82,28,46,150,32,45,104,62,223,226,99 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xe00000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000035ba31ed16c21160780ac09763bfd593b1748f69e683a9fb10921aa49d", + "derived_key": [ + 28,170,140,159,117,250,84,163,177,210,240,18,225,217,234,99,118,79,112,157,28,25,151,121,72,28,143,77,92,237,107,62 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100005b8e587974043d59bffbf632d020e764959abe62e4c238d8df2e62b2b5", + "derived_key": [ + 45,86,211,71,72,250,222,240,196,161,223,115,65,15,173,85,177,255,211,89,90,168,146,255,238,205,12,128,137,196,203,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100006721ab911a41d8b52502ea4cdf42ec99e5e529be6a3e66f3adb2143c5a", + "derived_key": [ + 
12,138,2,247,115,213,230,144,244,253,93,195,182,20,84,243,71,244,71,24,244,103,128,231,65,233,198,173,128,126,246,169 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000067554f53c6660e53bd9729f00aee0189010dfeb868fa3cbd481232480c", + "derived_key": [ + 185,67,192,97,81,170,106,240,157,23,26,106,216,228,65,120,68,165,135,110,2,31,216,158,187,67,79,105,151,157,234,15 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10000c328f328dd820a1dd698047a7f7d49874d8259196e273517430af8f480", + "derived_key": [ + 12,91,247,177,168,165,63,93,186,29,121,106,121,167,27,50,198,22,230,125,252,159,77,132,92,155,115,251,100,87,112,147 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10000ed306b29128e065afdce73ebc8e0ecd08cd918fdc0f74c03f64d515e48", + "derived_key": [ + 127,216,219,101,110,162,74,65,114,24,131,123,143,204,15,44,36,72,8,136,170,255,39,231,108,143,128,71,65,95,117,4 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10000fd53068fc35c6a23fee067bcfd3fc0d880ba4f79d8b65aafbff02f305d", + "derived_key": [ + 4,27,221,133,245,254,194,84,195,88,19,141,109,233,58,225,116,116,251,225,170,44,159,23,28,181,85,238,6,151,63,144 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100013f4a2c964c71d49f2d8876a11eee4bf4e8d19231652fc775b2cae43a21", + "derived_key": [ + 130,114,163,11,234,79,36,77,175,198,107,147,58,183,234,134,122,178,61,205,225,34,184,146,138,50,221,70,198,19,2,191 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece", + "derived_key": [ + 41,151,245,131,127,195,211,240,254,148,60,106,169,97,173,173,118,116,195,243,213,115,169,17,155,83,25,181,108,68,48,51 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100015306fb0f0b5fde219996551c8072c6711bba5300b2669aeb0f0e4c7445", + "derived_key": [ + 236,254,112,114,24,207,172,186,132,163,119,198,20,66,226,51,51,142,39,212,88,198,68,118,92,136,138,204,26,155,11,165 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10001b37e5f8cafac924b3e663779e12c80dace8a02b1d273ae708f275ad62a", + "derived_key": [ + 215,116,3,65,140,246,136,209,81,15,184,214,188,49,94,63,81,57,12,135,108,143,41,137,113,88,11,84,15,30,158,184 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000201e923f3e47adfa7c4f0f61ea8dd4256f66d1099970bcd314892864917", + "derived_key": [ + 50,203,140,60,152,140,103,117,103,130,42,29,70,236,110,49,211,18,247,40,117,35,54,107,171,190,233,18,117,69,68,43 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": 
"0x1000215cd6f306d757d5d2fdbb460c60f298584179eecc07196c7ae8e4f8d64", + "derived_key": [ + 95,50,92,228,81,22,16,190,182,42,66,158,131,165,204,25,25,20,143,210,29,170,143,129,94,111,129,132,227,28,102,180 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100023dc5e29b1af44a05d231db67a62a8bfd0c06217caa29b061daa7f2913f", + "derived_key": [ + 96,153,155,64,152,142,147,161,102,88,190,194,238,147,14,243,71,26,33,184,193,50,249,29,88,2,52,157,179,7,69,77 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000323b7266314b9d3ec387d5bcd9590a50be00e788837ca5a880d97e3ee83", + "derived_key": [ + 212,255,51,143,196,70,53,156,98,221,171,235,82,21,252,198,242,28,2,246,195,67,6,91,2,240,95,173,200,49,66,89 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100048bf1b165f4c850d01eb81e5c040f70ea01dca019e780a87f80ec7897c3", + "derived_key": [ + 58,212,15,123,117,242,193,79,217,63,177,112,56,232,153,140,93,188,111,168,108,138,82,113,212,107,209,150,246,205,191,157 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000515657ebe014d9151480712de7a108d9b0e1a7a798d3da2945ba53cd103", + "derived_key": [ + 204,180,39,152,129,136,139,125,156,240,127,28,205,2,65,12,140,132,177,76,5,4,95,204,205,9,179,77,57,148,6,231 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10005b597eb02058b230cee6a9cdf4705410d151ad1ea6ad3e2de77841acfc2", + "derived_key": [ + 249,145,142,225,129,214,86,160,12,71,51,28,109,238,246,115,57,184,6,234,138,46,107,81,103,128,201,242,101,51,179,68 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10005bb7471a0016b1c330f767a5e73ad0c330934694cad007c5aa3f926be65", + "derived_key": [ + 17,109,102,169,143,117,42,93,149,160,20,188,122,34,0,140,248,73,206,232,146,65,183,250,61,35,40,54,167,63,173,215 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100061dc40d0bc2cb81905e6bc1421ebd4bd8b79aacb8da4f9d7145b288674f", + "derived_key": [ + 58,197,183,31,149,121,187,250,193,140,202,222,69,149,235,105,78,113,59,213,78,241,15,40,62,137,46,19,193,78,85,31 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10006311257c403bbba7e2aef0aa20822627c82ec7484fcb31b87dd8c582aa9", + "derived_key": [ + 209,81,122,212,165,252,102,254,115,58,127,209,26,21,188,113,69,3,30,255,154,72,181,219,97,7,227,96,209,19,138,181 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100070f1cd4e2830607af30192743910fdfabf1dd004a8073944275819d1dfe", + "derived_key": [ + 151,245,134,14,100,30,161,175,227,142,158,71,197,157,213,103,198,28,241,51,173,107,242,84,76,53,176,101,132,26,29,60 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": 
"0x0000000000000000000000000000000000008004", + "key": "0x1000ecfaeb837bd098bcf9bde6fc2ccd8e8a9355a1b70e601ac18cd089eb308", + "derived_key": [ + 72,239,59,253,40,148,227,213,236,98,100,14,198,212,71,148,180,209,64,152,228,196,11,209,109,231,183,97,135,156,172,241 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xe778a21bcfe90796edfc6e5dba276e58537d4ff192bc30765e18d9ef2aa9a55", + "derived_key": [ + 205,107,120,198,53,118,206,64,8,204,37,15,58,43,95,189,38,38,203,212,73,105,50,160,21,160,38,124,10,233,46,22 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10e" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x201f2e5549e69135c92587d30523c730fd01553abf72828402fad9b12c172e10", + "derived_key": [ + 204,0,103,138,56,182,245,161,43,137,56,202,232,138,228,30,242,80,214,237,253,8,17,251,148,203,85,106,127,162,114,20 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x35dc0a033f8f3476b52059199e9babf078fddd76cb3c290e05ae42462bfc33eb", + "derived_key": [ + 89,16,198,41,13,94,89,2,119,169,28,80,179,104,66,21,38,252,16,146,163,159,122,68,234,161,165,150,251,139,57,4 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x52f431aa35dd80982e3c66614112503ca2c6e344745f4a2bcfc9bd7e09c75584" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x3859fd065954dbed7c74a1359d0e5bc38403ea4cdf0274ae615ce0e3e2afec6b", + "derived_key": [ + 67,87,214,19,120,116,147,47,78,236,178,95,23,23,1,171,197,181,63,197,52,162,224,221,93,223,35,243,248,138,100,215 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x41c023ccaa2a67013d253ba3488447c2db3843b3f988653fdf8d7c7268862ca9", + "derived_key": [ + 94,249,142,243,210,28,27,218,191,10,139,245,66,107,2,111,153,125,18,238,76,249,208,69,34,173,165,21,177,18,82,239 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10e" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x4792eb7ea10cfac9f83a8d12d965c903854b51c5cb0783e082741ecf0c20dcfe", + "derived_key": [ + 217,248,73,34,237,151,158,186,178,226,225,234,58,186,218,7,175,174,60,185,248,248,25,28,51,154,61,168,213,77,242,169 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x52f431aa35dd80982e3c66614112503ca2c6e344745f4a2bcfc9bd7e09c75584" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x5bad0400c1a2cec7acfd85c5c5c25108540c42f405d3ae6ea01209dfbcc63c29", + "derived_key": [ + 194,179,227,24,162,84,191,59,100,89,207,244,98,199,135,44,91,35,210,22,182,249,66,219,89,32,250,61,112,54,16,141 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x71d2d9399f0017d99e02441b51c782e6f5613748934c615622bc6f2327b79b8d", + "derived_key": [ + 60,170,232,127,53,17,58,173,142,247,89,247,207,149,119,134,64,14,158,82,18,231,188,179,163,89,11,174,81,43,46,153 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": 
"0x75579ad6152f71bd465c7f980c773c6df73f53d82aebf8b69c1173f678af2d81", + "derived_key": [ + 132,119,216,135,171,185,255,65,210,32,46,77,152,229,32,71,244,141,39,140,188,245,22,25,184,28,198,202,132,222,174,160 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x87c06ae8fd6d2ee9919bb86c39ee03f70b0d87028d77b914408152f07043c769", + "derived_key": [ + 119,228,107,44,53,217,61,182,86,125,189,169,81,109,32,249,139,212,234,72,144,24,135,118,89,121,216,219,24,207,66,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10e" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x962d8512b88c87f0272660761794a46a130b867d7d15b38fc1adc33433e4fce8", + "derived_key": [ + 213,5,71,76,80,42,16,77,105,27,101,50,79,76,38,232,167,55,134,79,128,251,113,33,35,116,77,254,28,6,176,0 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x52f431aa35dd80982e3c66614112503ca2c6e344745f4a2bcfc9bd7e09c75584" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x9f4693a69c182083198dd36e2803bc42bbe3f851aa03cb0f0de7687a2171336b", + "derived_key": [ + 110,177,116,132,158,116,7,177,77,240,138,82,212,212,241,43,54,8,1,75,42,104,243,5,241,73,226,60,72,169,2,41 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10003" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xb35ae26426d210bd3178c283cdcb50ce0cdbff27177eb0786fc3fe0f45083b1d", + "derived_key": [ + 73,127,150,33,212,30,131,171,90,28,221,170,53,22,176,210,81,154,146,160,81,67,188,184,7,13,240,169,97,51,230,181 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x64" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xb62ada1fb8084bc5425b2aea59d59080ac3d0a10a1cc368978230741dca77a19", + "derived_key": [ + 71,111,167,179,223,229,107,45,223,184,100,207,103,16,106,234,217,25,120,51,156,12,142,28,186,4,134,110,182,28,191,11 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xcb5ca2f778293159761b941dc7b8f7fd374e3632c39b35a0fd4b1aa20ed4a091", + "derived_key": [ + 62,169,255,238,205,94,99,210,162,31,213,85,158,233,223,231,174,18,241,77,26,133,255,75,40,190,65,163,26,48,53,196 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xd8fc94fc3444dd0233f4f4f74b08d69d0079035017309fa37c5b30a7cabb729b", + "derived_key": [ + 232,83,248,233,232,89,11,170,74,117,125,224,222,189,198,137,244,49,205,228,155,200,97,42,160,89,8,63,109,25,91,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xd9ba5de301f3948ee34a04905cc32b778b54dac455410e096889003b0770d47c", + "derived_key": [ + 255,134,239,235,78,5,0,110,98,20,109,14,192,231,250,72,49,145,191,114,177,51,38,242,67,121,217,71,114,50,124,171 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xf7fa34f014959c990f8cabd865f6012c5ad2ae9390bd21dc8ab2c3ee9c340257", + "derived_key": [ + 209,167,69,145,2,139,203,92,187,46,4,30,218,0,85,77,176,3,253,201,73,229,148,92,229,57,32,59,244,12,109,96 + ], + 
"enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1", + "derived_key": [ + 113,233,23,33,249,145,133,118,215,96,240,47,3,202,196,124,111,64,3,49,96,49,132,142,60,29,153,230,232,58,71,67 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xe29e64ae9c38000" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1b458e5ab877fea2e4abf98d12b31ec3f7c93fd4856e807f684322e8cf11fdf7", + "derived_key": [ + 49,87,111,239,58,195,179,2,237,163,15,66,168,74,199,52,200,236,175,1,55,3,126,248,127,239,193,246,133,27,151,79 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8603151b10a4a0" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x810ca1ae825b138452fb743e9948f909b6286cbfadd5a899190fcb21a75443ab", + "derived_key": [ + 185,211,150,83,253,116,26,253,56,22,83,204,70,30,122,203,221,134,84,251,39,141,138,17,246,159,212,31,236,239,75,201 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x53c1850fab698c0" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xafe379b9510a75924647deef7e3d3d3ebf948699c9f84eda83c07c71414098b8", + "derived_key": [ + 250,91,168,183,69,6,78,180,185,147,215,10,134,34,96,243,26,77,158,213,121,211,188,200,73,204,177,205,8,52,178,106 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x4a88ebbafe2b20" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 141,97,126,192,90,203,191,95,226,69,41,166,75,35,133,169,106,173,67,240,155,225,173,169,44,112,64,49,220,193,72,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x81d41f918fe1780" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x0", + "derived_key": [ + 150,46,36,83,88,148,64,235,173,169,107,3,33,223,255,240,191,103,10,254,52,186,74,130,141,51,66,227,241,78,210,217 + ], + "enumeration_index": 60, + "initial_value": "0x10e", + "final_value": "0x1f9" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x6", + "derived_key": [ + 162,84,32,193,217,215,5,53,140,19,76,198,1,217,209,132,203,77,253,222,126,28,172,43,195,212,211,139,249,236,68,230 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5f5e100" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x7", + "derived_key": [ + 18,59,175,197,134,247,119,100,72,140,210,76,106,119,84,110,90,15,232,189,251,79,162,3,207,175,252,54,204,228,221,91 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100000000000000000000000066fc0241" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x9", + "derived_key": [ + 142,125,208,106,197,183,59,71,59,230,188,90,81,3,15,76,116,55,101,124,183,178,155,243,118,197,100,184,209,103,90,94 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000066fc0242" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xb", + "derived_key": [ + 75,168,78,31,55,208,65,188,110,85,186,57,104,38,204,73,78,132,212,129,91,109,181,38,144,66,46,234,115,134,49,79 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xe8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c" + }, + { + "address": 
"0x000000000000000000000000000000000000800b", + "key": "0xc", + "derived_key": [ + 61,226,32,44,203,98,106,211,135,215,7,34,230,79,190,68,86,46,47,35,26,41,12,8,83,43,141,106,186,64,47,245 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf2aaeb06c0e556434ac93c232f37dd8be0a7bf9f430f76eb564df9fcb770c45f" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10c", + "derived_key": [ + 121,9,53,136,208,232,71,239,167,58,16,206,32,228,121,159,177,228,102,66,214,86,23,199,229,33,63,160,73,137,217,45 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000066fc0242" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10d", + "derived_key": [ + 135,222,210,71,225,102,15,130,112,113,199,241,55,25,52,88,151,81,8,83,132,252,159,68,98,193,241,137,124,92,62,239 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100000000000000000000000000000001" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10f", + "derived_key": [ + 134,36,129,147,235,77,210,168,206,129,95,135,108,18,77,72,53,149,34,240,133,77,149,216,7,46,175,240,211,125,85,189 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x320" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x110", + "derived_key": [ + 62,137,13,108,44,59,173,166,238,204,150,3,169,156,28,98,89,237,90,100,2,241,199,108,193,139,86,140,58,239,186,15 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x22a9" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x5eff886ea0ce6ca488a3d6e336d6c0f75f46d19b42c06ce5ee98e42c96d256c7", + "derived_key": [ + 145,29,210,173,116,63,242,55,212,17,100,138,15,227,44,109,116,238,192,96,113,106,42,116,53,47,107,28,67,91,93,103 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf9030b78c5bf5ac997a76962aa32c90a6d8e8ebce9838c8eeb388d73e1f7659a" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x0", + "derived_key": [ + 214,116,246,54,163,90,111,26,81,86,78,195,55,27,156,77,163,18,109,90,208,186,227,80,207,199,250,234,199,99,99,184 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x33", + "derived_key": [ + 104,98,246,102,108,85,7,252,51,21,214,132,35,158,2,38,112,107,69,195,65,114,145,245,183,172,194,211,57,80,82,17 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xc9", + "derived_key": [ + 180,68,244,119,54,206,136,162,78,107,80,251,5,29,192,174,93,179,175,68,217,8,246,220,217,160,21,208,74,126,225,227 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10003" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xd3", + "derived_key": [ + 59,172,224,22,174,10,65,231,169,237,9,168,91,33,85,109,38,187,242,242,75,76,32,165,75,187,165,27,95,83,162,158 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10005" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xd5", + "derived_key": [ + 237,36,132,158,202,168,131,171,106,32,214,79,172,224,148,150,15,71,73,102,217,162,19,183,2,117,192,112,196,76,181,34 + ], + "enumeration_index": 0, + "initial_value": "0x0", + 
"final_value": "0x3db94d72a25f64874243af776e06f449d55ba9dd" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x4d7101ab951ded1d6f6a567c6e539f8f6a2a675fe1d5eba86fefe5192175b131", + "derived_key": [ + 83,157,81,206,111,89,151,62,178,167,63,16,226,11,189,169,125,149,14,110,8,62,221,87,116,233,142,217,139,253,153,16 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 186,154,222,88,248,170,108,168,43,242,42,43,72,15,245,221,236,232,166,232,99,81,164,123,16,213,143,51,128,251,219,183 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x99d6a8ff20aa8acdd49c8fb0cc74f2b2b57e0fa371d5aadb8e266a8cf9157ef5", + "derived_key": [ + 103,185,128,156,225,233,200,126,96,129,32,179,163,131,84,200,153,155,236,34,245,43,19,243,165,109,226,10,22,113,50,131 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xca59cc8f90e9fd91e0bc61c0c980b4b130ad1217252dd3bc209e6dfa57a05f63", + "derived_key": [ + 203,224,11,159,230,121,179,34,119,46,123,13,250,7,202,214,183,18,124,144,172,158,237,255,172,53,228,144,236,81,142,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x0", + "derived_key": [ + 183,144,135,107,116,64,90,68,61,138,52,178,244,96,20,237,96,5,52,90,158,129,172,204,39,175,55,18,74,73,29,222 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x33", + "derived_key": [ + 36,49,6,86,93,12,79,206,248,151,94,121,177,178,35,12,1,159,78,58,178,122,63,78,124,169,48,107,159,98,153,132 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0xfa", + "derived_key": [ + 208,111,9,167,39,114,89,78,179,210,171,35,115,181,211,197,236,176,132,184,74,77,237,45,48,18,241,69,222,221,138,25 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5de08929a3c0835232a7cd201d338317caabd873" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x56ca7d7fc0d180f3d83f99276f19310b5c00992edd8618fb359971a7ecb99ab3", + "derived_key": [ + 199,117,193,115,55,85,171,245,34,173,219,214,151,226,215,10,91,97,70,75,209,104,27,41,137,81,196,246,13,142,199,12 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10004" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x635799b36cb7719b903c111d5790821f9e51e29061bc47a57c7988be806aff32", + "derived_key": [ + 113,24,36,136,41,22,138,100,28,59,149,105,31,231,215,27,33,193,211,238,215,254,44,202,236,107,125,180,46,38,146,200 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0x0", + "derived_key": [ + 38,182,122,48,1,162,205,218,95,52,172,146,222,81,199,193,42,178,228,105,133,88,214,83,137,237,66,230,119,250,5,85 + ], + "enumeration_index": 0, + "initial_value": "0x0", + 
"final_value": "0xff" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0x33", + "derived_key": [ + 253,39,5,238,88,207,60,229,238,92,28,224,63,70,109,126,152,54,188,71,18,186,162,153,21,61,132,71,202,121,113,207 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0xc9", + "derived_key": [ + 191,14,175,103,174,227,73,235,177,89,118,163,111,237,172,26,74,6,101,80,58,211,113,127,243,254,106,224,120,179,101,156 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0xfb", + "derived_key": [ + 184,2,226,247,10,56,240,40,136,100,146,139,246,198,195,194,209,164,217,45,202,10,147,86,175,254,198,249,92,64,121,164 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000fd53068fc35c6a23fee067bcfd3fc0d880ba4f79d8b65aafbff02f305d" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x0", + "derived_key": [ + 168,74,2,141,105,206,208,33,164,189,140,91,38,36,168,242,80,217,18,184,248,245,157,129,85,249,94,94,229,138,101,38 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x4", + "derived_key": [ + 47,241,202,110,153,206,246,252,92,214,119,95,159,94,245,61,243,40,240,8,26,143,180,81,247,55,255,244,73,12,229,83 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x5", + "derived_key": [ + 95,192,113,95,216,242,21,235,124,16,227,245,80,217,178,9,241,140,170,135,64,175,84,27,211,70,239,73,100,139,20,245 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x6", + "derived_key": [ + 201,169,20,123,206,251,168,141,33,64,175,106,246,185,19,185,53,101,125,53,5,87,5,184,7,21,91,61,208,130,42,131 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x36b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0", + "derived_key": [ + 183,44,52,109,222,204,99,77,172,182,15,29,40,214,131,168,39,33,227,213,36,163,61,162,168,47,3,62,136,241,101,126 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x16db2e4b9f8dc120de98f8491964203ba76de27b27b29c2d25f85a325cd37477", + "derived_key": [ + 133,79,165,174,6,191,41,30,209,5,109,104,28,93,197,246,247,13,23,242,234,3,204,110,233,229,198,255,131,62,203,105 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x7bbeda1ca523343d5e888708327d45f8c743f6cb29e139a7e03dc5068543e6c4", + "derived_key": [ + 211,102,235,93,180,24,37,200,29,129,191,72,73,93,114,116,50,181,244,253,225,248,223,46,101,251,180,223,113,77,242,139 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x46700b4d40ac5c35af2c22dda2787a91eb567b06c924a8fb8ae9a05b20c08c21" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": 
"0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 228,96,52,194,207,181,181,131,126,57,95,233,204,152,190,4,82,34,235,53,200,202,40,109,252,73,189,213,239,94,126,130 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xac33ff75c19e70fe83507db0d683fd3465c996598dc972688b7ace676c89077b", + "derived_key": [ + 27,8,240,147,212,200,27,37,231,124,191,110,45,189,91,214,149,171,253,138,221,47,115,230,14,214,92,143,87,109,114,128 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1f9" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xe14c171e271191dbbbddd568a762a4325466b12116e776c3243375f110708d73", + "derived_key": [ + 248,63,134,138,17,62,7,250,227,100,52,50,139,214,30,153,110,204,16,117,222,9,119,59,220,202,187,15,30,237,162,217 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xe14c171e271191dbbbddd568a762a4325466b12116e776c3243375f110708d74", + "derived_key": [ + 207,50,50,89,38,214,97,46,146,127,167,239,70,37,230,216,37,111,63,130,63,184,65,242,102,240,65,120,90,218,241,226 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xf652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f", + "derived_key": [ + 247,124,190,104,95,142,126,239,68,219,69,165,161,237,129,135,165,5,236,239,227,84,140,240,18,4,129,67,95,125,116,254 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x46700b4d40ac5c35af2c22dda2787a91eb567b06c924a8fb8ae9a05b20c08c21" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0x0", + "derived_key": [ + 64,31,45,164,25,35,131,214,111,103,185,66,123,36,77,209,130,54,238,77,124,250,76,42,126,68,137,156,53,223,112,84 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0x33", + "derived_key": [ + 172,109,18,138,162,172,98,227,191,233,228,200,186,6,38,31,205,90,238,83,85,200,140,40,95,174,70,100,236,184,92,217 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf42d59d064dc3efa5c6984103f5354a4578f9d38" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc", + "derived_key": [ + 238,206,71,73,61,115,66,84,21,49,226,43,98,209,124,67,230,245,74,241,47,105,36,12,239,120,5,217,170,54,156,84 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf0e50c6be2ab13f852259d99317e0fa1511ed273" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "derived_key": [ + 133,159,227,100,212,172,149,230,75,232,210,108,56,145,60,23,37,166,185,84,193,191,193,253,113,198,103,19,58,1,211,88 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x33", + "derived_key": [ + 244,204,176,178,59,175,3,143,139,2,242,240,21,87,122,194,191,65,151,96,89,50,229,228,174,155,172,240,102,252,221,88 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": 
"0x36615cf349d7f6344891b1e7ca7c72883f5dc049" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9a", + "derived_key": [ + 97,131,206,41,240,31,150,151,163,154,135,110,104,89,178,252,214,86,245,40,217,82,157,194,186,14,137,246,116,87,3,221 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x58d1e441af5027fe6e4c6b749dde48721cef3bb31a08d3d64b9c6e9c725abc72" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9b", + "derived_key": [ + 36,198,234,1,57,238,186,239,33,185,70,78,68,110,74,247,188,177,180,151,164,216,15,115,133,254,13,247,190,87,17,67 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xe8949117b1d97ac18a8d1224be1e4045c67b698392d4edc03db3ed46b9e94673" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9c", + "derived_key": [ + 24,243,123,134,88,68,215,142,127,133,2,149,85,85,176,160,214,111,3,112,143,142,78,137,211,79,138,29,6,66,209,69 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xfa1e2d910cfa3e5c465f7d7f69c224d542e0a598" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9d", + "derived_key": [ + 168,81,60,218,234,227,67,148,118,29,59,147,53,55,78,40,8,227,39,217,122,18,110,222,78,162,140,204,238,55,6,95 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1900000000" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9f", + "derived_key": [ + 144,32,12,17,126,234,225,99,200,138,138,108,231,51,212,1,171,8,94,147,139,188,115,131,162,159,107,192,34,19,171,180 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0xa3", + "derived_key": [ + 86,83,107,175,210,186,181,198,128,36,151,56,29,81,196,245,31,168,78,1,68,190,24,94,31,195,247,20,122,219,85,214 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xace084d9e79dcdae011409ea9454d78218092e0b189cd6a5173b44667400d511" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc", + "derived_key": [ + 230,13,150,23,44,152,198,242,109,118,74,176,93,184,102,158,85,104,138,31,78,160,214,75,29,223,239,114,74,97,156,59 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5e45123c75ae6b22222a2ef40afcf080e84f5af5" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 105,207,185,63,169,21,50,218,22,157,164,50,155,208,136,29,218,73,246,12,13,209,254,95,239,141,35,221,253,207,221,212 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0xa2493ae5fceab9e59e3829df3da317d9a236c9b8b11dc1da94cb0e047a357cad", + "derived_key": [ + 248,135,166,243,99,75,238,65,244,69,142,255,92,110,93,81,203,0,116,149,135,131,96,149,14,49,60,161,204,107,128,214 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "derived_key": [ + 
164,102,179,128,123,95,250,209,85,215,47,223,202,234,9,179,137,135,46,66,252,162,20,55,210,106,243,173,46,40,178,89 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x36615cf349d7f6344891b1e7ca7c72883f5dc049" + }, + { + "address": "0x338bd2ded4569c568f21da174acec70b826e550c", + "key": "0x0", + "derived_key": [ + 156,179,78,101,253,32,134,94,34,150,203,47,196,201,136,120,12,142,64,149,165,101,204,29,186,80,109,39,8,84,79,232 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8002cd98cfb563492a6fb3e7c8243b7b9ad4cc92" + }, + { + "address": "0x338bd2ded4569c568f21da174acec70b826e550c", + "key": "0x1", + "derived_key": [ + 140,190,129,206,40,197,26,241,53,110,14,80,134,5,37,204,178,45,166,159,129,221,136,86,105,96,102,217,243,16,89,249 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x683cfc9cb6230b80899dbcb6181591d69089d8a4" + }, + { + "address": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75", + "key": "0x0", + "derived_key": [ + 177,52,180,226,24,99,117,67,64,64,241,12,41,231,167,74,209,204,218,129,255,34,102,39,251,93,142,41,145,92,203,50 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75", + "key": "0x1", + "derived_key": [ + 164,143,172,103,50,238,187,229,211,197,117,198,212,234,78,82,14,197,48,46,66,117,89,74,193,245,113,83,209,177,183,217 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x517d9f66c564d2edf43cb657424f72f1c20d3bff" + }, + { + "address": "0x517d9f66c564d2edf43cb657424f72f1c20d3bff", + "key": "0x0", + "derived_key": [ + 230,154,81,238,249,118,81,47,67,167,51,81,39,137,145,76,187,230,155,155,70,102,153,242,27,165,84,224,218,253,106,44 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x5e45123c75ae6b22222a2ef40afcf080e84f5af5", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 60,175,19,171,24,127,90,244,102,215,109,52,163,108,87,248,78,75,87,18,254,157,225,108,251,34,166,46,135,226,155,114 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0x0", + "derived_key": [ + 137,66,124,98,185,206,118,62,215,166,225,249,150,252,161,203,0,217,115,193,56,251,207,17,2,102,50,45,221,187,226,50 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5de08929a3c0835232a7cd201d338317caabd8730002" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0x1", + "derived_key": [ + 86,146,226,162,251,7,31,172,102,81,196,200,22,126,55,247,235,106,113,202,76,139,128,246,85,114,151,178,101,95,159,191 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x338bd2ded4569c568f21da174acec70b826e550c" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0x2", + "derived_key": [ + 238,250,55,221,42,57,86,177,201,103,75,239,3,21,25,161,11,112,13,117,46,113,1,71,100,17,102,72,32,125,98,78 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000fd53068fc35c6a23fee067bcfd3fc0d880ba4f79d8b65aafbff02f305d" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc", + "derived_key": [ + 
22,155,114,188,54,101,62,87,11,121,52,68,25,197,106,219,1,155,20,238,224,223,45,229,125,209,123,117,113,166,132,12 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xdc0ed6fcbfe51c9f84a662e64fd1347736aa7486" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "derived_key": [ + 81,122,205,57,136,220,234,247,156,218,204,189,130,97,114,48,48,160,227,48,48,64,28,58,212,244,195,137,255,228,64,41 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8002cd98cfb563492a6fb3e7c8243b7b9ad4cc92" + }, + { + "address": "0x683cfc9cb6230b80899dbcb6181591d69089d8a4", + "key": "0x0", + "derived_key": [ + 237,236,215,194,80,85,137,94,24,27,243,41,116,140,187,70,33,135,209,48,214,17,9,198,53,117,79,21,244,235,240,208 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7", + "key": "0x0", + "derived_key": [ + 219,215,135,89,179,97,218,60,174,210,247,236,175,60,97,114,55,143,26,104,199,6,53,175,153,170,254,4,26,49,33,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x36615cf349d7f6344891b1e7ca7c72883f5dc049" + }, + { + "address": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7", + "key": "0x2", + "derived_key": [ + 131,75,158,95,145,124,241,215,162,81,17,8,190,214,110,90,15,123,1,214,244,31,200,40,196,119,19,72,17,44,27,219 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0" + }, + { + "address": "0xdc0ed6fcbfe51c9f84a662e64fd1347736aa7486", + "key": "0x0", + "derived_key": [ + 62,110,21,209,63,190,73,44,77,121,157,143,198,176,46,157,199,11,251,128,18,248,171,99,94,148,201,218,67,21,70,232 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0xe024f9e4e8fa2f08f768c1cb56bc4a6e3cbd8834", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 16,46,124,130,188,155,165,96,66,61,124,176,157,94,180,222,164,199,68,147,148,121,54,59,60,181,162,4,74,28,114,103 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + } + ], + "aux_commitments": { + "events_queue_commitment": "0xec82208c87a937d88768a0067b2a80f0525eca8288dad2cf96cf8bbe6a1aa565", + "bootloader_initial_content_commitment": "0x97df88dcecbcd29b49773c042cdee7a44c57a741e64913fff5aa1b3484232f28" + }, + "blob_hashes": [ + { + "commitment": "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", + "linear_hash": "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1" + } + } +} diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs index 59ade8873cd..957cfa9a1a6 100644 --- a/core/lib/types/src/l2_to_l1_log.rs +++ b/core/lib/types/src/l2_to_l1_log.rs @@ -1,5 +1,5 @@ use serde::{Deserialize, Serialize}; -use zksync_system_constants::{BLOB1_LINEAR_HASH_KEY, PUBDATA_CHUNK_PUBLISHER_ADDRESS}; +use zksync_system_constants::{BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY, PUBDATA_CHUNK_PUBLISHER_ADDRESS}; use crate::{ blob::{num_blobs_created, num_blobs_required}, @@ -80,10 +80,15 @@ pub fn l2_to_l1_logs_tree_size(protocol_version: ProtocolVersionId) -> usize { } /// Returns the blob hashes parsed out from the system logs -pub fn parse_system_logs_for_blob_hashes( +pub fn parse_system_logs_for_blob_hashes_pre_gateway( protocol_version: &ProtocolVersionId, system_logs: &[SystemL2ToL1Log], ) -> Vec { + assert!( + protocol_version.is_pre_gateway(), + "Cannot parse blob linear hashes from system logs for post gateway" + ); + let num_required_blobs = num_blobs_required(protocol_version) as u32; let num_created_blobs = num_blobs_created(protocol_version) as u32; @@ -95,9 +100,11 @@ pub fn parse_system_logs_for_blob_hashes( .iter() .filter(|log| { log.0.sender == PUBDATA_CHUNK_PUBLISHER_ADDRESS - && log.0.key >= H256::from_low_u64_be(BLOB1_LINEAR_HASH_KEY as u64) + && log.0.key >= H256::from_low_u64_be(BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY as u64) && log.0.key - < H256::from_low_u64_be((BLOB1_LINEAR_HASH_KEY + num_created_blobs) as u64) + < H256::from_low_u64_be( + (BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY + 
num_created_blobs) as u64, + ) }) .map(|log| (log.0.key, log.0.value)) .collect::>(); diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index bc19086c969..f974d17f4a7 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -6,6 +6,7 @@ use tokio::sync::mpsc; use zksync_multivm::{ interface::{ executor::{BatchExecutor, BatchExecutorFactory}, + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageView, StorageViewStats}, utils::DivergenceHandler, BatchTransactionExecutionResult, BytecodeCompressionError, CompressedBytecodeInfo, @@ -13,12 +14,13 @@ use zksync_multivm::{ VmInterface, VmInterfaceHistoryEnabled, }, is_supported_by_fast_vm, + pubdata_builders::pubdata_params_to_builder, tracers::CallTracer, vm_fast, vm_latest::HistoryEnabled, FastVmInstance, LegacyVmInstance, MultiVMTracer, }; -use zksync_types::{vm::FastVmMode, Transaction}; +use zksync_types::{commitment::PubdataParams, vm::FastVmMode, Transaction}; use super::{ executor::{Command, MainBatchExecutor}, @@ -116,6 +118,7 @@ impl BatchExecutorFactory storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box> { // Since we process `BatchExecutor` commands one-by-one (the next command is never enqueued // until a previous command is processed), capacity 1 is enough for the commands channel. @@ -130,8 +133,14 @@ impl BatchExecutorFactory _tracer: PhantomData::, }; - let handle = - tokio::task::spawn_blocking(move || executor.run(storage, l1_batch_params, system_env)); + let handle = tokio::task::spawn_blocking(move || { + executor.run( + storage, + l1_batch_params, + system_env, + pubdata_params_to_builder(pubdata_params), + ) + }); Box::new(MainBatchExecutor::new(handle, commands_sender)) } } @@ -183,8 +192,8 @@ impl BatchVm { dispatch_batch_vm!(self.start_new_l2_block(l2_block)); } - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_batch_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_batch_vm!(self.finish_batch(pubdata_builder)) } fn make_snapshot(&mut self) { @@ -260,6 +269,7 @@ impl CommandReceiver { storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_builder: Rc, ) -> anyhow::Result> { tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number); @@ -310,7 +320,7 @@ impl CommandReceiver { } } Command::FinishBatch(resp) => { - let vm_block_result = self.finish_batch(&mut vm)?; + let vm_block_result = self.finish_batch(&mut vm, pubdata_builder)?; if resp.send(vm_block_result).is_err() { break; } @@ -365,10 +375,14 @@ impl CommandReceiver { latency.observe(); } - fn finish_batch(&self, vm: &mut BatchVm) -> anyhow::Result { + fn finish_batch( + &self, + vm: &mut BatchVm, + pubdata_builder: Rc, + ) -> anyhow::Result { // The vm execution was paused right after the last transaction was executed. // There is some post-processing work that the VM needs to do before the block is fully processed. 
- let result = vm.finish_batch(); + let result = vm.finish_batch(pubdata_builder); anyhow::ensure!( !result.block_tip_execution_result.result.is_failed(), "VM must not fail when finalizing block: {:#?}", diff --git a/core/lib/vm_executor/src/oneshot/block.rs b/core/lib/vm_executor/src/oneshot/block.rs index cc759c032fc..d6118f15b98 100644 --- a/core/lib/vm_executor/src/oneshot/block.rs +++ b/core/lib/vm_executor/src/oneshot/block.rs @@ -203,6 +203,7 @@ impl OneshotEnvParameters { enforced_base_fee, ) .await?; + Ok(OneshotEnv { system, l1_batch, diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index dc9ef0c0e8d..d4e0a94f917 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -67,6 +67,8 @@ pub struct MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts, /// Contracts to be used after the protocol defense upgrade vm_protocol_defense: BaseSystemContracts, + /// Contracts to be used after the gateway upgrade + gateway: BaseSystemContracts, // We use `fn() -> C` marker so that the `MultiVMBaseSystemContracts` unconditionally implements `Send + Sync`. _contracts_kind: PhantomData C>, } @@ -105,6 +107,7 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version25 | ProtocolVersionId::Version26 => { &self.vm_protocol_defense } + ProtocolVersionId::Version27 => &self.gateway, }; let base = base.clone(); @@ -133,6 +136,7 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), vm_protocol_defense: BaseSystemContracts::estimate_gas_post_protocol_defense(), + gateway: BaseSystemContracts::estimate_gas_gateway(), _contracts_kind: PhantomData, } } @@ -154,6 +158,7 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( ), vm_protocol_defense: BaseSystemContracts::playground_post_protocol_defense(), + gateway: BaseSystemContracts::playground_gateway(), _contracts_kind: PhantomData, } } diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index 018e5abded6..5f9e4dd3c6f 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -19,8 +19,9 @@ use zksync_multivm::{ executor::{OneshotExecutor, TransactionValidator}, storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, tracer::{ValidationError, ValidationParams}, - ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, - StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, VmInterface, + ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, + OneshotTransactionExecutionResult, StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, + VmInterface, }, tracers::{CallTracer, StorageInvocations, ValidationTracer}, utils::adjust_pubdata_price_for_tx, @@ -169,7 +170,7 @@ where ); let exec_result = executor.apply(|vm, transaction| { vm.push_transaction(transaction); - vm.inspect(&mut tracers.into(), VmExecutionMode::OneTx) + vm.inspect(&mut tracers.into(), InspectExecutionMode::OneTx) }); let validation_result = Arc::make_mut(&mut validation_result) .take() diff --git a/core/lib/vm_executor/src/storage.rs b/core/lib/vm_executor/src/storage.rs index fa0e530c190..e5a2d404233 100644 --- a/core/lib/vm_executor/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -7,8 +7,9 @@ use zksync_contracts::BaseSystemContracts; use 
zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_types::{ - block::L2BlockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, - L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, + block::L2BlockHeader, commitment::PubdataParams, fee_model::BatchFeeInput, + snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, }; const BATCH_COMPUTATIONAL_GAS_LIMIT: u32 = u32::MAX; @@ -263,7 +264,7 @@ impl L1BatchParamsProvider { first_l2_block_in_batch: &FirstL2BlockInBatch, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { + ) -> anyhow::Result<(SystemEnv, L1BatchEnv, PubdataParams)> { anyhow::ensure!( first_l2_block_in_batch.l1_batch_number > L1BatchNumber(0), "Loading params for genesis L1 batch not supported" @@ -317,7 +318,7 @@ impl L1BatchParamsProvider { .await .context("failed getting base system contracts")?; - Ok(l1_batch_params( + let (system_env, l1_batch_env) = l1_batch_params( first_l2_block_in_batch.l1_batch_number, first_l2_block_in_batch.header.fee_account_address, l1_batch_timestamp, @@ -333,6 +334,12 @@ impl L1BatchParamsProvider { .context("`protocol_version` must be set for L2 block")?, first_l2_block_in_batch.header.virtual_blocks, chain_id, + ); + + Ok(( + system_env, + l1_batch_env, + first_l2_block_in_batch.header.pubdata_params, )) } @@ -346,7 +353,7 @@ impl L1BatchParamsProvider { number: L1BatchNumber, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let first_l2_block = self .load_first_l2_block_in_batch(storage, number) .await diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs index 119f975fecd..60522ba338a 100644 --- a/core/lib/vm_interface/src/executor.rs +++ b/core/lib/vm_interface/src/executor.rs @@ -3,7 +3,7 @@ use std::fmt; use async_trait::async_trait; -use zksync_types::{l2::L2Tx, Transaction}; +use zksync_types::{commitment::PubdataParams, l2::L2Tx, Transaction}; use crate::{ storage::{ReadStorage, StorageView}, @@ -20,6 +20,7 @@ pub trait BatchExecutorFactory: 'static + Send + fmt::Debug { storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box>; } diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index e0287483067..39f949e5d8a 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -24,8 +24,8 @@ pub use crate::{ VmRevertReason, VmRevertReasonParsingError, }, inputs::{ - L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, StoredL2BlockEnv, SystemEnv, - TxExecutionArgs, TxExecutionMode, VmExecutionMode, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, + StoredL2BlockEnv, SystemEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, }, outputs::{ BatchTransactionExecutionResult, BootloaderMemory, Call, CallType, CircuitStatistic, @@ -41,6 +41,7 @@ pub use crate::{ }; pub mod executor; +pub mod pubdata; pub mod storage; mod types; pub mod utils; diff --git a/core/lib/vm_interface/src/pubdata/mod.rs b/core/lib/vm_interface/src/pubdata/mod.rs new file mode 100644 index 00000000000..f901687b5fa --- /dev/null +++ b/core/lib/vm_interface/src/pubdata/mod.rs @@ -0,0 +1,90 @@ +use zksync_types::{ + l2_to_l1_log::L2ToL1Log, 
writes::StateDiffRecord, Address, ProtocolVersionId, H256, U256,
+};
+
+/// Corresponds to the following solidity event:
+/// ```solidity
+/// struct L2ToL1Log {
+///     uint8 l2ShardId;
+///     bool isService;
+///     uint16 txNumberInBlock;
+///     address sender;
+///     bytes32 key;
+///     bytes32 value;
+/// }
+/// ```
+#[derive(Debug, Default, Clone, PartialEq)]
+pub struct L1MessengerL2ToL1Log {
+    pub l2_shard_id: u8,
+    pub is_service: bool,
+    pub tx_number_in_block: u16,
+    pub sender: Address,
+    pub key: U256,
+    pub value: U256,
+}
+
+impl L1MessengerL2ToL1Log {
+    pub fn packed_encoding(&self) -> Vec<u8> {
+        /// Converts a `U256` value into a big-endian bytes array
+        fn u256_to_bytes_be(value: &U256) -> Vec<u8> {
+            let mut bytes = vec![0u8; 32];
+            value.to_big_endian(bytes.as_mut_slice());
+            bytes
+        }
+
+        let mut res: Vec<u8> = vec![];
+        res.push(self.l2_shard_id);
+        res.push(self.is_service as u8);
+        res.extend_from_slice(&self.tx_number_in_block.to_be_bytes());
+        res.extend_from_slice(self.sender.as_bytes());
+        res.extend(u256_to_bytes_be(&self.key));
+        res.extend(u256_to_bytes_be(&self.value));
+        res
+    }
+}
+
+impl From<L1MessengerL2ToL1Log> for L2ToL1Log {
+    fn from(log: L1MessengerL2ToL1Log) -> Self {
+        fn u256_to_h256(num: U256) -> H256 {
+            let mut bytes = [0u8; 32];
+            num.to_big_endian(&mut bytes);
+            H256::from_slice(&bytes)
+        }
+
+        L2ToL1Log {
+            shard_id: log.l2_shard_id,
+            is_service: log.is_service,
+            tx_number_in_block: log.tx_number_in_block,
+            sender: log.sender,
+            key: u256_to_h256(log.key),
+            value: u256_to_h256(log.value),
+        }
+    }
+}
+
+/// Struct based on which the pubdata blob is formed
+#[derive(Debug, Clone, Default)]
+pub struct PubdataInput {
+    pub user_logs: Vec<L1MessengerL2ToL1Log>,
+    pub l2_to_l1_messages: Vec<Vec<u8>>,
+    pub published_bytecodes: Vec<Vec<u8>>,
+    pub state_diffs: Vec<StateDiffRecord>,
+}
+
+/// Trait that encapsulates pubdata building logic. It is implemented for the rollup and validium cases.
+/// If a chain needs a custom pubdata format, another implementation should be added.
+pub trait PubdataBuilder: std::fmt::Debug {
+    fn l2_da_validator(&self) -> Address;
+
+    fn l1_messenger_operator_input(
+        &self,
+        input: &PubdataInput,
+        protocol_version: ProtocolVersionId,
+    ) -> Vec<u8>;
+
+    fn settlement_layer_pubdata(
+        &self,
+        input: &PubdataInput,
+        protocol_version: ProtocolVersionId,
+    ) -> Vec<u8>;
+}
diff --git a/core/lib/vm_interface/src/types/inputs/execution_mode.rs b/core/lib/vm_interface/src/types/inputs/execution_mode.rs
index 41492af6edc..f091a259d30 100644
--- a/core/lib/vm_interface/src/types/inputs/execution_mode.rs
+++ b/core/lib/vm_interface/src/types/inputs/execution_mode.rs
@@ -13,3 +13,22 @@ pub enum VmExecutionMode {
     /// Stop after executing the entire bootloader. But before you exit the bootloader.
     Bootloader,
 }
+
+/// Subset of `VmExecutionMode` variants that do not require any additional input
+/// and can be invoked with the `inspect` method.
+#[derive(Debug, Copy, Clone)]
+pub enum InspectExecutionMode {
+    /// Stop after executing the next transaction.
+    OneTx,
+    /// Stop after executing the entire bootloader. But before you exit the bootloader.
+    Bootloader,
+}
+
+impl From<InspectExecutionMode> for VmExecutionMode {
+    fn from(mode: InspectExecutionMode) -> Self {
+        match mode {
+            InspectExecutionMode::Bootloader => Self::Bootloader,
+            InspectExecutionMode::OneTx => Self::OneTx,
+        }
+    }
+}
diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs
index 24f58ae72f1..cb80ba7c138 100644
--- a/core/lib/vm_interface/src/types/inputs/mod.rs
+++ b/core/lib/vm_interface/src/types/inputs/mod.rs
@@ -3,7 +3,7 @@ use zksync_types::{
 };
 
 pub use self::{
-    execution_mode::VmExecutionMode,
+    execution_mode::{InspectExecutionMode, VmExecutionMode},
     l1_batch_env::L1BatchEnv,
     l2_block::{L2BlockEnv, StoredL2BlockEnv},
     system_env::{SystemEnv, TxExecutionMode},
diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs
index 4076aa72270..f23d6f307b8 100644
--- a/core/lib/vm_interface/src/utils/dump.rs
+++ b/core/lib/vm_interface/src/utils/dump.rs
@@ -1,13 +1,14 @@
-use std::collections::HashMap;
+use std::{collections::HashMap, rc::Rc};
 
 use serde::{Deserialize, Serialize};
 use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2BlockNumber, Transaction, H256};
 
 use crate::{
+    pubdata::PubdataBuilder,
     storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView},
-    BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult,
-    SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt,
-    VmInterfaceHistoryEnabled, VmTrackingContracts,
+    BytecodeCompressionResult, FinishedL1Batch, InspectExecutionMode, L1BatchEnv, L2BlockEnv,
+    PushTransactionResult, SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface,
+    VmInterfaceExt, VmInterfaceHistoryEnabled, VmTrackingContracts,
 };
 
 fn create_storage_snapshot(
@@ -48,6 +49,7 @@ }
 
 /// VM dump allowing to re-run the VM on the same inputs. Can be (de)serialized.
+/// Note that the dump is not capable of finishing a batch in terms of VM execution.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct VmDump { pub l1_batch_env: L1BatchEnv, @@ -98,7 +100,6 @@ impl VmDump { } } } - vm.finish_batch(); vm } } @@ -162,7 +163,7 @@ impl VmInterface for DumpingVm { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { self.inner.inspect(dispatcher, execution_mode) } @@ -189,8 +190,8 @@ impl VmInterface for DumpingVm { .inspect_transaction_with_bytecode_compression(tracer, tx, with_compression) } - fn finish_batch(&mut self) -> FinishedL1Batch { - self.inner.finish_batch() + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + self.inner.finish_batch(pubdata_builder) } } diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs index e8ef87c3c7f..d12d85fa2e3 100644 --- a/core/lib/vm_interface/src/utils/shadow.rs +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -3,6 +3,7 @@ use std::{ cell::RefCell, collections::{BTreeMap, BTreeSet}, fmt, + rc::Rc, sync::Arc, }; @@ -10,9 +11,10 @@ use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transact use super::dump::{DumpingVm, VmDump}; use crate::{ + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageView}, - BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, InspectExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmTrackingContracts, }; @@ -332,7 +334,7 @@ where where Shadow: VmFactory, { - let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage.clone()); + let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage); let shadow = Shadow::new(batch_env.clone(), system_env.clone(), shadow_storage); let shadow = VmWithReporting { vm: shadow, @@ -400,7 +402,7 @@ where fn inspect( &mut self, (main_tracer, shadow_tracer): &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { let main_result = self.main.inspect(main_tracer, execution_mode); if let Some(shadow) = self.shadow.get_mut() { @@ -457,10 +459,10 @@ where (main_bytecodes_result, main_tx_result) } - fn finish_batch(&mut self) -> FinishedL1Batch { - let main_batch = self.main.finish_batch(); + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + let main_batch = self.main.finish_batch(pubdata_builder.clone()); if let Some(shadow) = self.shadow.get_mut() { - let shadow_batch = shadow.vm.finish_batch(); + let shadow_batch = shadow.vm.finish_batch(pubdata_builder); let errors = main_batch.check_divergence(&shadow_batch); if let Err(err) = errors.into_result() { self.report(err); diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index 3a06d7f80cb..2c25d729e31 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -11,11 +11,14 @@ //! Generally speaking, in most cases, the tracer dispatcher is a wrapper around `Vec>`, //! where `VmTracer` is a trait implemented for a specific VM version. 
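The `VmInterface::finish_batch` change below threads an `Rc<dyn PubdataBuilder>` into batch finalization, with the builder trait defined in the new `pubdata` module earlier in this patch. Each packed user log produced by `L1MessengerL2ToL1Log::packed_encoding` is a fixed 88 bytes (1 + 1 + 2 + 20 + 32 + 32). No rollup or validium implementation of the trait appears in this patch; the snippet below is only a rough sketch of a custom builder, assuming the crate is exposed as `zksync_vm_interface` and using a hypothetical `NoopPubdataBuilder` that publishes nothing to the settlement layer:

```rust
// Sketch only: `NoopPubdataBuilder` is hypothetical and the `zksync_vm_interface` crate
// path is an assumption; the trait methods mirror the `PubdataBuilder` definition above.
use zksync_types::{Address, ProtocolVersionId};
use zksync_vm_interface::pubdata::{PubdataBuilder, PubdataInput};

/// A validium-flavored builder that publishes nothing to the settlement layer and only
/// relays the packed user logs to the L1 messenger.
#[derive(Debug)]
struct NoopPubdataBuilder {
    l2_da_validator: Address,
}

impl PubdataBuilder for NoopPubdataBuilder {
    fn l2_da_validator(&self) -> Address {
        self.l2_da_validator
    }

    fn l1_messenger_operator_input(
        &self,
        input: &PubdataInput,
        _protocol_version: ProtocolVersionId,
    ) -> Vec<u8> {
        // 88 bytes per log, as laid out by `packed_encoding`.
        input
            .user_logs
            .iter()
            .flat_map(|log| log.packed_encoding())
            .collect()
    }

    fn settlement_layer_pubdata(
        &self,
        _input: &PubdataInput,
        _protocol_version: ProtocolVersionId,
    ) -> Vec<u8> {
        Vec::new()
    }
}
```

A rollup-style builder would presumably also encode `l2_to_l1_messages`, `published_bytecodes`, and compressed `state_diffs` into the settlement-layer payload; those implementations live outside this patch.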
+use std::rc::Rc; + use zksync_types::{Transaction, H256}; use crate::{ - storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, + pubdata::PubdataBuilder, storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, + VmExecutionResultAndLogs, }; pub trait VmInterface { @@ -35,7 +38,7 @@ pub trait VmInterface { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs; /// Start a new L2 block. @@ -51,13 +54,13 @@ pub trait VmInterface { /// Execute batch till the end and return the result, with final execution state /// and bootloader memory. - fn finish_batch(&mut self) -> FinishedL1Batch; + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch; } /// Extension trait for [`VmInterface`] that provides some additional methods. pub trait VmInterfaceExt: VmInterface { /// Executes the next VM step (either next transaction or bootloader or the whole batch). - fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs { + fn execute(&mut self, execution_mode: InspectExecutionMode) -> VmExecutionResultAndLogs { self.inspect(&mut ::default(), execution_mode) } diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index 18c206eaf58..a2aee8c7420 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -146,6 +146,7 @@ impl InternalApiConfig { .l1_weth_bridge_proxy_addr .unwrap_or_default(), ), + l2_legacy_shared_bridge: contracts_config.l2_legacy_shared_bridge_addr, }, bridgehub_proxy_addr: contracts_config .ecosystem_contracts diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index 85d894b7fd5..b2c4ee6465f 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -67,6 +67,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; storage .blocks_dal() diff --git a/core/node/commitment_generator/Cargo.toml b/core/node/commitment_generator/Cargo.toml index 5ec8410124f..1f4645414cb 100644 --- a/core/node/commitment_generator/Cargo.toml +++ b/core/node/commitment_generator/Cargo.toml @@ -20,6 +20,7 @@ zksync_utils.workspace = true zksync_eth_client.workspace = true zksync_contracts.workspace = true zksync_multivm.workspace = true +zksync_system_constants.workspace = true circuit_sequencer_api_1_4_0.workspace = true circuit_sequencer_api_1_4_1.workspace = true circuit_sequencer_api_1_5_0.workspace = true diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs index cf6971b041c..9a33d4766f6 100644 --- a/core/node/commitment_generator/src/lib.rs +++ b/core/node/commitment_generator/src/lib.rs @@ -9,7 +9,7 @@ use zksync_l1_contract_interface::i_executor::commit::kzg::pubdata_to_blob_commi use zksync_types::{ blob::num_blobs_required, commitment::{ - AuxCommitments, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, + AuxCommitments, BlobHash, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, L1BatchCommitment, L1BatchCommitmentArtifacts, L1BatchCommitmentMode, }, writes::{InitialStorageWrite, 
RepeatedStorageWrite, StateDiffRecord}, @@ -19,7 +19,10 @@ use zksync_utils::h256_to_u256; use crate::{ metrics::{CommitmentStage, METRICS}, - utils::{convert_vm_events_to_log_queries, CommitmentComputer, RealCommitmentComputer}, + utils::{ + convert_vm_events_to_log_queries, pubdata_to_blob_linear_hashes, read_aggregation_root, + CommitmentComputer, RealCommitmentComputer, + }, }; mod metrics; @@ -263,14 +266,40 @@ impl CommitmentGenerator { } state_diffs.sort_unstable_by_key(|rec| (rec.address, rec.key)); - let blob_commitments = if protocol_version.is_post_1_4_2() { + let blob_hashes = if protocol_version.is_post_1_4_2() { let pubdata_input = header.pubdata_input.with_context(|| { format!("`pubdata_input` is missing for L1 batch #{l1_batch_number}") })?; - pubdata_to_blob_commitments(num_blobs_required(&protocol_version), &pubdata_input) + let commitments = pubdata_to_blob_commitments( + num_blobs_required(&protocol_version), + &pubdata_input, + ); + let linear_hashes = pubdata_to_blob_linear_hashes( + num_blobs_required(&protocol_version), + pubdata_input, + ); + + commitments + .into_iter() + .zip(linear_hashes) + .map(|(commitment, linear_hash)| BlobHash { + commitment, + linear_hash, + }) + .collect::>() } else { - vec![H256::zero(); num_blobs_required(&protocol_version)] + vec![Default::default(); num_blobs_required(&protocol_version)] + }; + + let aggregation_root = if protocol_version.is_pre_gateway() { + let mut connection = self + .connection_pool + .connection_tagged("commitment_generator") + .await?; + read_aggregation_root(&mut connection, l1_batch_number).await? + } else { + H256::zero() }; CommitmentInput::PostBoojum { @@ -278,7 +307,8 @@ impl CommitmentGenerator { system_logs: header.system_logs, state_diffs, aux_commitments, - blob_commitments, + blob_hashes, + aggregation_root, } }; @@ -357,14 +387,10 @@ impl CommitmentGenerator { (L1BatchCommitmentMode::Rollup, _) => { // Do nothing } - - ( - L1BatchCommitmentMode::Validium, - CommitmentInput::PostBoojum { - blob_commitments, .. - }, - ) => { - blob_commitments.fill(H256::zero()); + (L1BatchCommitmentMode::Validium, CommitmentInput::PostBoojum { blob_hashes, .. }) => { + for hashes in blob_hashes { + hashes.commitment = H256::zero(); + } } (L1BatchCommitmentMode::Validium, _) => { /* Do nothing */ } } @@ -374,14 +400,9 @@ impl CommitmentGenerator { match (self.commitment_mode, &mut commitment.auxiliary_output) { ( L1BatchCommitmentMode::Validium, - L1BatchAuxiliaryOutput::PostBoojum { - blob_linear_hashes, - blob_commitments, - .. - }, + L1BatchAuxiliaryOutput::PostBoojum { blob_hashes, .. 
}, ) => { - blob_linear_hashes.fill(H256::zero()); - blob_commitments.fill(H256::zero()); + blob_hashes.fill(Default::default()); } _ => { /* Do nothing */ } } diff --git a/core/node/commitment_generator/src/utils.rs b/core/node/commitment_generator/src/utils.rs index 86643b6b581..d405a1256a2 100644 --- a/core/node/commitment_generator/src/utils.rs +++ b/core/node/commitment_generator/src/utils.rs @@ -2,6 +2,7 @@ use std::fmt; +use anyhow::Context; use itertools::Itertools; use zk_evm_1_3_3::{ aux_structures::Timestamp as Timestamp_1_3_3, @@ -15,13 +16,18 @@ use zk_evm_1_5_0::{ aux_structures::Timestamp as Timestamp_1_5_0, zk_evm_abstractions::queries::LogQuery as LogQuery_1_5_0, }; +use zksync_dal::{Connection, Core, CoreDal}; +use zksync_l1_contract_interface::i_executor::commit::kzg::ZK_SYNC_BYTES_PER_BLOB; use zksync_multivm::{interface::VmEvent, utils::get_used_bootloader_memory_bytes}; +use zksync_system_constants::message_root::{AGG_TREE_HEIGHT_KEY, AGG_TREE_NODES_KEY}; use zksync_types::{ vm::VmVersion, + web3::keccak256, zk_evm_types::{LogQuery, Timestamp}, - ProtocolVersionId, EVENT_WRITER_ADDRESS, H256, U256, + AccountTreeId, L1BatchNumber, ProtocolVersionId, StorageKey, EVENT_WRITER_ADDRESS, H256, + L2_MESSAGE_ROOT_ADDRESS, U256, }; -use zksync_utils::{address_to_u256, expand_memory_contents, h256_to_u256}; +use zksync_utils::{address_to_u256, expand_memory_contents, h256_to_u256, u256_to_h256}; /// Encapsulates computations of commitment components. /// @@ -68,7 +74,8 @@ impl CommitmentComputer for RealCommitmentComputer { ), )), VmVersion::Vm1_5_0SmallBootloaderMemory - | VmVersion::Vm1_5_0IncreasedBootloaderMemory => Ok(H256( + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => Ok(H256( circuit_sequencer_api_1_5_0::commitments::events_queue_commitment_fixed( &events_queue .iter() @@ -106,7 +113,8 @@ impl CommitmentComputer for RealCommitmentComputer { ), )), VmVersion::Vm1_5_0SmallBootloaderMemory - | VmVersion::Vm1_5_0IncreasedBootloaderMemory => Ok(H256( + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => Ok(H256( circuit_sequencer_api_1_5_0::commitments::initial_heap_content_commitment_fixed( &full_bootloader_memory, ), @@ -234,3 +242,75 @@ pub(crate) fn convert_vm_events_to_log_queries(events: &[VmEvent]) -> Vec, +) -> Vec { + // Now, we need to calculate the linear hashes of the blobs. + // Firstly, let's pad the pubdata to the size of the blob. + if pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { + pubdata_input.resize( + pubdata_input.len() + + (ZK_SYNC_BYTES_PER_BLOB - pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB), + 0, + ); + } + + let mut result = vec![H256::zero(); blobs_required]; + + pubdata_input + .chunks(ZK_SYNC_BYTES_PER_BLOB) + .enumerate() + .for_each(|(i, chunk)| { + result[i] = H256(keccak256(chunk)); + }); + + result +} + +pub(crate) async fn read_aggregation_root( + connection: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, +) -> anyhow::Result { + let (_, last_l2_block) = connection + .blocks_dal() + .get_l2_block_range_of_l1_batch(l1_batch_number) + .await? 
+ .context("No range for batch")?; + + let agg_tree_height_slot = StorageKey::new( + AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS), + H256::from_low_u64_be(AGG_TREE_HEIGHT_KEY as u64), + ); + + let agg_tree_height = connection + .storage_web3_dal() + .get_historical_value_unchecked(agg_tree_height_slot.hashed_key(), last_l2_block) + .await?; + let agg_tree_height = h256_to_u256(agg_tree_height); + + // `nodes[height][0]` + let agg_tree_root_hash_key = + n_dim_array_key_in_layout(AGG_TREE_NODES_KEY, &[agg_tree_height, U256::zero()]); + let agg_tree_root_hash_slot = StorageKey::new( + AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS), + agg_tree_root_hash_key, + ); + + Ok(connection + .storage_web3_dal() + .get_historical_value_unchecked(agg_tree_root_hash_slot.hashed_key(), last_l2_block) + .await?) +} + +fn n_dim_array_key_in_layout(array_key: usize, indices: &[U256]) -> H256 { + let mut key: H256 = u256_to_h256(array_key.into()); + + for index in indices { + key = H256(keccak256(key.as_bytes())); + key = u256_to_h256(h256_to_u256(key).overflowing_add(*index).0); + } + + key +} diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 7267d7e1c82..53be2fc63c7 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -28,6 +28,13 @@ fn to_fetched_block( .context("Integer overflow converting block number")?, ); let payload = Payload::decode(payload).context("Payload::decode()")?; + let pubdata_params = if payload.protocol_version.is_pre_gateway() { + payload.pubdata_params.unwrap_or_default() + } else { + payload + .pubdata_params + .context("Missing `pubdata_params` for post-gateway payload")? + }; Ok(FetchedBlock { number, l1_batch_number: payload.l1_batch_number, @@ -38,6 +45,7 @@ fn to_fetched_block( l1_gas_price: payload.l1_gas_price, l2_fair_gas_price: payload.l2_fair_gas_price, fair_pubdata_price: payload.fair_pubdata_price, + pubdata_params, virtual_blocks: payload.virtual_blocks, operator_address: payload.operator_address, transactions: payload diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 4ebcf5c9a61..db433665e57 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -295,6 +295,7 @@ impl StateKeeper { timestamp: self.last_timestamp, virtual_blocks: 1, }, + pubdata_params: Default::default(), }, number: self.last_batch, first_l2_block_number: self.last_block, @@ -568,9 +569,11 @@ impl StateKeeperRunner { let (stop_send, stop_recv) = sync::watch::channel(false); let (persistence, l2_block_sealer) = StateKeeperPersistence::new( self.pool.0.clone(), - ethabi::Address::repeat_byte(11), + Some(ethabi::Address::repeat_byte(11)), 5, - ); + ) + .await + .unwrap(); let io = ExternalIO::new( self.pool.0.clone(), @@ -675,9 +678,11 @@ impl StateKeeperRunner { let (stop_send, stop_recv) = sync::watch::channel(false); let (persistence, l2_block_sealer) = StateKeeperPersistence::new( self.pool.0.clone(), - ethabi::Address::repeat_byte(11), + Some(ethabi::Address::repeat_byte(11)), 5, - ); + ) + .await + .unwrap(); let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone()); let io = ExternalIO::new( diff --git a/core/node/db_pruner/src/tests.rs b/core/node/db_pruner/src/tests.rs index a5458e996e1..99fbada423d 100644 --- a/core/node/db_pruner/src/tests.rs +++ b/core/node/db_pruner/src/tests.rs @@ -122,6 +122,7 @@ async fn insert_l2_blocks( virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + 
pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index 9e844a8b853..8e5032a69cf 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -126,6 +126,10 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { events_queue_commitment: Some(H256::zero()), bootloader_initial_content_commitment: Some(H256::zero()), state_diffs_compressed: vec![], + state_diff_hash: Some(H256::default()), + local_root: Some(H256::default()), + aggregation_root: Some(H256::default()), + da_inclusion_data: Some(vec![]), } } diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index a832733b355..4185878d2ac 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -140,7 +140,7 @@ impl EthWatch { let finalized_block = client.finalized_block_number().await?; let from_block = storage - .processed_events_dal() + .eth_watcher_dal() .get_or_set_next_block_to_process( processor.event_type(), chain_id, @@ -180,7 +180,7 @@ impl EthWatch { }; storage - .processed_events_dal() + .eth_watcher_dal() .update_next_block_to_process( processor.event_type(), chain_id, diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 459b8855b96..6fce46f7722 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -88,8 +88,8 @@ impl GasAdjuster { anyhow::ensure!(client.gateway_mode, "Must be L2 client in L2 mode"); anyhow::ensure!( - matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata), - "Only relayed L2 calldata is available for L2 mode, got: {pubdata_sending_mode:?}" + matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata | PubdataSendingMode::Custom), + "Only relayed L2 calldata or Custom is available for L2 mode, got: {pubdata_sending_mode:?}" ); } else { anyhow::ensure!(!client.gateway_mode, "Must be L1 client in L1 mode"); diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 3e4c0ee30b9..82732342b40 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -409,6 +409,7 @@ pub async fn create_genesis_l1_batch( virtual_blocks: 0, gas_limit: 0, logs_bloom: Bloom::zero(), + pubdata_params: Default::default(), }; let mut transaction = storage.start_transaction().await?; diff --git a/core/node/logs_bloom_backfill/src/lib.rs b/core/node/logs_bloom_backfill/src/lib.rs index 4337c0b8dc9..368d2edaf69 100644 --- a/core/node/logs_bloom_backfill/src/lib.rs +++ b/core/node/logs_bloom_backfill/src/lib.rs @@ -158,6 +158,7 @@ mod tests { virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs index ec2c415b9bb..77992f34c7f 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs @@ -4,7 +4,7 @@ use zksync_config::configs::{ wallets, }; use zksync_state_keeper::{MempoolFetcher, MempoolGuard, MempoolIO, SequencerSealer}; -use zksync_types::L2ChainId; +use zksync_types::{commitment::L1BatchCommitmentMode, Address, L2ChainId}; use crate::{ implementations::resources::{ @@ -39,6 +39,8 @@ 
pub struct MempoolIOLayer { state_keeper_config: StateKeeperConfig, mempool_config: MempoolConfig, wallets: wallets::StateKeeper, + l2_da_validator_addr: Option
, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, } #[derive(Debug, FromContext)] @@ -63,12 +65,16 @@ impl MempoolIOLayer { state_keeper_config: StateKeeperConfig, mempool_config: MempoolConfig, wallets: wallets::StateKeeper, + l2_da_validator_addr: Option
, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, ) -> Self { Self { zksync_network_id, state_keeper_config, mempool_config, wallets, + l2_da_validator_addr, + l1_batch_commit_data_generator_mode, } } @@ -129,6 +135,8 @@ impl WiringLayer for MempoolIOLayer { self.wallets.fee_account.address(), self.mempool_config.delay_interval(), self.zksync_network_id, + self.l2_da_validator_addr, + self.l1_batch_commit_data_generator_mode, )?; // Create sealer. diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs index 5f63e4e1947..1a07591c1cd 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs @@ -35,7 +35,7 @@ use crate::{ /// - `L2BlockSealerTask` #[derive(Debug)] pub struct OutputHandlerLayer { - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
, l2_block_seal_queue_capacity: usize, /// Whether transactions should be pre-inserted to DB. /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB @@ -63,9 +63,12 @@ pub struct Output { } impl OutputHandlerLayer { - pub fn new(l2_shared_bridge_addr: Address, l2_block_seal_queue_capacity: usize) -> Self { + pub fn new( + l2_legacy_shared_bridge_addr: Option
, + l2_block_seal_queue_capacity: usize, + ) -> Self { Self { - l2_shared_bridge_addr, + l2_legacy_shared_bridge_addr, l2_block_seal_queue_capacity, pre_insert_txs: false, protective_reads_persistence_enabled: false, @@ -103,11 +106,13 @@ impl WiringLayer for OutputHandlerLayer { .get_custom(L2BlockSealProcess::subtasks_len()) .await .context("Get master pool")?; + let (mut persistence, l2_block_sealer) = StateKeeperPersistence::new( persistence_pool.clone(), - self.l2_shared_bridge_addr, + self.l2_legacy_shared_bridge_addr, self.l2_block_seal_queue_capacity, - ); + ) + .await?; if self.pre_insert_txs { persistence = persistence.with_tx_insertion(); } diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index a0be233a002..1be7e00543f 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -251,7 +251,7 @@ impl StateKeeperIO for ExternalIO { pending_l2_block_header.set_protocol_version(protocol_version); } - let (system_env, l1_batch_env) = self + let (system_env, l1_batch_env, pubdata_params) = self .l1_batch_params_provider .load_l1_batch_params( &mut storage, @@ -274,7 +274,7 @@ impl StateKeeperIO for ExternalIO { .into_unsealed_header(Some(system_env.version)), ) .await?; - let data = load_pending_batch(&mut storage, system_env, l1_batch_env) + let data = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .with_context(|| { format!( @@ -529,6 +529,7 @@ mod tests { timestamp: 1, virtual_blocks: 1, }, + pubdata_params: Default::default(), }; actions_sender .push_action_unchecked(SyncAction::OpenBatch { diff --git a/core/node/node_sync/src/fetcher.rs b/core/node/node_sync/src/fetcher.rs index 51b9f7c7a06..9c76d1d93ca 100644 --- a/core/node/node_sync/src/fetcher.rs +++ b/core/node/node_sync/src/fetcher.rs @@ -1,9 +1,10 @@ +use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state_keeper::io::{common::IoCursor, L1BatchParams, L2BlockParams}; use zksync_types::{ - api::en::SyncBlock, block::L2BlockHasher, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, - Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, + api::en::SyncBlock, block::L2BlockHasher, commitment::PubdataParams, fee_model::BatchFeeInput, + helpers::unix_timestamp_ms, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, }; use super::{ @@ -51,6 +52,7 @@ pub struct FetchedBlock { pub virtual_blocks: u32, pub operator_address: Address, pub transactions: Vec, + pub pubdata_params: PubdataParams, } impl FetchedBlock { @@ -77,6 +79,14 @@ impl TryFrom for FetchedBlock { )); } + let pubdata_params = if block.protocol_version.is_pre_gateway() { + block.pubdata_params.unwrap_or_default() + } else { + block + .pubdata_params + .context("Missing `pubdata_params` for post-gateway payload")? 
+ }; + Ok(Self { number: block.number, l1_batch_number: block.l1_batch_number, @@ -93,6 +103,7 @@ impl TryFrom for FetchedBlock { .into_iter() .map(FetchedTransaction::new) .collect(), + pubdata_params, }) } } @@ -165,6 +176,7 @@ impl IoCursorExt for IoCursor { timestamp: block.timestamp, virtual_blocks: block.virtual_blocks, }, + pubdata_params: block.pubdata_params, }, number: block.l1_batch_number, first_l2_block_number: block.number, diff --git a/core/node/node_sync/src/sync_action.rs b/core/node/node_sync/src/sync_action.rs index e3fd56ae9bb..897abfafb2a 100644 --- a/core/node/node_sync/src/sync_action.rs +++ b/core/node/node_sync/src/sync_action.rs @@ -198,6 +198,7 @@ mod tests { timestamp: 1, virtual_blocks: 1, }, + pubdata_params: Default::default(), }, number: L1BatchNumber(1), first_l2_block_number: L2BlockNumber(1), diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index 1ae148709b2..172a00e8c14 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -44,6 +44,7 @@ fn open_l1_batch(number: u32, timestamp: u64, first_l2_block_number: u32) -> Syn timestamp, virtual_blocks: 1, }, + pubdata_params: Default::default(), }, number: L1BatchNumber(number), first_l2_block_number: L2BlockNumber(first_l2_block_number), @@ -67,6 +68,7 @@ impl MockMainNodeClient { virtual_blocks: Some(0), hash: Some(snapshot.l2_block_hash), protocol_version: ProtocolVersionId::latest(), + pubdata_params: Default::default(), }; Self { @@ -106,7 +108,9 @@ impl StateKeeperHandles { let sync_state = SyncState::default(); let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Address::repeat_byte(1), 5); + StateKeeperPersistence::new(pool.clone(), Some(Address::repeat_byte(1)), 5) + .await + .unwrap(); let tree_writes_persistence = TreeWritesPersistence::new(pool.clone()); let output_handler = OutputHandler::new(Box::new(persistence.with_tx_insertion())) .with_handler(Box::new(tree_writes_persistence)) diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index ee266a88971..89304724a7c 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -17,7 +17,7 @@ use zksync_types::{ basic_fri_types::Eip4844Blobs, commitment::{serialize_commitments, L1BatchCommitmentMode}, web3::keccak256, - L1BatchNumber, H256, + L1BatchNumber, ProtocolVersionId, H256, STATE_DIFF_HASH_KEY_PRE_GATEWAY, }; use crate::{errors::RequestProcessorError, metrics::METRICS}; @@ -226,58 +226,63 @@ impl RequestProcessor { .unwrap() .expect("Proved block without metadata"); - let is_pre_boojum = l1_batch + let protocol_version = l1_batch .header .protocol_version - .map(|v| v.is_pre_boojum()) - .unwrap_or(true); - if !is_pre_boojum { - let events_queue_state = l1_batch - .metadata - .events_queue_commitment - .expect("No events_queue_commitment"); - let bootloader_heap_initial_content = l1_batch - .metadata - .bootloader_initial_content_commitment - .expect("No bootloader_initial_content_commitment"); - - if events_queue_state != events_queue_state_from_prover - || bootloader_heap_initial_content - != bootloader_heap_initial_content_from_prover - { - let server_values = format!("events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}"); - let prover_values = format!("events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = 
{bootloader_heap_initial_content_from_prover}"); - panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values - ); - } + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + + let events_queue_state = l1_batch + .metadata + .events_queue_commitment + .expect("No events_queue_commitment"); + let bootloader_heap_initial_content = l1_batch + .metadata + .bootloader_initial_content_commitment + .expect("No bootloader_initial_content_commitment"); + + if events_queue_state != events_queue_state_from_prover + || bootloader_heap_initial_content + != bootloader_heap_initial_content_from_prover + { + panic!( + "Auxilary output doesn't match\n\ + server values: events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}\n\ + prover values: events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = {bootloader_heap_initial_content_from_prover}", + ); } let system_logs = serialize_commitments(&l1_batch.header.system_logs); let system_logs_hash = H256(keccak256(&system_logs)); - if !is_pre_boojum { - let state_diff_hash = l1_batch + let state_diff_hash = if protocol_version.is_pre_gateway() { + l1_batch .header .system_logs - .into_iter() - .find(|elem| elem.0.key == H256::from_low_u64_be(2)) - .expect("No state diff hash key") - .0 - .value; - - if state_diff_hash != state_diff_hash_from_prover - || system_logs_hash != system_logs_hash_from_prover - { - let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); - let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); - panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values - ); - } + .iter() + .find_map(|log| { + (log.0.key + == H256::from_low_u64_be(STATE_DIFF_HASH_KEY_PRE_GATEWAY as u64)) + .then_some(log.0.value) + }) + .expect("Failed to get state_diff_hash from system logs") + } else { + l1_batch + .metadata + .state_diff_hash + .expect("Failed to get state_diff_hash from metadata") + }; + + if state_diff_hash != state_diff_hash_from_prover + || system_logs_hash != system_logs_hash_from_prover + { + let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); + let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); + panic!( + "Auxilary output doesn't match, server values: {} prover values: {}", + server_values, prover_values + ); } + storage .proof_generation_dal() .save_proof_artifacts_metadata(l1_batch_number, &blob_url) diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 2c2a5630009..800dede23c7 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -130,7 +130,7 @@ impl TeeRequestProcessor { // This means we don't want to reject any execution, therefore we're using MAX as an allow all. 
let validation_computational_gas_limit = u32::MAX; - let (system_env, l1_batch_env) = l1_batch_params_provider + let (system_env, l1_batch_env, pubdata_params) = l1_batch_params_provider .load_l1_batch_env( &mut connection, l1_batch_number, @@ -149,6 +149,7 @@ impl TeeRequestProcessor { l2_blocks_execution_data, l1_batch_env, system_env, + pubdata_params, })) } diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index 79072f23aed..a02aeb47caf 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -25,6 +25,7 @@ use zksync_state::{OwnedStorage, ReadStorageFactory, RocksdbStorageOptions}; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ block::L2BlockHasher, + commitment::PubdataParams, ethabi::Token, protocol_version::ProtocolSemanticVersion, snapshots::{SnapshotRecoveryStatus, SnapshotStorageLog}, @@ -104,10 +105,9 @@ impl Tester { &mut self, storage_type: StorageType, ) -> Box> { - let (l1_batch_env, system_env) = self.default_batch_params(); + let (l1_batch_env, system_env, pubdata_params) = self.default_batch_params(); match storage_type { StorageType::AsyncRocksdbCache => { - let (l1_batch_env, system_env) = self.default_batch_params(); let (state_keeper_storage, task) = AsyncRocksdbCache::new( self.pool(), self.state_keeper_db_path(), @@ -122,6 +122,7 @@ impl Tester { Arc::new(state_keeper_storage), l1_batch_env, system_env, + pubdata_params, ) .await } @@ -133,12 +134,18 @@ impl Tester { )), l1_batch_env, system_env, + pubdata_params, ) .await } StorageType::Postgres => { - self.create_batch_executor_inner(Arc::new(self.pool()), l1_batch_env, system_env) - .await + self.create_batch_executor_inner( + Arc::new(self.pool()), + l1_batch_env, + system_env, + pubdata_params, + ) + .await } } } @@ -148,6 +155,7 @@ impl Tester { storage_factory: Arc, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box> { let (_stop_sender, stop_receiver) = watch::channel(false); let storage = storage_factory @@ -158,11 +166,11 @@ impl Tester { if self.config.trace_calls { let mut executor = MainBatchExecutorFactory::::new(false); executor.set_fast_vm_mode(self.config.fast_vm_mode); - executor.init_batch(storage, l1_batch_env, system_env) + executor.init_batch(storage, l1_batch_env, system_env, pubdata_params) } else { let mut executor = MainBatchExecutorFactory::<()>::new(false); executor.set_fast_vm_mode(self.config.fast_vm_mode); - executor.init_batch(storage, l1_batch_env, system_env) + executor.init_batch(storage, l1_batch_env, system_env, pubdata_params) } } @@ -212,7 +220,7 @@ impl Tester { snapshot: &SnapshotRecoveryStatus, ) -> Box> { let current_timestamp = snapshot.l2_block_timestamp + 1; - let (mut l1_batch_env, system_env) = + let (mut l1_batch_env, system_env, pubdata_params) = self.batch_params(snapshot.l1_batch_number + 1, current_timestamp); l1_batch_env.previous_batch_hash = Some(snapshot.l1_batch_root_hash); l1_batch_env.first_l2_block = L2BlockEnv { @@ -222,11 +230,11 @@ impl Tester { max_virtual_blocks_to_create: 1, }; - self.create_batch_executor_inner(storage_factory, l1_batch_env, system_env) + self.create_batch_executor_inner(storage_factory, l1_batch_env, system_env, pubdata_params) .await } - pub(super) fn default_batch_params(&self) -> (L1BatchEnv, SystemEnv) { + pub(super) fn default_batch_params(&self) -> (L1BatchEnv, SystemEnv, PubdataParams) { // Not really important 
for the batch executor - it operates over a single batch. self.batch_params(L1BatchNumber(1), 100) } @@ -236,7 +244,7 @@ impl Tester { &self, l1_batch_number: L1BatchNumber, timestamp: u64, - ) -> (L1BatchEnv, SystemEnv) { + ) -> (L1BatchEnv, SystemEnv, PubdataParams) { let mut system_params = default_system_env(); if let Some(vm_gas_limit) = self.config.vm_gas_limit { system_params.bootloader_gas_limit = vm_gas_limit; @@ -245,7 +253,7 @@ impl Tester { self.config.validation_computational_gas_limit; let mut batch_params = default_l1_batch_env(l1_batch_number.0, timestamp, self.fee_account); batch_params.previous_batch_hash = Some(H256::zero()); // Not important in this context. - (batch_params, system_params) + (batch_params, system_params, PubdataParams::default()) } /// Performs the genesis in the storage. diff --git a/core/node/state_keeper/src/io/common/mod.rs b/core/node/state_keeper/src/io/common/mod.rs index 6bd881414a2..867ffa7fb37 100644 --- a/core/node/state_keeper/src/io/common/mod.rs +++ b/core/node/state_keeper/src/io/common/mod.rs @@ -3,7 +3,7 @@ use std::time::Duration; use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_types::{L1BatchNumber, L2BlockNumber, H256}; +use zksync_types::{commitment::PubdataParams, L1BatchNumber, L2BlockNumber, H256}; use super::PendingBatchData; @@ -85,6 +85,7 @@ pub async fn load_pending_batch( storage: &mut Connection<'_, Core>, system_env: SystemEnv, l1_batch_env: L1BatchEnv, + pubdata_params: PubdataParams, ) -> anyhow::Result { let pending_l2_blocks = storage .transactions_dal() @@ -104,6 +105,7 @@ pub async fn load_pending_batch( Ok(PendingBatchData { l1_batch_env, system_env, + pubdata_params, pending_l2_blocks, }) } diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index b2a24acb495..ec9f906b1cd 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -318,7 +318,7 @@ async fn loading_pending_batch_with_genesis() { .await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let (system_env, l1_batch_env) = provider + let (system_env, l1_batch_env, pubdata_params) = provider .load_l1_batch_env( &mut storage, L1BatchNumber(1), @@ -331,7 +331,7 @@ async fn loading_pending_batch_with_genesis() { assert_eq!(l1_batch_env.first_l2_block.number, 1); - let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .unwrap(); @@ -396,7 +396,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { .await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let (system_env, l1_batch_env) = provider + let (system_env, l1_batch_env, pubdata_params) = provider .load_l1_batch_env( &mut storage, snapshot_recovery.l1_batch_number + 1, @@ -406,7 +406,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { .await .unwrap() .expect("no L1 batch"); - let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .unwrap(); diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 229f54132f7..dfddd36aba7 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -14,7 +14,10 @@ use 
zksync_mempool::L2TxFilter; use zksync_multivm::{interface::Halt, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_types::{ - block::UnsealedL1BatchHeader, protocol_upgrade::ProtocolUpgradeTx, utils::display_timestamp, + block::UnsealedL1BatchHeader, + commitment::{L1BatchCommitmentMode, PubdataParams}, + protocol_upgrade::ProtocolUpgradeTx, + utils::display_timestamp, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, U256, }; // TODO (SMA-1206): use seconds instead of milliseconds. @@ -55,6 +58,8 @@ pub struct MempoolIO { // Used to keep track of gas prices to set accepted price per pubdata byte in blocks. batch_fee_input_provider: Arc, chain_id: L2ChainId, + l2_da_validator_address: Option
, + pubdata_type: L1BatchCommitmentMode, } impl IoSealCriteria for MempoolIO { @@ -97,7 +102,7 @@ impl StateKeeperIO for MempoolIO { L2BlockSealProcess::clear_pending_l2_block(&mut storage, cursor.next_l2_block - 1).await?; - let Some((system_env, l1_batch_env)) = self + let Some((system_env, l1_batch_env, pubdata_params)) = self .l1_batch_params_provider .load_l1_batch_env( &mut storage, @@ -109,26 +114,24 @@ impl StateKeeperIO for MempoolIO { else { return Ok((cursor, None)); }; - let pending_batch_data = load_pending_batch(&mut storage, system_env, l1_batch_env) - .await - .with_context(|| { - format!( - "failed loading data for re-execution for pending L1 batch #{}", - cursor.l1_batch - ) - })?; + let pending_batch_data = + load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) + .await + .with_context(|| { + format!( + "failed loading data for re-execution for pending L1 batch #{}", + cursor.l1_batch + ) + })?; - let PendingBatchData { - l1_batch_env, - system_env, - pending_l2_blocks, - } = pending_batch_data; // Initialize the filter for the transactions that come after the pending batch. // We use values from the pending block to match the filter with one used before the restart. - let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(l1_batch_env.fee_input, system_env.version.into()); + let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + pending_batch_data.l1_batch_env.fee_input, + pending_batch_data.system_env.version.into(), + ); self.filter = L2TxFilter { - fee_input: l1_batch_env.fee_input, + fee_input: pending_batch_data.l1_batch_env.fee_input, fee_per_gas: base_fee, gas_per_pubdata: gas_per_pubdata as u32, }; @@ -136,20 +139,14 @@ impl StateKeeperIO for MempoolIO { storage .blocks_dal() .ensure_unsealed_l1_batch_exists( - l1_batch_env + pending_batch_data + .l1_batch_env .clone() - .into_unsealed_header(Some(system_env.version)), + .into_unsealed_header(Some(pending_batch_data.system_env.version)), ) .await?; - Ok(( - cursor, - Some(PendingBatchData { - l1_batch_env, - system_env, - pending_l2_blocks, - }), - )) + Ok((cursor, Some(pending_batch_data))) } async fn wait_for_new_batch_params( @@ -166,10 +163,11 @@ impl StateKeeperIO for MempoolIO { .get_unsealed_l1_batch() .await? { + let protocol_version = unsealed_storage_batch + .protocol_version + .context("unsealed batch is missing protocol version")?; return Ok(Some(L1BatchParams { - protocol_version: unsealed_storage_batch - .protocol_version - .expect("unsealed batch is missing protocol version"), + protocol_version, validation_computational_gas_limit: self.validation_computational_gas_limit, operator_address: unsealed_storage_batch.fee_address, fee_input: unsealed_storage_batch.fee_input, @@ -178,6 +176,7 @@ impl StateKeeperIO for MempoolIO { // This value is effectively ignored by the protocol. virtual_blocks: 1, }, + pubdata_params: self.pubdata_params(protocol_version)?, })); } @@ -247,6 +246,7 @@ impl StateKeeperIO for MempoolIO { // This value is effectively ignored by the protocol. virtual_blocks: 1, }, + pubdata_params: self.pubdata_params(protocol_version)?, })); } Ok(None) @@ -454,6 +454,7 @@ async fn sleep_past(timestamp: u64, l2_block: L2BlockNumber) -> u64 { } impl MempoolIO { + #[allow(clippy::too_many_arguments)] pub fn new( mempool: MempoolGuard, batch_fee_input_provider: Arc, @@ -462,6 +463,8 @@ impl MempoolIO { fee_account: Address, delay_interval: Duration, chain_id: L2ChainId, + l2_da_validator_address: Option
, + pubdata_type: L1BatchCommitmentMode, ) -> anyhow::Result { Ok(Self { mempool, @@ -477,8 +480,26 @@ impl MempoolIO { delay_interval, batch_fee_input_provider, chain_id, + l2_da_validator_address, + pubdata_type, }) } + + fn pubdata_params(&self, protocol_version: ProtocolVersionId) -> anyhow::Result { + let pubdata_params = match ( + protocol_version.is_pre_gateway(), + self.l2_da_validator_address, + ) { + (true, _) => PubdataParams::default(), + (false, Some(l2_da_validator_address)) => PubdataParams { + l2_da_validator_address, + pubdata_type: self.pubdata_type, + }, + (false, None) => anyhow::bail!("L2 DA validator address not found"), + }; + + Ok(pubdata_params) + } } /// Getters required for testing the MempoolIO. diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index 0fc5ebb6c08..e2461e72d7b 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -4,8 +4,9 @@ use async_trait::async_trait; use zksync_contracts::BaseSystemContracts; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_types::{ - block::L2BlockExecutionData, fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, - Address, L1BatchNumber, L2ChainId, ProtocolVersionId, Transaction, H256, + block::L2BlockExecutionData, commitment::PubdataParams, fee_model::BatchFeeInput, + protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2ChainId, ProtocolVersionId, + Transaction, H256, }; use zksync_vm_executor::storage::l1_batch_params; @@ -38,6 +39,7 @@ pub struct PendingBatchData { /// (e.g. timestamp) are the same, so transaction would have the same result after re-execution. pub(crate) l1_batch_env: L1BatchEnv, pub(crate) system_env: SystemEnv, + pub(crate) pubdata_params: PubdataParams, /// List of L2 blocks and corresponding transactions that were executed within batch. pub(crate) pending_l2_blocks: Vec, } @@ -70,6 +72,8 @@ pub struct L1BatchParams { pub fee_input: BatchFeeInput, /// Parameters of the first L2 block in the batch. pub first_l2_block: L2BlockParams, + /// Params related to how the pubdata should be processed by the bootloader in the batch. + pub pubdata_params: PubdataParams, } impl L1BatchParams { @@ -79,8 +83,8 @@ impl L1BatchParams { contracts: BaseSystemContracts, cursor: &IoCursor, previous_batch_hash: H256, - ) -> (SystemEnv, L1BatchEnv) { - l1_batch_params( + ) -> (SystemEnv, L1BatchEnv, PubdataParams) { + let (system_env, l1_batch_env) = l1_batch_params( cursor.l1_batch, self.operator_address, self.first_l2_block.timestamp, @@ -93,7 +97,9 @@ impl L1BatchParams { self.protocol_version, self.first_l2_block.virtual_blocks, chain_id, - ) + ); + + (system_env, l1_batch_env, self.pubdata_params) } } diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 3e11285e11f..06f1972a02a 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -7,7 +7,7 @@ use async_trait::async_trait; use tokio::sync::{mpsc, oneshot}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_shared_metrics::{BlockStage, APP_METRICS}; -use zksync_types::{writes::TreeWrite, Address}; +use zksync_types::{writes::TreeWrite, Address, ProtocolVersionId}; use zksync_utils::u256_to_h256; use crate::{ @@ -29,7 +29,7 @@ struct Completable { #[derive(Debug)] pub struct StateKeeperPersistence { pool: ConnectionPool, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
, pre_insert_txs: bool, insert_protective_reads: bool, commands_sender: mpsc::Sender>, @@ -41,13 +41,45 @@ pub struct StateKeeperPersistence { impl StateKeeperPersistence { const SHUTDOWN_MSG: &'static str = "L2 block sealer unexpectedly shut down"; + async fn validate_l2_legacy_shared_bridge_addr( + pool: &ConnectionPool, + l2_legacy_shared_bridge_addr: Option
, + ) -> anyhow::Result<()> { + let mut connection = pool.connection_tagged("state_keeper").await?; + + if let Some(l2_block) = connection + .blocks_dal() + .get_earliest_l2_block_number() + .await + .context("failed to load earliest l2 block number")? + { + let header = connection + .blocks_dal() + .get_l2_block_header(l2_block) + .await + .context("failed to load L2 block header")? + .context("missing L2 block header")?; + let protocol_version = header + .protocol_version + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + + if protocol_version.is_pre_gateway() && l2_legacy_shared_bridge_addr.is_none() { + anyhow::bail!("Missing `l2_legacy_shared_bridge_addr` for chain that was initialized before gateway upgrade"); + } + } + + Ok(()) + } + /// Creates a sealer that will use the provided Postgres connection and will have the specified /// `command_capacity` for unprocessed sealing commands. - pub fn new( + pub async fn new( pool: ConnectionPool, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
, mut command_capacity: usize, - ) -> (Self, L2BlockSealerTask) { + ) -> anyhow::Result<(Self, L2BlockSealerTask)> { + Self::validate_l2_legacy_shared_bridge_addr(&pool, l2_legacy_shared_bridge_addr).await?; + let is_sync = command_capacity == 0; command_capacity = command_capacity.max(1); @@ -60,14 +92,14 @@ impl StateKeeperPersistence { }; let this = Self { pool, - l2_shared_bridge_addr, + l2_legacy_shared_bridge_addr, pre_insert_txs: false, insert_protective_reads: true, commands_sender, latest_completion_receiver: None, is_sync, }; - (this, sealer) + Ok((this, sealer)) } pub fn with_tx_insertion(mut self) -> Self { @@ -157,8 +189,8 @@ impl StateKeeperOutputHandler for StateKeeperPersistence { } async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { - let command = - updates_manager.seal_l2_block_command(self.l2_shared_bridge_addr, self.pre_insert_txs); + let command = updates_manager + .seal_l2_block_command(self.l2_legacy_shared_bridge_addr, self.pre_insert_txs); self.submit_l2_block(command).await; Ok(()) } @@ -174,7 +206,7 @@ impl StateKeeperOutputHandler for StateKeeperPersistence { updates_manager .seal_l1_batch( self.pool.clone(), - self.l2_shared_bridge_addr, + self.l2_legacy_shared_bridge_addr, self.insert_protective_reads, ) .await @@ -392,8 +424,13 @@ mod tests { .unwrap(); drop(storage); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Address::default(), l2_block_sealer_capacity); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + pool.clone(), + Some(Address::default()), + l2_block_sealer_capacity, + ) + .await + .unwrap(); let mut output_handler = OutputHandler::new(Box::new(persistence)) .with_handler(Box::new(TreeWritesPersistence::new(pool.clone()))); tokio::spawn(l2_block_sealer.run()); @@ -451,7 +488,8 @@ mod tests { pool: &ConnectionPool, ) -> H256 { let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); - let mut updates = UpdatesManager::new(&l1_batch_env, &default_system_env()); + let mut updates = + UpdatesManager::new(&l1_batch_env, &default_system_env(), Default::default()); pool.connection() .await .unwrap() @@ -538,7 +576,9 @@ mod tests { drop(storage); let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Address::default(), 1); + StateKeeperPersistence::new(pool.clone(), Some(Address::default()), 1) + .await + .unwrap(); persistence = persistence.with_tx_insertion().without_protective_reads(); let mut output_handler = OutputHandler::new(Box::new(persistence)); tokio::spawn(l2_block_sealer.run()); @@ -577,11 +617,13 @@ mod tests { async fn l2_block_sealer_handle_blocking() { let pool = ConnectionPool::constrained_test_pool(1).await; let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Address::default(), 1); + StateKeeperPersistence::new(pool, Some(Address::default()), 1) + .await + .unwrap(); // The first command should be successfully submitted immediately. 
let mut updates_manager = create_updates_manager(); - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command(Some(Address::default()), false); persistence.submit_l2_block(seal_command).await; // The second command should lead to blocking @@ -589,7 +631,7 @@ mod tests { timestamp: 2, virtual_blocks: 1, }); - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command(Some(Address::default()), false); { let submit_future = persistence.submit_l2_block(seal_command); futures::pin_mut!(submit_future); @@ -617,7 +659,7 @@ mod tests { timestamp: 3, virtual_blocks: 1, }); - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command(Some(Address::default()), false); persistence.submit_l2_block(seal_command).await; let command = sealer.commands_receiver.recv().await.unwrap(); command.completion_sender.send(()).unwrap(); @@ -628,12 +670,15 @@ mod tests { async fn l2_block_sealer_handle_parallel_processing() { let pool = ConnectionPool::constrained_test_pool(1).await; let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Address::default(), 5); + StateKeeperPersistence::new(pool, Some(Address::default()), 5) + .await + .unwrap(); // 5 L2 block sealing commands can be submitted without blocking. let mut updates_manager = create_updates_manager(); for i in 1..=5 { - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = + updates_manager.seal_l2_block_command(Some(Address::default()), false); updates_manager.push_l2_block(L2BlockParams { timestamp: i, virtual_blocks: 1, diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 7ef466805e3..4fc58bce5c9 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -3,7 +3,7 @@ use async_trait::async_trait; use once_cell::sync::Lazy; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::VmEvent; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_system_constants::{CONTRACT_DEPLOYER_ADDRESS, L2_NATIVE_TOKEN_VAULT_ADDRESS}; use zksync_types::{ ethabi, tokens::{TokenInfo, TokenMetadata}, @@ -18,7 +18,7 @@ use crate::{ }; fn extract_added_tokens( - l2_shared_bridge_addr: Address, + l2_token_deployer_addr: Address, all_generated_events: &[VmEvent], ) -> Vec { let deployed_tokens = all_generated_events @@ -28,7 +28,7 @@ fn extract_added_tokens( event.address == CONTRACT_DEPLOYER_ADDRESS && event.indexed_topics.len() == 4 && event.indexed_topics[0] == VmEvent::DEPLOY_EVENT_SIGNATURE - && h256_to_account_address(&event.indexed_topics[1]) == l2_shared_bridge_addr + && h256_to_account_address(&event.indexed_topics[1]) == l2_token_deployer_addr }) .map(|event| h256_to_account_address(&event.indexed_topics[3])); @@ -334,8 +334,10 @@ impl L2BlockSealSubtask for InsertTokensSubtask { ) -> anyhow::Result<()> { let is_fictive = command.is_l2_block_fictive(); let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::ExtractAddedTokens, is_fictive); - let added_tokens = - extract_added_tokens(command.l2_shared_bridge_addr, &command.l2_block.events); + let token_deployer_address = command + .l2_legacy_shared_bridge_addr + 
.unwrap_or(L2_NATIVE_TOKEN_VAULT_ADDRESS); + let added_tokens = extract_added_tokens(token_deployer_address, &command.l2_block.events); progress.observe(added_tokens.len()); let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::InsertTokens, is_fictive); @@ -464,6 +466,7 @@ mod tests { use zksync_node_test_utils::create_l2_transaction; use zksync_types::{ block::L2BlockHeader, + commitment::PubdataParams, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, AccountTreeId, Address, L1BatchNumber, ProtocolVersionId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, @@ -552,8 +555,9 @@ mod tests { base_fee_per_gas: Default::default(), base_system_contracts_hashes: Default::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_shared_bridge_addr: Default::default(), + l2_legacy_shared_bridge_addr: Default::default(), pre_insert_txs: false, + pubdata_params: PubdataParams::default(), }; // Run. @@ -616,6 +620,7 @@ mod tests { virtual_blocks: l2_block_seal_command.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(VmVersion::latest()), logs_bloom: Default::default(), + pubdata_params: l2_block_seal_command.pubdata_params, }; connection .protocol_versions_dal() diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index 5859d27786d..7f05bda7a6f 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -46,7 +46,7 @@ impl UpdatesManager { pub(super) async fn seal_l1_batch( &self, pool: ConnectionPool, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
, insert_protective_reads: bool, ) -> anyhow::Result<()> { let started_at = Instant::now(); @@ -59,7 +59,7 @@ impl UpdatesManager { let progress = L1_BATCH_METRICS.start(L1BatchSealStage::FictiveL2Block); // Seal fictive L2 block with last events and storage logs. let l2_block_command = self.seal_l2_block_command( - l2_shared_bridge_addr, + l2_legacy_shared_bridge_addr, false, // fictive L2 blocks don't have txs, so it's fine to pass `false` here. ); @@ -335,8 +335,6 @@ impl L2BlockSealCommand { /// that are created after the last processed tx in the L1 batch: after the last transaction is processed, /// the bootloader enters the "tip" phase in which it can still generate events (e.g., /// one for sending fees to the operator). - /// - /// `l2_shared_bridge_addr` is required to extract the information on newly added tokens. async fn seal_inner( &self, strategy: &mut SealStrategy<'_>, @@ -393,6 +391,7 @@ impl L2BlockSealCommand { virtual_blocks: self.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(definite_vm_version), logs_bloom, + pubdata_params: self.pubdata_params, }; let mut connection = strategy.connection().await?; diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 566eebf7ab7..ece5b67767f 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -286,8 +286,9 @@ async fn processing_storage_logs_when_sealing_l2_block() { base_fee_per_gas: 10, base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_shared_bridge_addr: Address::default(), + l2_legacy_shared_bridge_addr: Some(Address::default()), pre_insert_txs: false, + pubdata_params: Default::default(), }; connection_pool .connection() @@ -376,8 +377,9 @@ async fn processing_events_when_sealing_l2_block() { base_fee_per_gas: 10, base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_shared_bridge_addr: Address::default(), + l2_legacy_shared_bridge_addr: Some(Address::default()), pre_insert_txs: false, + pubdata_params: Default::default(), }; pool.connection() .await @@ -447,13 +449,13 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom .await .unwrap() .expect("no batch params generated"); - let (system_env, l1_batch_env) = l1_batch_params.into_env( + let (system_env, l1_batch_env, pubdata_params) = l1_batch_params.into_env( L2ChainId::default(), BASE_SYSTEM_CONTRACTS.clone(), &cursor, previous_batch_hash, ); - let mut updates = UpdatesManager::new(&l1_batch_env, &system_env); + let mut updates = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); let tx_hash = tx.hash(); updates.extend_from_executed_transaction( @@ -467,7 +469,9 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom ); let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(connection_pool.clone(), Address::default(), 0); + StateKeeperPersistence::new(connection_pool.clone(), Some(Address::default()), 0) + .await + .unwrap(); tokio::spawn(l2_block_sealer.run()); persistence.handle_l2_block(&updates).await.unwrap(); diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index ad189831bad..daedbebc75e 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -147,6 +147,8 @@ impl Tester { 
wallets.state_keeper.unwrap().fee_account.address(), Duration::from_secs(1), L2ChainId::from(270), + Some(Default::default()), + Default::default(), ) .unwrap(); diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index bd102daa308..523dd8eceba 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -17,8 +17,9 @@ use zksync_multivm::{ use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state::{OwnedStorage, ReadStorageFactory}; use zksync_types::{ - block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, - protocol_version::ProtocolVersionId, utils::display_timestamp, L1BatchNumber, Transaction, + block::L2BlockExecutionData, commitment::PubdataParams, l2::TransactionType, + protocol_upgrade::ProtocolUpgradeTx, protocol_version::ProtocolVersionId, + utils::display_timestamp, L1BatchNumber, Transaction, }; use crate::{ @@ -116,6 +117,7 @@ impl ZkSyncStateKeeper { let PendingBatchData { mut l1_batch_env, mut system_env, + mut pubdata_params, pending_l2_blocks, } = match pending_batch_params { Some(params) => { @@ -132,7 +134,7 @@ impl ZkSyncStateKeeper { } None => { tracing::info!("There is no open pending batch, starting a new empty batch"); - let (system_env, l1_batch_env) = self + let (system_env, l1_batch_env, pubdata_params) = self .wait_for_new_batch_env(&cursor) .await .map_err(|e| e.context("wait_for_new_batch_params()"))?; @@ -140,18 +142,19 @@ impl ZkSyncStateKeeper { l1_batch_env, pending_l2_blocks: Vec::new(), system_env, + pubdata_params, } } }; let protocol_version = system_env.version; - let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); let mut protocol_upgrade_tx: Option = self .load_protocol_upgrade_tx(&pending_l2_blocks, protocol_version, l1_batch_env.number) .await?; let mut batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .create_batch_executor(l1_batch_env.clone(), system_env.clone(), pubdata_params) .await?; self.restore_state( &mut *batch_executor, @@ -201,10 +204,11 @@ impl ZkSyncStateKeeper { // Start the new batch. next_cursor.l1_batch += 1; - (system_env, l1_batch_env) = self.wait_for_new_batch_env(&next_cursor).await?; - updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + (system_env, l1_batch_env, pubdata_params) = + self.wait_for_new_batch_env(&next_cursor).await?; + updates_manager = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .create_batch_executor(l1_batch_env.clone(), system_env.clone(), pubdata_params) .await?; let version_changed = system_env.version != sealed_batch_protocol_version; @@ -221,6 +225,7 @@ impl ZkSyncStateKeeper { &mut self, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Result>, Error> { let storage = self .storage_factory @@ -230,7 +235,7 @@ impl ZkSyncStateKeeper { .ok_or(Error::Canceled)?; Ok(self .batch_executor - .init_batch(storage, l1_batch_env, system_env)) + .init_batch(storage, l1_batch_env, system_env, pubdata_params)) } /// This function is meant to be called only once during the state-keeper initialization. 
@@ -327,7 +332,7 @@ impl ZkSyncStateKeeper { async fn wait_for_new_batch_env( &mut self, cursor: &IoCursor, - ) -> Result<(SystemEnv, L1BatchEnv), Error> { + ) -> Result<(SystemEnv, L1BatchEnv, PubdataParams), Error> { // `io.wait_for_new_batch_params(..)` is not cancel-safe; once we get new batch params, we must hold onto them // until we get the rest of parameters from I/O or receive a stop signal. let params = self.wait_for_new_batch_params(cursor).await?; diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index d1e82c44bd6..ad50c8ca8ce 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -14,9 +14,9 @@ use zksync_multivm::interface::{ use zksync_state::OwnedStorage; use zksync_test_account::Account; use zksync_types::{ - fee::Fee, utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, - L1BatchNumber, L2BlockNumber, PriorityOpId, StorageLog, Transaction, L2_BASE_TOKEN_ADDRESS, - SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, + commitment::PubdataParams, fee::Fee, utils::storage_key_for_standard_token_balance, + AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, PriorityOpId, StorageLog, + Transaction, L2_BASE_TOKEN_ADDRESS, SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, }; use zksync_utils::u256_to_h256; @@ -50,6 +50,7 @@ impl BatchExecutorFactory for MockBatchExecutor { _storage: OwnedStorage, _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, + _pubdata_params: PubdataParams, ) -> Box> { Box::new(Self) } diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index cb282f3b7d6..45787b18f3c 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -27,8 +27,9 @@ use zksync_multivm::{ use zksync_node_test_utils::create_l2_transaction; use zksync_state::{interface::StorageView, OwnedStorage, ReadStorageFactory}; use zksync_types::{ - fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, protocol_upgrade::ProtocolUpgradeTx, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, + commitment::PubdataParams, fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, + protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, Transaction, H256, }; use crate::{ @@ -423,6 +424,7 @@ impl BatchExecutorFactory for TestBatchExecutorBuilder { _storage: OwnedStorage, _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, + _pubdata_params: PubdataParams, ) -> Box> { let executor = TestBatchExecutor::new(self.txs.pop_front().unwrap(), self.rollback_set.clone()); @@ -702,6 +704,7 @@ impl StateKeeperIO for TestIO { timestamp: self.timestamp, virtual_blocks: 1, }, + pubdata_params: Default::default(), }; self.l2_block_number += 1; self.timestamp += 1; diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index 9e971541b20..16eed0b2f7f 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -59,6 +59,7 @@ pub(crate) fn pending_batch_data(pending_l2_blocks: Vec) - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), }, + pubdata_params: Default::default(), pending_l2_blocks, } } @@ -102,7 +103,7 @@ pub(super) fn default_l1_batch_env( pub(super) fn create_updates_manager() -> UpdatesManager { let l1_batch_env = 
default_l1_batch_env(1, 1, Address::default()); - UpdatesManager::new(&l1_batch_env, &default_system_env()) + UpdatesManager::new(&l1_batch_env, &default_system_env(), Default::default()) } pub(super) fn create_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> Transaction { diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 6211755eb15..b1bd35c921c 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -9,8 +9,8 @@ use zksync_multivm::{ utils::{get_batch_base_fee, StorageWritesDeduplicator}, }; use zksync_types::{ - block::BlockGasCount, fee_model::BatchFeeInput, Address, L1BatchNumber, L2BlockNumber, - ProtocolVersionId, Transaction, H256, + block::BlockGasCount, commitment::PubdataParams, fee_model::BatchFeeInput, Address, + L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256, }; pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates}; @@ -41,10 +41,15 @@ pub struct UpdatesManager { pub l1_batch: L1BatchUpdates, pub l2_block: L2BlockUpdates, pub storage_writes_deduplicator: StorageWritesDeduplicator, + pubdata_params: PubdataParams, } impl UpdatesManager { - pub fn new(l1_batch_env: &L1BatchEnv, system_env: &SystemEnv) -> Self { + pub fn new( + l1_batch_env: &L1BatchEnv, + system_env: &SystemEnv, + pubdata_params: PubdataParams, + ) -> Self { let protocol_version = system_env.version; Self { batch_timestamp: l1_batch_env.timestamp, @@ -63,6 +68,7 @@ impl UpdatesManager { ), storage_writes_deduplicator: StorageWritesDeduplicator::new(), storage_view_cache: None, + pubdata_params, } } @@ -85,7 +91,7 @@ impl UpdatesManager { pub(crate) fn seal_l2_block_command( &self, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
, pre_insert_txs: bool, ) -> L2BlockSealCommand { L2BlockSealCommand { @@ -97,8 +103,9 @@ impl UpdatesManager { base_fee_per_gas: self.base_fee_per_gas, base_system_contracts_hashes: self.base_system_contract_hashes, protocol_version: Some(self.protocol_version), - l2_shared_bridge_addr, + l2_legacy_shared_bridge_addr, pre_insert_txs, + pubdata_params: self.pubdata_params, } } @@ -211,11 +218,12 @@ pub struct L2BlockSealCommand { pub base_fee_per_gas: u64, pub base_system_contracts_hashes: BaseSystemContractsHashes, pub protocol_version: Option, - pub l2_shared_bridge_addr: Address, + pub l2_legacy_shared_bridge_addr: Option
, /// Whether transactions should be pre-inserted to DB. /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB /// before they are included into L2 blocks. pub pre_insert_txs: bool, + pub pubdata_params: PubdataParams, } #[cfg(test)] diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index 9eb53994eee..86ce3aadd9a 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -45,6 +45,7 @@ pub fn create_l2_block(number: u32) -> L2BlockHeader { virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), } } @@ -98,6 +99,10 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { events_queue_commitment: Some(H256::zero()), bootloader_initial_content_commitment: Some(H256::zero()), state_diffs_compressed: vec![], + state_diff_hash: Some(H256::zero()), + local_root: Some(H256::zero()), + aggregation_root: Some(H256::zero()), + da_inclusion_data: Some(vec![]), } } @@ -128,6 +133,9 @@ pub fn l1_batch_metadata_to_commitment_artifacts( } _ => None, }, + local_root: metadata.local_root.unwrap(), + aggregation_root: metadata.aggregation_root.unwrap(), + state_diff_hash: metadata.state_diff_hash.unwrap(), } } @@ -213,6 +221,7 @@ impl Snapshot { virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; Snapshot { l1_batch, diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index 4f7ac1f9728..dbd218c8dc5 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -82,6 +82,7 @@ impl VmRunner { storage, batch_data.l1_batch_env.clone(), batch_data.system_env.clone(), + batch_data.pubdata_params, ); let mut output_handler = self .output_handler_factory diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index 2285455ba24..9ab4ed87b9f 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -13,7 +13,9 @@ use zksync_state::{ AsyncCatchupTask, BatchDiff, OwnedStorage, RocksdbCell, RocksdbStorage, RocksdbStorageBuilder, RocksdbWithMemory, }; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2ChainId}; +use zksync_types::{ + block::L2BlockExecutionData, commitment::PubdataParams, L1BatchNumber, L2ChainId, +}; use zksync_vm_executor::storage::L1BatchParamsProvider; use zksync_vm_interface::{L1BatchEnv, SystemEnv}; @@ -106,6 +108,8 @@ pub struct BatchExecuteData { pub l1_batch_env: L1BatchEnv, /// Execution process parameters. pub system_env: SystemEnv, + /// Pubdata building parameters. + pub pubdata_params: PubdataParams, /// List of L2 blocks and corresponding transactions that were executed within batch. 
pub l2_blocks: Vec, } @@ -394,7 +398,7 @@ pub(crate) async fn load_batch_execute_data( l1_batch_params_provider: &L1BatchParamsProvider, chain_id: L2ChainId, ) -> anyhow::Result> { - let Some((system_env, l1_batch_env)) = l1_batch_params_provider + let Some((system_env, l1_batch_env, pubdata_params)) = l1_batch_params_provider .load_l1_batch_env( conn, l1_batch_number, @@ -415,6 +419,7 @@ pub(crate) async fn load_batch_execute_data( Ok(Some(BatchExecuteData { l1_batch_env, system_env, + pubdata_params, l2_blocks, })) } diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index dddef0de82f..e198be9ea6b 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -5,7 +5,7 @@ use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView}, - ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + ExecutionResult, InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, @@ -113,7 +113,7 @@ impl BenchmarkingVmFactory for Fast } let mut tracer = InstructionCount(0); - vm.0.inspect(&mut tracer, VmExecutionMode::OneTx); + vm.0.inspect(&mut tracer, InspectExecutionMode::OneTx); tracer.0 } } @@ -144,7 +144,7 @@ impl BenchmarkingVmFactory for Legacy { &mut InstructionCounter::new(count.clone()) .into_tracer_pointer() .into(), - VmExecutionMode::OneTx, + InspectExecutionMode::OneTx, ); count.take() } @@ -191,7 +191,7 @@ impl Default for BenchmarkingVm { impl BenchmarkingVm { pub fn run_transaction(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { self.0.push_transaction(tx.clone()); - self.0.execute(VmExecutionMode::OneTx) + self.0.execute(InspectExecutionMode::OneTx) } pub fn run_transaction_full(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { diff --git a/etc/multivm_bootloaders/vm_gateway/commit b/etc/multivm_bootloaders/vm_gateway/commit new file mode 100644 index 00000000000..a3547f57703 --- /dev/null +++ b/etc/multivm_bootloaders/vm_gateway/commit @@ -0,0 +1 @@ +a8bf0ca28d43899882a2e123e2fdf1379f0fd656 diff --git a/etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin b/etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin new file mode 100644 index 0000000000000000000000000000000000000000..fb6017f69cf03b963d490070a1d33555531e5d30 GIT binary patch literal 75296 zcmeHw3w&Hhb@$wT>8@-`vTVtcELm4XNpK-xnA?A$)a!QUbK}14;`8VgoJjQeNdz9xeoE3*}h|B!IvFIcMfRX7AnA zYF7@wekl3*?wvDpX6DS9^P0z1DMf!N)vup;q#f0ZM=8A~HK~-_nO@`Es~n`PO<#+D z9mjL-QEG9kQYSCN`-!Z|(6f}0XCW<5uSK3fo_Px3vpIfumpLVMpHdxPQ|c&{JMNhD z4o?3MPG1Q~P7QFqDLj=rCHbfU>S}Ib8tK&TEW`IJRR%tHIhtBTDyqm_pnC9ZVR%MO zW-iCuuv){<4XVJ;-D)d6-A+>u?Oo3O_NYA|jx$U2IN^Ay4y1QE9nOc*okbwsNar>W zeap0~KICxHJMeFxqtZ8WyB(%i$>|U}st)J1jJE^y^y#RMF&(KN)F0(`Q#(4Z^q$0c zmAeA^=rzl@$dFsjZ zOsA=SM>!qpPJUk|>CQ6A=M z`79?$ju73Y9jgg%=jOzDCi5VG+w%-;g?Sjj?RoY)HSqUaXXvrWYQJ&s~Sv z0=c;Om0Et6Al-QWrFh-|&T@0{S2cW=;ce6)9v7waEz|i#-a`+xX*}sRYPb88)Echm zry9=*#^VNfWS+X`rwWvB3CMAj`1jNjjVHT=@tmL9%5ZQX!5zhTKoqMz>vG)gCE#_= zcPdACRv;hgA$sJPp8dHJ&5zV@bki0OLV&^6b>`3Bt*=}BlSnOB#i9MQ?kE14hp zKLLf+;>Bh@<&N+?Q!RfY^9oHwTa?qH3RKQ%nPj~S4n9f!Ot;LE+;SA!aVNn+8lHD8 zheaN>+~Hh~bk8kGy6bYg6yT_rvX_Bp@i2Z9KF^eQU&8%aqv^R)_xHGNziN^GUaIA+ z=C2gBV?=cWljvL3B6R3SI_QPuy@Yqk*@jLnLeD{$`{iI>Q+w3qnyvwz?v2u^WlZRl zyI1Ix({##dI^~$|i+wufG@a1?2%T~_3!QQ=(R7-O(g|>)(|H!1hI~5RVdykO?Iya= 
zczL@0D#!g@?9&PD1RYWy)3FOVWBCfnOy4DI4V{Fa+>0=d#OJ8%DAkvBOEmr%w^?;D z(yuanls~Nb=uV+eJRdE&R^vD0SKxNT@<|*&ruUKqrd=(0;qz7*zvr6qb7fw5i!?tj z`n%}(xv!D-<^N9Fm)GN$*W;JJC(7sfs`XXe4sY>sp_6-eh)#KASWI+r zFLzIn-QyO}9^%^P5#PKb((g`ZgHEEo&TKrq zZJmZ6Pk=vF!#@ff)bCr;(%x_C_Wn90@4wXVr{&$W|MY2x^|1puwZCJ##-p5_M2GfQ z2>#!gets#TecOU@I7)O-3F9#9#E!$9>3kx-e0#gl3H9cfZqSJ|uJAv!oC@e%M0(rP zr62Bk;3NLs066%!1M)50e&@Ru9@yi=Z&ZGW@UJ{W{B;8DBzXY76}hMRSNaVhtlHE7 zbPc8&o{%9hNO+!c{=gpIg7lC*tjHc+jk)}D<)sdT4z?eHK@ zO6ZFIY5C`<(#ZVlRedrpz!&vjK(^9|g3v=nM5h+VOF{3r+wgA@?_#%ts$w^~+au}7 zsnd5v(wF+_JJAouW6}Luro;9`KSccf03J{00B-vsweT-R{i{2J^3BHIvB3D-8^nJd z(I2B^;uFSy${=1r_A}E5&td(}{lWVN{oYFT<9)Y&=l0 z5zPPJL%w3?)iwAqfXDMm0Jr(@h8p z0^5U#i`@g`YrpPCS9SdEMOpTn7eCGT6PW_>}uIpK5)uU?R zPicCkuMm2rHNDcBUWjWGy>>GGrOLy9u-V%GR`|qy4)~xLl8d(fllaa2<>qs1;BQba ztEIxPkT)FHa63W2B8_ha=+8tWIj7*>He%0{Gbc) zl3612S?CwcXTZbqM@8x3UX4(78zPMx}*p{n)x^VzDWAM-})U8nI#e0)&TUun84 zXKwZQ^VzCLrJ93Z<}=Ch4vLGke?ape@g4f%-h_4+L1(e&eESUV93O3e3#Q3s3QVP3 z@Xca8DV|04a}G~1BVc47whniwkJ58p!J*Y3>ObUi`#*`iI=90q5PzK8kx|D-)FNca zP&s!u_@vmmBT>(aT^+#V^|>hm)*ToFR0k#^P{X!*4C(8jxNqO<-NQ|L8@uQT7;H+v_?vL1&S6A4TmK_Q4 z%<{VJI$vHBewrWb54q!LSF!UMiF_;L9>C+rJ%C$$3oldj!b;S$n&+EdFHl2F{}r50 z%@Mim%Oz*6$R$Zn8#$XXdIRfnu%FV3?Ln5;#80YQkE33X!1yJ}pDVPyo|}9F&z>aw zUDffs6KP*w4^e)9U2=|cbpGv@SM09@=;*{mgKPUze~>6)Fku}*@>^EXF9TvqwSEU zGwe4-a-aPJMe8Y$W1UwIUa#$2yh5(_OFI4~J+}{h|2A|7u5+A}OXblH^ppKi-~P=S`!^fbOPQA%ej3`T9-wmTwy|DSut%Z4+R+!t zzb*jCPHH2>c>4PaoC4N$M_8|D`L6xxIogkHe=qXYDKVa9z+8z3?P&Y3(9@Umiyfvj zH4EcP^yj!hwk-a*d&Hu9H`^KA(6|diclY_x`kFqjVjoaHWxl_F>r0-G;?D-~cs>f? z_I!U~4gA8VLHRoIt8O@z;8XYaogb3D*8VkZr~Lumi!_+xucEtbhqpaRkH63Xi%UWW zCH6&N$FV)x`t`o=t9JOUhF*)2TcyFvGLQZFwU{k1tO-|)m)PxrJhb~qal#QFk8*AL zy{HEL8Xce68u%OZJBYlHYveb9-_Xw|vb+k5SfCw6_R!rzYi0}k2q_#0lU<>>h$M}>boHfcLDtMR(PJ4X4P<)W8T zMcdyfklxX87bWsbwf`sM!Q|0$coF0#%lq^?DGxbE?LfDryu`&iAh{vO384LxIrWG3 zf6({`aWPw8)r!~K>m9eF|3zwtj>D3^8VTXI<`LFySWYZC1;38y0X@!ot=rj3{-o;8 zs^irEF1%xYcffZqQa^or-PsPh^dLp_1AMfy&|y3Lv>w&wb*T3N)-%+het~Bj>H*&N zpE13t{}LZiPFD4X+G*1vU%emhO8~FtZ#Ag*-8%?BTC44+S-6=@FJnKMb&%mX+Ed2g zY$f{#aX`e_AWwl*uVWx0QSt=KJ5SV;&M-vn$^L)6&Y%$I@WOFP_a?}XBIu&h#@_y2=F_(@|B=5&@gB0LDIQ7o z3B@Bx&gAe!{BgM_amckM4#|2aAr47$EgXjo>=j!+iMt@pw{%hsIy~J8W;!c!ur!WW8(a)p&Xbz;C13x{g!uS0Y;c8Afch7QbEfNz+i9fJ5TpU-hI?s0aAiRa95KSkST^>}LjAbvSXd^v3T zq2tloj`HmSZ;tJ~r@r-5x8i&R2_~W^@8=+P)poDg%fjFO`f88wkG-DSU&TIcP+s_S zF!QHYyxzmSWcx_s_5MCBiHk2r2U+f6*Rv=6&B}R$a%wB@t0}{-Cpxt0`L1%ZPN`DY ziF|H>Jx+9L$rbSY?RtPOm+FhxpPBUl*h%2y@cOOxuY>j5v>D&9y+nR4*-Nx;MRpRH zitVIDc#>ZR=0F;{BgLqsHawyQVw9YQ}%o|8P;f>@f>CdHUiGP@Ny+x^l#917@zX$ng ze#4LTC_nvxJ|?jNyNSk+-L41NgN5?^`mM4DV^3*)Uxe)({{f0PX#eTj& zx7HOd0>cqk>bOiN)&a`UA3!JjVO-b))c>5eGapcD=rUd>HuZ7)Iu3BUa^FaFsUo+9 z|K@LpN&mRTL?8DrxV~gNTlgh_$Ma(VxA@VI$I^U<{CD4E^qJTrb?cE7hF19Lx9h%$ z!Kh%L&Tpoi9~OHhm`@nLBFzscGfVTC;zl%k&3?>+>}zr#Fm`~$`_O25%DjY}z`Vq} zls_T)fsgaek{%g>JtXldC@YNtk)wOwA1|kwQqG;-n3&b4-udI0p6MJ3G0y=kPPE+;m;uV?&kOpuQRx} z>2>J81iV_@U)mk$(|A4$;5MI_xQM^c4yt~2>lZq2kEhn_eRy-R2dUj+pAjAQVZGqXKA)dYa-H6fUIuybM7ICB zeZTuC_w({ZIU;^{0FNKv0B()1UIz`}wj7alqX1qfezio?jTQ{A4)O2jb^p!2I$Ezm zEDmJicr=lV^(6Mkh<$GMMe;f*_|99aaMn|;Sht4P)$}@k2lj-q{QYaDTe5r~EX?C>~iCjBjU9 z-oj7S!_J|2vgj>&59Vzxx!NF|+!xjuhX(b%ImZJzs@FvlVD^j{z#(J{bAwRPV??T^xv}2#o=iO}iVE;!R ze%$@U7mqRhRTuYr1=Kc4IdnEfz#r}gDPuHl{RlveDME0Y}a=>~f3WBtN*X)D&bLUJy! z*J|mR2K7KqJ;Uop4bVxiJJr-P!Vg-Gp+2$a`Yn4d@at^87ke&%$B$L_o z$@S%kc!+%0_tvD3Njsp|D6V4dOI!LAnMbgy&AFMmtiRKKFR91Gk^J?2y>Dj;;yB!n zc31Vber8pNu^U7lsBYK+)c@s(!f?BtxyTjb8}<`u9yv76Xx{|a6Pb_Qyi@$V|B3v? 
z_6a)9{sNwe?-769VE9YhZ}s^Lz>~0VQu_VoN66XtvpZtCM%vaRx{D+SJrhbI{G<)0{-G3=^HyAg2+-vNQ z!nK4V2dG^v5A=Em#fL^ZH~R4bn;$^J4CasfAo2;_CB7$mDme~%BC;;EvBGYtb6>=g zUkaa`OMce~8b$Xgtj7I|4%XU8&QZJ3FZY8E#m9P8L3v2;S`b+!^{Mdm@QmrGizK(JJ zqMlbGr?yp`zuRKuDdaNN5^Q-nYtD-qc}edi4=i~(XTCH0pGqgF5F)P5d0!2=Q&R@v~fAK>Tvg?fmI;U1%O%fKg$3<7xbH6ZaX$ydwQC zdLpm`%sSL=4LA3b-7fEQ=KJV9O6Fc};y1aM8+hxvmzx5;Ws<{RiuP~L?TPAJ(;nvA zG;|ozsjl{1tK0MC8vNd%oOJ*7H>AI}>;4X#_Gr5{4f)9On(vQM?%5VRW5K_+u=Ha4 ze5CW~N%GSDy0p(dg?1G&?)n^)`)^je3y^PjW1G`Ev_AZ%1>X%g`i*@OjEBx2%RUDu zHA`HMWqjHHfPD-1@w=p-NYOnw%HKz!9J#mWA+5J$pKRm&`F>x|iQkTSmm&L(^eS|a ze;@pLQZE2(x4KW#{e<1f^1;J9jc-=J%esqFXKQ)zRLQ^e*T9VV*>wDuH5otIi_{;} z{!5#T-=@ze7a0F1nv9?Qoe+H{n~dM4&;MCq{I6>=ew#k`HyOW8pW{u&Z@2%?YT$pV z$cK)%HferS{3Sg9K3AjudgYUi|AGj9()VJ&K7rChdcyW^#EuN~NW2{xz%75yePRv# zVh0E1>%`x%oTdJS$KlBlIyElGX&-7B|Jo+wXTK;Ur#{9 z`?TlZ{)%yaR@C40_xTMv(eIC~%<=camVD3nv1##R&3DLDc9ZPupWAoHmOKP@X8#m| z6LHcdk7~T;p1+l8kH1fr*5?}>?7M#uT%?-%{jo~^JjwidSt5T*{3w9O^G^V`__Kok zAE=N1ZJ0?EccbaV>;Lnle;ao6gy~P;V`IE$i2g3w+eLIk@1I!ueNZMYUh$zO`#ZKB z^K@xP)8ZvI{;Qje-_~=_h~jq-(0k`qiTum)10H(sUB zN4CF@`G@ojonxi-64E;~zeVpHWV&3DNEh+{0(d-K0=P{VDId_y)@yP;C4k%I?Kt2e zOD>pwIksHx6S>^>XHAj|_eSNyQX}t;T&0)| z%dcuOetVwW8^K@6-~WsG``SeQCb_Y?Gl0kQcL2Be)2)SH_&+FLC;p1@cv+qNf4WKh z|8SK5+i}lZL@)lP^1r< z?cbWGe6;z)mR~nFr+vQswedf%LHxDk`{4z~f9C?@|HB&i|DnltupJM(vPS*2^rOvR zc0BUg3yl9h8$b4+@wzJfM%Xm&d3b&pVoxdFhv_~^uQxk*zZ|Ja>#47{@ir(o?Kt6U z7a0Gp8u;JaAYav*mw!^D{#txx z@3ZBSCC}d+mFKv}WsUPPlH-{*bpJ@lQR5Hb{7NsS=lFgQ+$d6(I3>QneF@TW--yH& z)gifm1owi_{8YJ(d_Lfa@dt3xG@Tc?$M^&0o>oWSKceqZaqykF2dR8k-#?N)Ti+{k zw%ju!`&{vzbx-aeafWpIkmPf=Iq>E2x18Ez;$$X1Rx`ds>+P%O9^Yvz&696!xn__1 zd#aDS`~IkZ=aRipgmT`9eNk(u`wi+@vmWu58ttg1f9!sC17BEv|9zA3+w1iI*kt^? z{sHnD`uuGaKlY(%d&wG~CrLgGOZ+}GTWb(5vg8O6}L)za-oADDZz(&xy1R133iW%Pl$ ze_88=AkHo8R{EEB>^+-%l zUO!xVTg5mGFXTArICVhxuYj)ABY4M#ozh=piPP0G2)}MX#$?wjyG~e|_<#)yYg9YY0s?UKQ?|d#jVOOoif-%n3 z>h~+6??``+&JQ~R`LC*g;~XR7yMf_PuYm8+@Xh-9EY7#3A|K8%0sl5TodjOy$!N#* z48JxC$9|w_`=4dK-(bBz$9jLRdDr!H;YmBQ>6jPo&!%@dPxwwfPxSeeSqJYUe19;* z&AGNYp971W174Ay>p9@gW8rhaE1aZtPibEW9&0y;;FjG?-IjZ?^u3XvkMwcf;rJzamIBUGlDcqAhqLv_C^aKWi`7H5!V7Oob?L>CIr#y8Ur!?;dKRzJ$ZKV|I z8{ykBbo0h%`R>&H}gz z5q7~Jso%XQ&G%n)oz(Zrn0-`qe<#Tsm8!V^vw(3k_u+Sa(yG6}_0u)j;rfZnB0tf; znEoT0jHyTR72ol$=X>2127z_xkwqh}!Epui#eGz2Ve)IcJ|Fmr}?w64IZRoZx%0 z5c}vpU&}4CZbb4I^P>BiLN~X%{AD%Ad@!uzxMIKg_YKThk5-q%=&r3#)f)X>Xwj=$ zzjQxakJ}H?|7!Myv=f61S`LKh6fX~x?F^zb&jXe}WJicRGWQ*0n6S);wu9ezD~R^CvuMp z_{{ol2C1bo%^Q-dmtek_ei=QjBJC!--j~C)uC*LKw=Q2*A8Ng!l=+^ktvAfQ**e|7 zSI+Cz5_0DT-TEUoiJyXV@?ns)-T6%CE^FSGg}gQpVM+YtG_#u z-AHm8ay%;MPxx{k>#mS1Pr=ko{w`}8%_g}H?T7wVlh+V`bnkihtMt5L`4y5|mV9s8 zy!y#+=aul`9PO(Asq<=SbY68mLEBZ3kP5M)t|Dfr=!-KW?;rjxU;fYH%aoD#72ku1 zk@tLGxY+MT?t>3WpK1G@%>C+eKP&RQfbR>DUA{X-ei+I7s9jz%c6sWG8uDJgizIe? 
zExAwapPSq#dkb>E>-lEBp}){6{{23`u;T5|3+cV+4-7(@m)OUo<4S3cD|O+d2hoMx zOXi>Y_c2{S`nU@oYLW0@e-hD+3=vB2evihVMchmLowc%Vs_Ez;4)!4DPg6foZod1O z`1=7=AIY6${cex*0_k-%NB_jc;Vzf&JavN>_cI;!xOu6L^d!**@rO(3nc?}+{Q;$h zD4&;MK1Diz_ryxGlk!wo2SG&EM(j+GF?&{MU>2X0rw1gWNW0 zpL{PXOW(_4zEPtb2MYLz`_+YY6s{lVbD7@`R_Cj%N>luX=%x88Gc5bxGQ)G+Cs)*Q zO!+QFb$e_XuVkDDzI$x^G0;`ZJ0;&o?tY=6mm}Xt&ROt3G;qn^^%p`<`+iK-Ve~vW z?Q)RQ+%Ih6t4ibFpw~6$>TeqVp6|)@;~EfdaFDOXzokfg6{(KIRZSe6?Oo&pWI^sZ zd!5)5-CsflG|$MN2+P@e_w{9fkNDB-i@Se;@lQ4xKlvNs_8)IDew#k}o~owN-)_IY zhs@x2`%(oy$MM}%#pV`KR^dqeh;cnSHsn-24LXtiZ`kL|`=z>v4SgVQdXaCQ_Xl+M8~R}2$c}(aV7i<5hO@Be zgsHyRd0#I*HZ_SJPkn6Z;qRl-dZhc83H14a@JqMPPl!9R+{C=XdZ$}sUw!v}L$Ai> zSNCp1FFn6rVEE_E&M#1@F}-2St)JJByEoeOZ*u)Vs!{(%mb|Fpe{{dO;qhkwqx*Gw zyaT)OG4Vf4ydC$%7WBH1*=NRfZ_@s^_Ja*kH1h?YLO2_lvUpgEwS+P~<@BT{U1==Cs$;e>b;YXNx!u)~N z^Tc&2vwsVpt5hRBZ}!)v6a*vL7p3{_r&)pZEw(3!bZn=@e?J;E#O|kAfpIFb9Vq*0 zRv0~hruWl04*NwbHizOtv>O5Iafl4ill2dtY)6@K>RBiKx4*M7Yy2wNPgDOm@%M=_ zPSE*P);Xv-(r&D&og_Q-Zdmu$Mo{T>Chu#q1D*Zl~DM&qtbD%uL!}%J%p6q858~l!K zRt0Jg^dzm9^*meXpYp2XcQ4AaKI^&J>@zWT)Ew)X(fq0Dokz4^FinZy-W1M8Y1{Lv|mK?m08c!>x?VDqveZ< zyKF!_3ge6QLFO04mCZi%%q-PY(DBjTxe~Q2w9Y8-Qnn;YPk$L?8n&}qEQM+IE_{GyL zC~wgX`_80aDYTpV*MfPtAM$qvt@Z0n`zQFC*anl8BzmA(W4ZnS^`!=CZygvy!&;AkggFGM5&o}GmTjZ(d^OJcW zEg4S`I_x(AdBks0?&FO9Vf!o(IQdg)_8RN6BE3oeBIR1c>o{5tAWp${1o>mLMZmQk;TpdS%WZ0; z?zBN977iC=~HOux3%7)CK5bbpTE`fbxeDr&mh(*$qxH(jqfq? zw|XAY@W=Oas7vwA zeu0d4->bErywq94{0F;H_%Or#(05qV$zS)(8b5&JBE$e<*GBz-zT2gJ`84e2`_NuA zliGv)SzebP%(1`F`vgt*%Z zlY~E=W4qQ-uzT;6eA3_CURkfr9dZyaP+m(x(sBL-`U(cnUaD8+!B3!Xul8sA`pi7b z@pp;)-tPBfBe%md?auAw_UZC{@7HvjMLT$2x9~p0KG-JAcS_d5`*2?)w>yt1g?d%V ztXt$)5)$|a4?%S#9pAo-+=s(2thku26?$>MXeHl#i zJm`In84uHr)Y;Z}=>4eKjI<-Azt@ZkxE(3o4oJGlxX5>_{Ci|zh6?>tXUVu=pFfXF zn*C!8q+VwS9OKfvTiWH1$B^cG+BZP`!MI5OQaiaFUe*)3rn5^ z$>sd{gMC7TuVmi3k zUEb0YJ)nz2<3#o$$r11y`v1`2^^myj^v?b*m;gTp`o66F+ZJ=qO#IuvFDCYv{o4>c zzP~|vtG(jihRWOh6?z8c>%^~8zW$^05M(#8f7|de~(mobCzdr)> zdY?&tG2V-ij~~_iMts$XzqiW5PyTZ=@zegHQ2Sl7f0~J(=35y5Qx_QjWl{Y8KFxaf z4_Wtu0E8_Kx598I) z`gQ+}+Mh$?tM5l%d8M{1dHuToIf;BA{S4so;}pOxJ|H$Ee-uxSGv&K#(6zyFb3ae` zsXo1L!>p5>DY{N+y8a9IGoXvj|I*I@-Y8wk&Z@?*4bEr9ei=ql>r!uefRyPIrF(*AGG<<^oQ1! 
zL-LX2C(F(JiUl4&-+$&fyid=>J;Kfh>zua(>ksMJiSD%v?>8_zAq!`I2ToZ5brDXdnj(p`TA2j zU#39$z&Gd9z5s*=xt+7QG@tV!H5bN@e7wJe0uLNVO(WiTHpVf1~23KfS={kiQs$}r#o5RAMWb?(zA4qo$&hW+B&|KTY~mNzkt_6 z`-Ob{N_<81;<(3asgYV=I}XPySN=-N36B5tKf8wf4dC(P9l&k*Ys+t24iS7L#7|cF zJo(KbxkAU|5Far2cqrG#*P#4XRDK^me}`DoNzfMK711C19n3-V6m)wc(?1i9FZJJ$ zNKc3y%WNB=dN4>`UR;sd*1HXp_JE6`^a-Te4c8tta>MeO@%-1sv8pq&_B+SkD2 z-H0Eb;0MI}9pESS13(9P>ihp-SIK)xp42Y3%k%sz#p|^h*xNK-6hEgpI@#wG9|WHc zJfitjx%)7FMJ^?J(cF`f5xod~*n#r~Brn8%pRbB6eZb&@i%gsx4JN&nxXx1jUcLTz>+vF%BRqAkxAbEji~0wF zkfn7f)BpaDXuOZnx=TOKNuG?83z!bFeu_XJ+Dqd@`6~Pk^BQ~~z~lKcfLnae@(=#g0Bj_O`g?%qL6F~;(;Sx%wP!x{H6m}xzZ(HRne(FD zUcATO9~?J+>Wqxv%Po25IV|t6?^(+|j;EkAEXN_VwLHeF>K%C0U+=Q_1)gcUIo)Kt zndA}K$+rEB^U`b|yJDX>g|yzkVb;&H+Md?;a^n6%&YxsY8@ZJsI~sBkydAQmS>L30 zpdC>=+BJ3_)Dqj#1NR#_$abREM*|OQeVa1*W!-V$r-}2F?T!#Ue%=J-t$8YTVW_-4 zFWK%4!Ry4&`>6JT2kCDB|Np-3H|Z7VE!fTV*n?2m!yW3~jF0RVyss;$NARTk7fHTC z53qb=yD%hA?0(U@%jyt3zF&d7vgoGhzEWy`&hhx^z#qrnm4VNN?8A^eLeALpVR8;5 z)DF9y)YaY)yxw-Ad^nCp@}J}}iG}dGq-mcrcGMvF^kqn;^S_7u-FYiT%_1z8636W~0YpPtdrX)OgK#W9;i+$nV9Q^I}TNg}I#@105{7VLgY>2Ldp+ z^_&E|egNhWj|=f7=^?WoZ|>dWb#kN+raEL?;HQVj<)Jx_i&An=Fyb9zC(Ayzfv@`G z!RI&!eys7O_5K^#Z;1xzIEKQxnNQ&rPrlzHlj)G}e&Bq}B}66YqdN(1?*B;X`#`Xd ziQ*KTszr#+J=Og<0sL2O>e4l!L2>6Pt zmL$qcv`qDe;IZ>51h?lstpks&YV+~iaB9bSfO9)nwOj2Y|B5s->4_F4ewV&y7J?k} zi2@XS9?G1T@lb^KBj#`6_>kv{o_}b`J=)LtBJID*Kdu%1X6*O#IjT&n#0O6-5`I$Z zJ)Evu-$eP<@FIaL^%hRwAo=s&B6A)~^1nf+7yR_6bN<^iru_N*z8m|$h;Nm82fb&# z^gZ+r?aJ}4RgjBCk{e!z)76LaoIk5t1+L=+c-QS+lxO*(-r6erZx?+$b0MK$d@;$3 zRrt;LRQi>YFMS)QpAz}+oJtFw{C#qOgT53HBe{ojU~8nk>1m>?IsdilyZ-qvUEV+c zb-R?WIR7;x=b7eqE^F1p(mtI3s*2yeC|@9dZV+PjDA6rj-}$e>c3n@QdOa#Q$5vNA z23KnQCCqakcmKTIF`<)wm;0L0fz*_y(>e|3eYi9k_#O6Wf$%sfwvPtSW&G$p46XtE zt5Tnpa%p`(89+3i2scSN+-r3Y=!LO8N%X?~7(fRy+ZI^JB@N(e` z*^f53Es<`#&n^Uy?^jUXqMLu-D&vSws_Pe4#BnjxRIdMa<8S}l_ zk9IcRi4dMBjH<@Ba6HbpGoMoaI7r zA4L}LDdwxrk>;pMS#kds;t%9`yBg3G*>OtKkKt)BACGZ#kV7KS!n4l6)Wh zvexU_H2VqgE5wfQG6kO3D$D0A2EUDJS6p2+`>-bX;=G2+`57+kg=iOFp&K%Hh;?U^S)Y$IPItciA-nHXn$IiXv1&@3&^PCsIVX~r3IN{i(rSNOT?_<^L zPxoy;UQs5LkRI-;4*$sU6Elam&QMS}>2LJ#=&pk^qe)=3>)_PX==jXW!;_=CW=41K zJiKMk9&oCkf9CLp@=V!E^r84_Z|Ha2PxpgIj>n~&w4H~&-KfBu;A*|GX>V+N*96+M zD`Nc>A&{)#lee(FUG9v?nEws*Wdb8srsdMs9( z_U|e0nwgk7vSD;`I*f+4^;G-YKT(}Uef*QijZYp7XzRnLMyE%o4vp^K zaAdrEU~HGE^QQ7{@1`R&qto7~ZmEIDbNBPB*T4Juaa0L#R1r<5fBCyNK3D_(wegAZ z^F|L$&K&XLJFsVJ;s996+c!EjKDytVo-FT*j6gN~q#bI&9iOZTm6%qo@z7H5`J+>a zT!;TQUbS`Y1s9BXljW)Mfobp7sfqEuVfdBhX>W3L%9}iR6VDS&kVu}O{&@VR{1Rp!{Uzq~iX@Zrwm_vp;c>&yH1Td8w=ZF^^-YRYr*@3a%#ipY&Axe^chmlfUHihyJ@@{9#=1vce{Trq4)mX~AELF0p%pF> z{e2L#dh680At=t>TZMAog z_nn9<Q4sqSEhqhbaR1E3-t@s;yGEytPN;-O>3mapdQ>x@k2d^6+qa2X4jkNn zvA1`0X5-Y}=^bE|8BjAZa~Lsx!~dE+Fpf}ImA|RcJqO2kKOSU^r*h$BdsPvBWV*WU z+c#OAKf+Cm8>D+0L9Bfiy+=`pW_Oehldds5(4BE`i|w*U51#iX!FrDW_}Le~^U?d>bMSQU zE#LfcIkR}=ulIg^#d)9a*?Pw>uif$I?*f0?ESd?UF#JS%KdvQ!i+;SWynhU8B;uTe z5ykxe?nAHtLfo82+AmFZ$NWfmPL~gj?mYZ7sL;`Anh8dm!JRooElR*I$v=GGgPaM? 
zE?#j}Yz<@6+_6{~C8GbN|3IF20!))kCgGeQly&{2K=zD{kD(h;6AZTM*q$RhB0449 zm_Wb87FTcD_E&Mk=A#J9w;346Tc*bLj)m;H=k2RLf%oj*mC_F%v$7Fd?`_t*{kp_Xu-?~v>l5#@l4`w1 z=2y^vvUI#1M-bvT;H{q+pBX(&*7`s#Y*pat;f~VsDsTd2YrwA^-#0#S>o~bhGe;&X zXZPmfj-5Ad+P-n)jXO4P*;xab1&00MCB*N189rS-Jfq6fRmG<&S2w@aWDN!7$Cvy- zK(rG+{6P6I9Mg%Zv6&;AMn~b3f@6_23=gLpEIu59ByeP}K*UPaco-g0>>7a&pRocn zHk<-Urqa;!!NG^T&}+rdfIrE9M6f_|m+7|4qRv55Q{k-rIuDeE2^^(8+`E^zE9J};*4frV?SKc!LdoyHv zYEkHm!FV*sc)70Bot~y|9A6or~_k>S; zhc#DU5EqhTXyf4RDTt%mz`clUk28a#d1@O$&sG6056yH)?H62EFm zrM~f#o^Re#!+w=*n8}FJ$smU4$N|IyIB}+`sZ)sw#^p8NcqlpeQduIB^7Z?=?u^tA z^D~kGFPnEQ|Dl~0e%EFyELT1M!oV88fBNlLUs*NFv5TuK$72_l)`KGvxtJyclq?Vl z(g#ae#IXd2{9ywHlO5~9L9Mj5f)%V$vcTh(z7F4VYgGgB&`lL23y|a2=&jZL&qFiS z4FuDtke;0IZW{B~C=SI&y@DnjnmHJ}#QZumQ{6v2bf9|wLj1Efj(-!>w%-ym8zKtX zhsY0TZ5FSv-(Z_WOnc5?;FX$P$_M_%*v$0k{)(|oFgyHKK=>W3Td?ZV&$;MaA5Ywm zOTYY-ZE@))?R&qt;V$z#?s?xeJ8p`4^h>aI>t?D`&yxsj7eF z8q7%%tpnxpBd~IKeVZ2wt741-AG$R4?oZvn_uC(S%c?K_>*;I1x#Gf|*R}6DG&cO~ MqpQDmPU|E84^E!&RR910 literal 0 HcmV?d00001 diff --git a/etc/multivm_bootloaders/vm_gateway/gas_test.yul/gas_test.yul.zbin b/etc/multivm_bootloaders/vm_gateway/gas_test.yul/gas_test.yul.zbin new file mode 100644 index 0000000000000000000000000000000000000000..c1726d8301ff2ffc1e8dc3dfdf1552926080a3a3 GIT binary patch literal 71392 zcmeHw3wT{eb?)r*+FO<_$+9I&vSe=&hv1eF+a#oN5^_##ISDmHj-908Wt~{Zwy-V9 zlI++4ief@iXrL)j0!a!=NJ%I(q$H(nNQix*J_4&C{`K;XgIk}>-%nKP`ipx*o z9_8IT)4sY(sg`3(-AMJ7o1uQ-d7huYji0BvCd1Py_1mhIN-~}`N)_q3lk(MDQ17mk zkAAC94s7zBE_$9`r~0UH=6uw{pPQa0{CB0tsGL$8HmI)wpQC(rkxJ7O#@mgbYw0G% z^;+(bal7|{bOm~@{LFc3C9XLxpHYLEixst))%_a4y;7T13txxSI=Z@dIJ!Q@znt!6 z{AkCSLcc&)Kh=Wg_d6}lrFp@j?Wpulri#P!Gw8I)yr~xF zj~Q-QWqZQ*&?nm_V2d(FSZu+Ynl6ZbC&U#qj~r92+xzBk^U;`$Nz`z&}C_4^|5={Vf| zSQY%Bn_3&GUn72^yV{4*bP0bfI;i=tgYXg&QV#fBfkOJ;Ig9viG3NdR^KY*kp$oXk zX;HTVKBVD!XAJzB0|t>R2p-{cKMt;doGd;|%gG>SoB2AG(r}ie#nT!-#qcH6A?8P= z%iRe%Av_<}<)*pZ;u+;0O-nhneIxbzc$(T<0Cvb2r!9g5@CoOnBo?FM|eovP>nWJ)TC@1ozayi1Mx^sg>=hT#|+sPef`o5Ft zmqm#N>0i#}@~gc;(p%~wJx>>-KGDr@P^2$V{9T~9YP?g!RqiP3LFK+n>kGyAj;r!i z&vD0C58kKeEA38^ygCZ~c{9O1jn_Tm{2k$UZ*fQu_}-wj=Xu;O1vuJOk4U?X=$}J$ z_Dy~7W@^6?a?5mGsmDF7`>$LccjFTfC#t9UEk*t4Q_w7+aS#4guF#bez_O&joSddpj_KYQ&?%?sg#O3ql-nzG z%H6E#G!~~5;6$fyT6F3S=yZ#rQ!mkh=tBMQb^le4#~acK{RACSKGSgpN+$9b9t&Un zm7$aHlY0X5Nb>V3JulhZAkDubd>fTo(rEa|lYXe&-b(aYf*zdU`P2Q%bHCav^o#jo z$r(Yvgr8P7FukEo#2#MkYCea^GM_KH&zirt$ozTtN&i4koZ_lc$)^wOn=MIpBDFgvPbx9788h>w^X!mjjze{ZSV$B0QKsOq|f^kvN?i zZxA}U9}+sD-%O{n`ashO{P$+ybqt-{qgo%_uIcn(oKAofoi4QLWa>A(o7yLRAarWD zMd;Lc8~0ziuVuK<36)eM(Z&6Y`ylx%?q|^-;@dAMbvKQBwX4UC=eOed)0&^VF}}5c z8@(~+G2EFp?dDu+Ck3<1^mCx6QQ!HXzE5TNzU4JS_fKwvy#c%h?0qA(t5RvI-|`%R zpE{LlVK_WG>X-ZFz@C$T1t&rKPvAW9mrx$#AU|pf%ps2;91})NITHqXvb060p=&= zUL)}AHBvrXa^6Sf)ei0l+11r3jw(^V-MP`ofzT1)mW)r;aQ{NOrW~FZNYn|MryNi) zA87yMYtSs9d($CfZ$M|jok7PJ{*!We!g!iKE_j@&GI01!)Q&S<27bMUA1?zxDsZ(T z-cR^vEwO%oUHGMGLiiV>6gztmeh=n9HK^z17y91J(Px$yTUH?~A9PZfyFmI)>*dr7o?SjpojXfD-d=`} zw}pIsBaN#@J~n!d^g7M6TB-H=l#{I2PnX_45$bIomyt*4-xJi&c1vC@y{}e-E@aRB zP;VDdujx&~@9q<+b?g^?9vlU`)&h@P+7bUGwO6;Z8Ww`_Xg>!0LgftI$PU~crQ4vB zNVgWm1}9LioIPy*sfJINslP?TkC%bJTHx+CK$lAS73fRVIgk8Dm+%KhLU}m+5k9&& zYOoJJCq3xFzpmqxf8%}={}ymB`G+R@L2&nE>^YH9p8reiIeAL*{9Bku#>0L_m{0mP z+Ap-G%jT0fFB5?$=X=8X(-^=(>(D%iTVZ15=4moPm>2T zemm0#&^6kR=keg)HCk0aN|Ei25eze1RW#p$T z=1G@apzXm@-7he9=0%7x4PVFoh>#Ee56Tx>RTVxA;mLd$!fihMw<`EsslSo-YsAla z&4|CRBrbzji$BO~`UXvxUx+@<{#f*l>O#BwpjQ!>l754}RjeJT zA1bqk>SF}wYWOD6Td2=^E2HxrnLNd5jbG8@SWy|jdm_tu&&F?SyAi~5vux)n-A=pJ zPC%DBjpt{&on=-#sv3THCPW{VzC`Gi*7QnidSQJC^jeH`0p`6$`S|a};DaZGPu%Z; 
z4=U+p$-{?wH<@2UxGfLguY!M`dResyzar1T{(*Z3=vN?mq%u=PmqCZ^oAH-aJ|*|c zX_a_<#`=!bJ06tlrE>1SYyCy-Me`O|&m#J^F4A&GdI-8nxl|5UlB*uI7}i#)iy-%~ z#aJIwGv&<8d+HLIPm#a$YWOpR$A=uUoTaX0InnDHNQYqssK7rhvIb3t#BQPVLfuGc(H7)A1bls5Wby~;` z(Y5JEqOZ{AAvs#t(mfRR^&c}C+n3EZtIn&tKb)ZDXd>B z{{N_gU;Mo}<5x|ZADSIs?MGe;@ebxex&KM+f{Nw*G~c~f;q{Tj?17I)$vXW%JGBd7mBH3zV^SOejD@`@(j>lK(75Z%TGfytPhu*2=pQ0 zC%)i#!~HR;7Fttf`9kI`geT8i2)Fn``JgL}cO}|c%Y3cJse0M(ZR6+E9nn*PygTbf z-sO4P=&y{?%T9shKCPf_!SAF8X};9*S}k`3wT-8C zJy;Wdtp`^qA2tjA&63Z@{F_it?R^a5--PgFehc9izq#Vygm4=_=e;9(utxl9h0OnH z_HWX`x{UE}pfQTXU*g}apdX|^+@DmDXBmGeugT*N;WmAL%J>(IzV6QmKaESr#b%u% zGt0QvtY^vopPBkmVIvIbVH4jDIs>UO(2e}CKc;Isw*P0zlbX)(i&&2_fAT&7=&@GS zb6ES2iv8AZAQt`B3y6l%jZ?C-)~isP83``Ew0ssQ^Tv_FRZ3bj(~os);X z6MsX-uj5j%<*fX&C0`Z)#TjHg(B_(sQ9R`5U`#|$)vWb!OUhTn>=#SB?GD>->L%uy z=7Zz#>|aUz>yfF9*gMt2{cl(8m=WK&ML z_V`Hd`XX{`jnAX?uu6XTeCLPOF)HDor=PFsjOd!^jB#QPK+hHv$9i6s_Ss*ayZ!YV zs^FjR_?S;HI=IuKAK`DpUSxk?>qnj6P>qf+{(WjjTr2)PHdKH%M5g0xZ`Q2Wdi7k< zs}i?0e@pb9TBP}u;*Vjjw_NO0svvj(Cp=bsuG&TaHWwKWC377tj2>?AD;g(&t&^qG7ll#mJ@4z!6P{>Q2RRXNqp27Ss$?E$gI~b zIf*#CkE&_A*6s{Y{HEHoYMRF1hWmZcBiNV1dLqcDI#+_Ol!q4kaKhL>Jii>zm-sE} zF~B-{Thvc*U4nKfESuAD5^g~D;#bRd4MFbO3h+dg;{zZKA1?N3dS zJtBG;f6A;onD`abI;;4T_?7Z*vy49%pP}hX`VI5ZFr@Fzx`opFe>)ss_;w~A=BeM> z|84pM&9}?6+ys6?TJj;tscE~weBFS#bI49~=<=;-2hWqzZ{z8Suhon^V@Bq8mU+fT42<%N zi*Y4=hRWK0{5sFb>jE?M@dD);!+4GJT|s`1!d20S{9OB8@QYNnn|OrQ1)#^hbr3a~zq#c>Q&9Qr-~Bod%KLD`^`Ku)J-r6=4mox2K)xDr zZMZJ#WTwdOQr@1tceDPVm%OpJ9`=X%#Nl;M5?7JaH0POdFZGWHd1fv*wHW->7pxBe zF#>mPC-$DAQhC`Q0QE2Xx%2V;-0t&?TqEH~^~>z%ZV28RN#57(`e1j`4|@Ngd%ngS ztpDd^-?rZ8-QZ{PVs~A=Pr=puw#`0nyw?Oh6YlTM>HXhXy&pV#sPg`9Sto8r1i*SL z4?T4&{CoI~C(zFZANnxzo~BzZ?>W!7eMa7s@qu2(PZxa^;h!@5_>=RVL4Jt&ERye3 zj>GFiE^=QqZuc1n6Sef`W!HnaUlDks9*n?kJ!sR(*0Yo!>5JqC?E0Dxn0elZKVyB4 z_wz*PHrMzjnT{f-3G{0Q?U;|Ge*wLp^?aPz$uQrRIA0a$t=C17k7Iq)bW4zr(|V~) zJ}$`5qTW75>k}MjyN?K+ymN`qB6%{K52W9rJx%U+2)Fsf*jtlVX`;O1qP?vxoc{~f zOT6={^kbguC8VKZ`Pmo6+Y8nK8q4@O^IZopeps+yDz!=SJhNJtCO#H_jp%^5eaV*s zK0lY@M!LW8BFH;_BM;af#Lt`uT>6u;dW7XN0#BUZ2;7=q$pc2)6eDErJIV37Pb+6?t_ZKs|0pE#rL`?GELcX+4kQAAbKU)YI*} zqJ;Gi?`^Vf_H5H%weh%DpkK-H;C$s`1J~*CZJ|KL(Y8%?$$b25!Ia|(T+#x)f4?}pI58FsgdHpMnr;RxyY5j}ymJz#c zw>RJQuX)a+S8e@E`X9=r-G9j&gzz}Mly?EoN2s^#`Ix8P^DZR(nEiOZp4P)fPE9;u z>`*u2!(l+X9lh?2`V^-j&o6ceKcELlzZ~y0`xfZFa=s^cFIfEG^lbC|-qYHSUBq+> z>1feu3f+qN`8Cdi=|8UWK5qQ|tx&HKAu#@i}UD7Q2TRXeT!dYeLhmZM*P~J z!(@wmH1unK&g?hrs3~uXRm$}~{5f1VwTK^=I)nJEN&9iU!JqW`awJAYuODH#ROw?P zkFaMfr}6dJmczPVDx=q9oi$Qk`aM&wGqS#H){{*BXa(C#_^H?jL+!UBf5ZJ~_EhJh zUESvxe?jyB-V1DDe|9;tP~2~4Hu{A4h2sN~ABX(M#pvfgnYxM-uz{#JF2I!-}7?1!m%THJ*MR~c&`$Au8-loZsk#2Q=ogC4|N{+dnxicx8p9O z{LIC;p04#w^S+butCiw>u1DjJh z-wfU#tD^tCHl6SHIFC4Uo!9K2!|_(k$CST}h{Vgvxuk~uDJ#)4ieE#&H_@I%;I{n9 zdx1gyGW$~kxE&uuJeWS={uI3rbET8#{V6N)eg@Gm*q3Se-&s!julb`;B#;;PPL#h3 z_n(lwn_84M`c&^vLOYn3MJA7}de7DSjMhi@Z#Msc=Z*Xtrl*%T{0@By*;Hm9h0&j8 zpUfJqPdCp}4vpTB{UI~=-&%SCW`Xn1V&5VB&yb#o=skPh=DVLp^lmtB_PmS#AHpsF zUm1O`_kn|QvVKSN-G}#RU&?tfHG?0B@R%p>5|k_8#^||fdU#WG-L($y>HdM};j?Jp zE9p0S4|OfhUv!Z3A^qGba_wz#DBs(u@`}!PT*`0qo{e)G8&C#uA@L~LZ92!#xv16Y zhQuNNry5=!yASfd9@ab~<$~vw4~{$sV#$M0-Xijx_*SVNEI)0e=d2gfbZ^H?;BH0s zf7a(k#`lXi%{IgR1ieSmm5ECqAPeFKW~EUt(1?SH$igCc?GNbnj>z7dc);Y7Oy1JOO=1_q^CrwX>X7yW%z3pp$^C5k-i@6* z5uDFU{O0iayue$_`MgUPiQIa{`1oeqA1`KKgZ?nz&gHz|r8{+huB*cDERS>N7jM@u z$#}6oKzegV9G8ZEWPQ!=+bcIX4_W)Uv`;Yt_jWHVy*Mw?*SffzzV!A8J-xl?R{`9J z2!-Md`S@0o=8?n!a@lYI0{yW$^HAP&m(t7_6_TBLFv^+na zqH_aee*s;U!~1x={joTF-sf2_{fx{P@QXdbj|};9xTpM#dk@+}9>smXvs;0^|RU%J{vPh<<2(cd~qYt(D<5 z>lezCMC8+ZW##_WjDNL-|JoRS@(0Af{Y!ctu_Jc;BYtLRXOjKQ5N`SH?q5~GFMe`Z 
zzefD?*7G#J$UL}z9itP-G*3H1`|G3lAE`5bS|^F%_kO3&_x|!$ zGxrm9#?NtUXlK1w)EPhJ=c4ob@;c+Eb(RQz_jsN0TYBF8A94I{HT^^R(X|o%RHJ_I zuAufJ^)3D84Os9R@y}a7X#dRH7N^U+@lre+o!8IR8NVgB?&EdFZ}E+{z0UYkar$^y z))_yoS4Qdo$vWew{6!T1r(*aif0aSJPj;U3%I( z=jbGOM&fIg-6H3jX0!aBnuGHz9|zog2O#|J74PR*ay{69<5vh6@~BI&4oCTp;9RGb zX`k9tY5o2Vy`RkZGWz}6%yRkOKxVmoe*oX@L4MgmK3~2|kX|Owon=xE?{mG6x+D1q zY7g=A4F2>sSK&{zfOUtu(ckN@k^Ud46a8`S%(F><_d`Ty%!b}4)cjjKAIkl2dcO2j zW&IF=C(3UKZ@yIU5@*zuhoSXKtV1+?F}c2X`8?w=jX%iq*ml4d`sh3l=}&r(nAQnM zpAz4SK7EGi^|P|{;<#pM2v4S02)Fd9-XB(eK5}TkFQoU(^;wJVKezbZ?5kSs#Pv;= z=$ob`_2Ks>Y}bnN`_lii^i9{o>Kkeg`UZa%J*wpcZ9cL6k=_`e%pboU$5H%VKwehz zjWhVqqxV4yt+}%NNB+m!2t1MhB5;fUs^XWpJ#4>5{Pui!cyEKgbBI4#2d>7S#X9ll zwQ>Gz8hdv56ZsvU@8+Kiznc5T@5b-xHFDx-18=$9x(8jNa6`|k`EHB<-2W~8G5PMG zz4d}0|JJ70Nw30RZGPn(>sOOU@-GX4ykOWh_OBX#@-G(n5ihT3_Ecaf$F6AiM-&$tf#e0s8H$Jc4 zOXuki*yFPG{F#;82c2VjtE!w?bo18K8Nbzk_y5K52Yk^`CqA(AM}HS@53S5ePXv5s zDpU}Z~ z*Py>}yi7muXY`HT$nt)a=JSo+FyC`?^!xw%9D^V(%j)<4vuEn>wwvg!RyMC|}LLr+m)Z zh(DQV&m(ZlUzqni+xCb1yMX$9mCZ*r@~LevtbChCat`_%`&l!6Z2VTf$D?(Ndg8bA zhDYx`%m=@%SIJ)_`7!yx9{h?ugBtF_kSk0KWF?l{r<7e_$jWB z$hZ5y>Wts+{}&b*|KHaczh!6KuhtpAO`m_LGk%IQBlGLMtj_pt{qbT8KlW2o{&mj$ zZ|lFYI^!pM5T(y(3_s;(`Fr>7AJg;l>%Q{+jtD%_-;Ka6f7iXc3Vz9t&3#|XeCNTA zoBkw5r%nnOR1b*tK^{lA58GZBr1@Ms_kF!p@zZ?H1OE&^?@!}?p?UuY-vN!~xm%DY zDYAXB*3-JPd0zigvs+JV`4_Mm;pxusIvvGf6d$(y#KeQ<9J}-yjxQHzUCqRUsuSaZ z{F-&08T6?&UqSjJDsKhc7cky_y)X6yay!V$VA2qHwFYVXzq2*#J z^1qBX_>NHexu-=g^!Wi=E`oJDz3v>+*`5al{+{pqZjpWHGuEBUtaB`VeHr@KsGpV| zO4d(H-#TL+7F0j!b-0l3L!>t%c4eXIUb5^$Il6ziPISMw4Ba1nR_G43Tw{LAjyp|0 zr@fx7PLrM2-zgG(hJ6MD*r0&-m0R(@ z1Ak=c&*j>R_duPCkhitqble)1qwk9Hb+NwUSQWmo{Wmc@kl*2J>-YR-`(9l9{HONEX8T@rD|Gh> z&`I8lZvS|>^>*oB1fFQGBXG-~0Xg_RDqNG_i>AC7{JPUNZoI$Ccs@PO38kPi`-)Z_}C98HkSb-di6b!~Z?_SIPG}ym4xu?s*@=G6FC> zqj;UNW`4`jdw~JJ0UhE4ioOP|bE|!GZgob^t@bl&iu~@?*}PuA%G3H)>l=(dgP;tQ*)PLGg4IIr^i}8`7@5yc(%(O6yWflp=kK7OZCEQVLhkUr z8=f!6m-i5Zd76yP6VCgPa#}t>bUkltWZrO2P)+A31n;x+d!NDlp%MNrDHad)`@Ms) zbAoc_d@;Os&*y-&eKr_ZoNq0@^bXN@B64KQnect6f0Fq=gx89n=@)6g3VwZlrNVck z*Aaish&$-}c{n3OpF0lgqUQrFNc_-{hrXlv9VGeS@3)GaDRl<(i_i5N?RqHBc)i0! 
zFaF-JsmFMolQ{RhPhs~O2*z@)-*Z*YHkKDm4Dns?{Z+_W^gA>u`QE*18h4XkxPpDKc$rVil-&^0^YP-p5KhN#M3o!Gh$7#-KRjJr_#DniwFPHoS zs%@y{d$eu0S?v#U``G{&C+)X)G|Gu zOq$QrU-n{=XS@eZ{0ByYDu6w~_YVeS9bW5imC5Fb?s=_GgZHSj=KbmP6v0V#ZEhlf5MUdn=BMhJVR)!@8Q}$skV`z&|>IQGllH7z@F@%K90zs_~lh8N|N}&S#jd zovos$(Q(_}s&)FisO$KAJ#By5Vd;z1@1y8fCI3VE38ev=_eAKJtS`&^A%utMD*A~0 z4AEChjr=%t9u|x|{(8WMuh;Wo{Ff2tW-H|hDZvqzU!7s)}OWNF=ICx%yGF?uSJ#ek|%Ig2Lwl}$DdVXFc_Kx_7xXe-cKEAd%ur+N)|AWs&&+B$FX`l4m zvL6c{)~@Gs>~FhwWCz6_Js-CN#{)ZneO}Njh`%6L=sf$IVkgF^ z9)H)_=zYt`?XvNAI{S6`c;*yrE$4hep34naJdxrk}r?E zpK)6B{jp#3<*ycBri{MV-vzFy@A>-x;@=y+ul+6U-!K2mYI;8_`aGYJeZxa3iqoK% zvvI$D(D?1CORDI5`3{=+_0{w~^`HFx>Uy92MDpVyV<*Volbw4Z`l-)D@@}&7=dcUu zJyZ^U4(cGD(|M;f=be`So$<>MVvs$}*1T`}JofM4JF-87{m?`=azyBP`$3IAi@cb` zL+fQ-P}33bvC4kzY?{V_dUM@B&EE^5_8icqynVOld4cS@nq_?QE=`|{<-2a}hxPgX z!Mt7KWG+Pf^1H%^?MKbLr!o|uVP082+Fz&rKR?5Ki|72^qu{x6dSxFx&iCbdh+vVg z$@FIP9RDHzcq8!>@{&A%%d-Oxo@Ho%Jn}u6UW6jJVtz_IugCO!`?|bJovHPY^yens zpVg@W$#bR$D9_o}WB3jH$o!TydC6_kKlxr>R+)YIs>FGtkgs@LZEd=Jg#+5aXKl}~ z%x77Z=6n`LqWLVdM!)a8W|sT(&HK*sT^fpW%ll``c;(}4%-gdQpJAG`{!?;(eLG}} zvIy3NAAchrvdh_AgjX*#?qna{lqmVZI(V3apl z?Zo!s!H?iPV%tCIaZp^q`hITbTeSUyp${gG{EEefKH87SF6{f})LxR@*G!L}*NGng z`Rvjo*q5m7Nc+di(C2#Lm-c|4UdI+2q%#OE=1j`ae{+Z*}9bFo`D#-DpP;(4T;HzoJb z+;VPiz2CE|ZGSPkVd;g}LqDLu_>=E`SnvC`e_-Bx!gja)y_Q}$&34!5(b>K4i~Q~Y zBA8~Z&&c%?*1jv3DV8E*%j?xJ&iLI!!9NJx!|P76E|ZU~*HArv?%hoC+_9_jKAi|W zaeXWTxAM?R=l$KUIUEP9z&^DC{0{faqDRmV^Sy|IylztGCp6~F5#a2D>F|kzAybPvu}^MjL+FNdXD87c+16k@%?(3r&-2% zE8t^Nzh%E($93iSbBg1@2t1iT!}>OV+H}FRv4873103(~@qMt)XZCmS`E2RcX`kUf>Tw<>eFm*ZrcTNJ zvJS}6-RKXrsK%4g`@AsA#J5V{ug>J99ppbyAB=dehHoOjnkJO*v0W=q{ek@?zUgQe z`ltNL_}vp(wsRfN(fOo+zh~Lb3p9Ufx~Gtj)%~sScsA^+`NAEjv;%<8hO69{{L8<=H=LpdTogR=(oz5h7YOeu8{I z81Dm%--u`db{bqR&$GNAi1y#MqCOm2hreIkd4uS+j^B;T1C~))?gb3pVEQ#LDVDrK;uIc!2MU=n^B&}#c>zeO^O$()Vd7T zgwPv~6Og}PKZ5-HDJp-Q$Dsm$9?Nj5uj^gfpGN^O1ov5!}zp`w&^IyN{!v z)7oxP7YSbDdk`J(!ElH^tFeAbe%J#V-?J2Nbv&%+OVbhbwBoIljzM!GHwqtSm>;?}XgUS! 
zp;;3La9)HMfa!0=0bMVa{&}~;Z^mA`&PRHcE@P`IuU;|J<>UjyB!6;_svpH#+%zC>$thY4)Lq+=H)rw zV}ZTG#@Rj8u4({3fxaC&p6&b+nj$)@9DldG3%+NO7%kgalo;=>LzSUoeZzJ{X@d`b|`ZeNLslfhG zd`o^4$Mv1}Ykm##DNZf%E%500){DotT{mk!x!e+s;Rf9n$$C<1Yk$U7hx~Sm(8Qe}wTApda68>>}EqPrOIxaQ0!Y%>IzK)fvBi zUf18$8NU_JxSw8N{C^wAZ}xN69B13-MBY{>y!1YJbUxn_$4lyGUi@Ow+k1PR@!Rq5 zAJ!Sa-T!wkF#g*Y82`KC_`!$9j?U>jtKNI!`0CCt?_Oa1?^s~`S1mC99fChOUjjWn zU->PQpAmZkzhprCm9G29zQAtA?qPTMxw&@T5r=o(W!=9wejn7kd&cv7xV?Mh&$~Wg z-QQ>Ki4Zuyg#0OTwLmoX&*608t_S}fPw9LP=YbHU@pA;rC`)j}4HBPlzDN6I@N0P= zY&E()>o?wjV}*=h_IoR{VEW!O!{edEn=B9V7MY-G9oD z6fi$Z?=N6_;BH>)*WI17UB4#r;P~bursImTd?51_!jtDIgj;+-Y)J7auCgxjUT&{E z-_3X4UPbt+Jxy1$PI9{F`g~1SKF_8*q>IfzGR_b_U%I}p3SH+Z&x+$R%%bQM1VLKA zm^`vdao%tx*5is4H&cE==0WyLnSHq##p_aPO^y3B{!8|c$@}2lU9;wMy*_C3p&1XY zYen=U=}*?1-ZaxQoS%BnANgk-KeJvA=;_^ErCqKU;j1cqgKZYlj-SfYINfP9CG*1X zjo=&;evV=ItiTn}rW8OV!{k{b7P&wq+27%Xme@33;{i)*PxW8B5 zNAk8@?oU#>TqaNDV6M)keE~=hazCeXX@38Q%v=;d%JKdZN<45LF^zoVtyCXV#{I}D zAN@eNGkISG_!J{h-Ul$hG+xSQ;T|1c#_a(=>!stt^Y#4P$@0E&vyXa;-ghUw!MZj~ zD7BZv(Jy4bUdet0CXL^p4(u!O712w-M@@Vs_S$hcU%3+d^hy6p{-?XWivA7Z$@3k; zZT)NOZ<~(^-WTB~tG;(1;1N4cuhD^TX&;I2yaWBW@y%0zE2_T_pQHS&&zcjzsx`MyiR$gGI2n1+yQlF%o3nH)!^i z`KZF{b`1wvkNTd^`tQjlxA9duU!`>l?}Lks9l4wL8)g=X9X;MC`ckP!__d7UW5N1eoy+$_*heP!57B+rPv1uOS(RbC z(ET*JNBrey___L21DDULMFPK8zl{y7*VTs?$vSo86PXvGzFNGI)@i!0()d*R zol-9SMt*)uL<8@W?0Jrs%NqAd_UL_*sr&+sBiJ9A(&GsBr5+PH>3caJ z4RlY9YdUSva9*z_O7lJTbLEMD99ci?*~0Tkb|nWnq5A(I_0sx%e}HH{p`WE5-V?tK zsej{NIuI?wXZ(r`)}!H{bU!3~X4doEJ96>;kJ?YcbM059GE>yfV0^s}`yJNB_FW#A zbxw4sRs5M$7K@kHS?hVgWA>RVI*$s&!-*P|lv{P!bcXJuU&eCCegF zeQ;drnJ@wJ@cSpK z7ycjYYtO%ne)ArHeJ)^I)u7%dNasx;&LDei?Din+bHkK|=kc8Gr{&%~m2Tm4K~#D@ z`E@-%5&eyGD>#pUs6hM(Ka&?bpXL2Xt3aUw>$#lDw4pgSH^}~TR`!R%Bjs{E9}+*W z;W#RwU8SBY`y+9FiTIhhOSs*8EPS~*KJP)~Gq9g%)lwce`Ss$j49UJa_z6r0vyTYe zkYaxVbdY^P@Q+a+bdl%Aj~JAE3CgoS;r+SJxA8v8z^}mMvi~60?L=HmIQdIde+pQV0zdExv^WIT3#6AwF>ckuhF)!JVnyB*1^(>&D0 zp3`|w#Gl6Q{sqks+U-UI^|0sUmqhV@q|W$ly>H$lUa0;*Ed5W;CxDKYf8{+Sas`0A zS4!`PlANq|;^$Q$o}hDYSK*ATBGX;#vFzN=t5}6LL{EW0&~K~qf!|A+UUYL|&#N%+ z6`TE2vwOc7a=~_N)#dv763b3e#}#%E)1T&l__>IAajV}~G|w?lo%9^{$lI*CO7&)X z=??T~eQe6%-jsuXyz1pBcSiMc_!8CZRIbV!L7nGZI2HU(`th6#2mj{p zw>UTd_Zb(y{dZsg(SQDQ`uKO$$F_g&*jX36_NQOXyy{K&!Y`=sckIH!3Q(nvRc=4s zwRO5u{q&l)%J9!jAD=ujFgacUN$E&w*P+SSl*dZ%Iy62Gmgzq-R@yaL8rpH>iru?0 zlR=ruBb$qpMeAVzRcgl(e+UKqQLg3>PcSC}=Ptp~556249@(P{L~73XIs18)s!8Q% zJPX>VUe0I|AL%pwn;70RQk*sth;1UN^I948Fb!d_xuZ3(LSSEl&7jrE!1k z&~+?l5U}gVM@ROU84AW%rv6L6_2gbtc9z#Q(Q;Re>_6)FUp{d5`Rh0MyNVhBZ@)q zjo!_)&+%5oVT{&@|HgEW4~&l<9v&(U4G0eoj1TWBVNrws_7o=uP(mQ)VFU&0PLA#x z-R~b7+cRDqD*2N~Fin&G?&0wX(sGv-wqLgMiY;5V_wR_*3V&`c>?rKq+W)dkcU%@e zjQoPnJNLhaFP^MaIrFK9D}mt#KXr0=G}P5eg(r{f7@aKchZt`iJ}^A9?0D(mq2ck; z&=rSE<9kcRAsfdigue*GW8hmx$9IeuMF!JzuwG}UV=>?A=NjQ=;w4KRf;@A|F4<=c}L3he{8g}{X>0_yq~=&CU^ul{oDoRQn+r| zdK%uM{_`lPD)$HYj!`XONo6Xk&mTT^JgH9btof3>_Nhj#5MO&BL; zCOl4OiZ3*S1Zbl_wEtMP$bm!qH~M=@ll|j+CbokWCPB@zg`-+X(69Mug~P8Y&-+yFs!;9E z&@BYtx_%=#ljiVJcseikF_AGvIZ@mVtvIyN9~-66cDT6zN-k~4R1Us@kmGOHYYxhf zGXKJF7@ZhKK9K8NuMFA;J;I+#N zf23bnFO(@W@}pO7eE4T~J#y$&=fQ9PsF+#Y_u!syt~mQ2I|gof=Bn*~@-gtI&7zqo zDnlx=n@K$ZTZNjg_QS^>hp#uh`{?$V&53q4v_FaH)&1KZNSd|)MO42{!nwR+e0a|={37%Bnthc8 zc=yn5Q#A2<&3mp4<}#^3*uH2nyUe=peEu=}S(W=3|AzR<)9|++MT+2nziD)2vUG&J z$(}06DuL5$S_Y-pmycWu?TgrwT=PT2Ba<&WS67U}XKH7B1*%d5fGw{AZ>j=cQU#u^ z0>5fx-^l3oBNTQ_9vz!0-CGOWckJA9Wq<$9?OU(dQ3aU=hJWIh$)5!}e4=vtMHMG1 zs!vs{9R8}YDh4XZ9|ehm_#guKf#MMacBA9NlSh$HM@$EfK+z~X`rKd%;7~MSAb1Hf 
zQewuV@R(uO2m<(In3FNM=^DT(Q4y+xK;DyZMU8PrR&k=V!-1Fz|-H1pBfksEi*kI`8T4M(^$4P^k-H!arKl=%fgGXcIrL%R@AyWs?%0K+y{QSSt#*a#WRHhOf`Tua_{;tsjWBW@{ zCu)DOvi+@u`<`37eozUXe#@Fi8!8n5jLK9h(q0J&SaTJ4#mTKw_ghbQeEVP({}|ge zEB;|^z2cwFTtNMR(u-6R->jQ@qvu0rz4o_m+ zU4_5Hla=Gg!v`vlFTy|TllZTUSr2+dZbi&s2N3xItIp~d{vCXmnCs6441A{Lm-=D+ zF+4d@+CO71%eW;$FQEJm)vS1R?N|NQSplA;9~XZ8h1-&zn`htu$t&JsekWb;dg=C8 zBt6G7-#m}aPl9~E{I|DX`?5_tHx+hlx@_mm`U_X$=hm&JHg@v8c11<`M|NfMiZP^b zCdd8Wiy%QGB?RJ|gEi&X3|Hp=UHdAFI2SMe^JurM)GHThH3iFDi&dD5#Qi{Vf}Y4RlvB_CT_5{eUOKPb@9ghI>G*rjaU_XwGW_ngJ0t2orCPtH)DbFo z%rWV$ocD2Bt!}lsx06w<^P0b?}RirLdU3fMy zJf$X5m*8zs4e@iW%JFlj+C)!xi7AKnF5!NA)E*GWnIU={cf3R^(mS12=TUTL0Z2F8 zw%J>)RH{YwAcvFOihp|?mE6wlc9>oxPOH#SwK{KRyse<8Pe*l>=}7&c{wTML+R=8I z_X5VN+-}q({b@sglp^@@5WL5NrvV=h!Iwti74di1h94bce!IP(6Z+})3BAyA;)hbb zXwPyy`yJ1D4W8+Qmzc!!df^|Dfw`Mb4H3OnPdW#BQpR-Zg?Korobw31t8I+WNe-!D zdQF`Pxljr`$U_cPPm1_}Kgcts@6q_FKEkV#gUO8yZsm6;;R)VUg5?!b;CzbmrIsYl zqh~Tn^*hRGRd?|FVo7%vOFk#>yoJ&g^QrlH$V>9*fu4Czm;R?%{zxuUESG~?4%b#G zhXeJM!!|UtKy+IQ`V)U3&P+EFWs-lxw^tiJIf)3|q&v=#x|7Ne45%*=8qjN4Eu>%6 zU-wL*hdXG=UY9nIbPae_<0WTtLa@@Z>xcZ5M7k=z^A(a|FnkZoC)xasLJ%hgYmf+ zV6;FlHoac!?-6J>9)AfQH-NL=Z2GE(&oI218pM22I$yia_mr^%%^FX#ncD4Mm>A-E zex%{Y8IK#_k#Xu?l*m!Og&@ZflHbP{YCP$MjOU`nCWb=-3GOJy1EN^%xgf*sUIDikZq4ANJY0&j#4l-Sj8M|nQC~Wj4Lz|ZBb5x%27F|VS?>0B=`jNGubdhddpF0#~lO*X?WZ< z91?xhaJzE}(mi)X(p{I^r2t30l)V5vO@sJN_&ihIy^Z@br0Ka-_xG4?ziN>F!tUj% zoR+TywPRRyAVuu0Y7jc~A|3QX@@~Sr@GL{82BBxa%l&fBHgwW-4d`@Fluiw!LZ{3< zLZ^(TQ%2J%!*p-*>6FoQLi;0h%G@Y)%G{>uG!dl};6$f$EjkVObh_QpX@KZJbRm9u zy8SA{{cZB;gm!`s36JU6j-0V_g=D7h!WD*2B2Vrr@R8&kbseGl((VZHAAFlptB`($ zk)!M(Ek}0VF8J#x z|J-|}ec8X3_GLByvYLO{yQ6ZRJx2X>RQ7J+9~@7X^X!|1f7y2k{~Bi`y|Gc`9B|@a zajyJxW&9C6X}qcIA2EM@{-K@3zY(-!SS_Xus*m#t9eR-tIFgy}3lAGQWo6v;H*&w6 zTMV5R5FYRk3V{2w;Nwv`HT{9m$^DSf335mBg@K!+a*jI_rIV}ag!bP-a6>2ePN9?g zc1@?_Q91!mbV3^nkSkvw44Cqb?_fF%!J?Dgsm4Fzc6d$4gih{VAv$G|p^50?=H27u z_qdm$JtVikuhjiizTegTM*3YyFKBu0MgN$7T2D=68ScE>)QgToe>fl0^m816zfZrz zO(%S<|0s0-$R_w9cyGZ>LfS38*~Egb!Ot>%_}wh zSPA%xH2g8(pnmU9N_(Hs?fqp!-hZy&Ps+P#|H+dM+hYfCYJck%jYm1#hz>2U6a2q2 z{rpmi_H7RMaD?cfO7LOEiRHtb>3lrCd`pYa3H4@}Zm@~OSIj@Oo(kxkM|$&%r62A( z;3N564>;tv75XjQe&;(Do_@hYd6%!_Xea3d$gSu-Ex*!lC}GvC`e17? z)bNB3fkVRMjPnQn@Mffk{9)C_?Qc>)QC{K@=wQ!7n+Q+H|Ec{&|{m$*d`>pyN;w14T$OZHW#hGTPzT=1s zk$hbjnaAgKJPAlK0f&!_e_-O*SJXC*`_<25T`U%^uPJ+63DH@j?4@hDp7DzAu zQuImseyzU|ucLW%3t~4X2=3vX^r_b0_$6_-PNO$u{;GP@ny!g0;-^V^vOg*M*hBt1 zQCcO>bjG0^PNvu&X&TXfP)ACaPo+d}r?v?m{obF-iC#|)QhDIHK*QIN9i#9Oze6l? 
z><=O?b~l`_y}BRm?F9CQ>uqc#Jq~-+VdGnYgIaCUMMJad0Vj+|M^a`P_omU#+NbxNW_M zIuH+s_Ch=r^liINk2~^DD1O^O`S7H1)rav3GN_9&?l08kW{oq$_r#?l|7M&aenfaY zj6)u0i7h-%bUaL@v3}8txhw>l?74eGa!C50Nz!<8cO-c}?e55^V|mr$Elr9pk3XKRdU`Q#q~4VpkHp9OHT{*Q zyK?4Mk3XKRdQ_r5_+>mpEnlzHMtsJ1c&50FSqW0o=B;yDQ*dKyn(Yzgqk^R=_WMD`;Pp_?7lw;CFcn zwv5zD9(LIsrFucdE?*vHeIldvLzA}0X|M`>PQF$1{iOp#%qO*S1+PcE6#NHYTDA(G z+&z#J_+5wE|XOeXr6cjmvKj^W4sFLf|6_hQp!yaEAkN1S0XC&3} zZk7YqN6^ok)J;?_*(T+o57@4wpR5NQCFNy4M`{e^Ngky<6lYn#axCIkUOCIJwERe@ zXV%xv*ZBIH@YDF<`4FPCB)3OOl>6dNyV32G-@^KP46WgRHMfo>YhCqh5~y|4QkfOSQh9n|_1Lo*?|~ zrUmt7l8lOJSOs|{jWfe#QR?XJYL=c zxFv5`ogsM+;5L4iw-CHa{7U;Vr`iA7;^i&>3v6W>hFWMm&l z^Fd8#_-~5zKF<%-68wT5YrC?4MDqo&(5ttoc!)r?2Om z9Huih3w$N|b6g-@koma#N{j9t>}Pbq;?4`*-QS7US9iXOe?a||@&3D9Uuk_O^Vt9% zFGm5~9`CCkTiJ3BAMR(Z`Z+^b!zt91T zOF{=F{zc%&u|L`P^`2)`3+AncUQNiYl8|K?$Nu!!n61#etVz~?fy}maM;JA zT$_GJD$uW1{v54n2-cb-pD^YlKpF2@tOoh^r9xo^2y z%k5%4Zv6Qj=Gj+*Pq%1(Yq`!~OgY3~S?|Qc3F;w!zDDO`y}`}k_4o{Ue}eKGf773j z>iC0lHp}?5=A$0k!*R;wfW#@$Uy{SLUI)4%rPl{DydL20NQL8*kTWx%b6r|kh(9jzBo4X4#39-4l!!x;UJJ(|1AoQVPvUO`aND1e{iy-m(!;LA zAp^LLU-WzcuNJ?=A%pTZewn`q@G9}EPWJzy7n~us6J&-+evO^zNNGFo42m7acxF3l z$L&&N=ZRl359$Rz=$wmwjCjvh@gxWj@n6q(*xw?4hW-1}cGtG6 z@$?AF+x!&&K7iZw7I_KaRpM8@mVXag909$t%dNbBul4Xn&tM$g1!D$#&3rO`ew(%f z+83H(#vbSbmfZ5ltB+lyo zA^JU^>z=P6{M>(u+t9fiqOUKg#zDyg9b_wbiYky0gwlkYXZw@_r5ySMB$Tzbx|YudjCb^RYjr z_LuQbYt$D$9nAQt6tDL%F4;elc)h<*OXA{9=pgGo_Xdn7?r)moM(&QpCf--G1AaZF zH|z1PGCHo5xJLAI1N?EKQ$r?)=dafTe7#g%y#Dm82f$B)9EaC$_53sI6^L8#bIT7W0b%OD&`W521Y_1jicuHyP_jq9JkuXQ#d{;fmi> z8h?#(7=L}V)!}%nS-%b9%&aeicw7d0D6q4%&MyAUpOJpT8|hWDpNoji+P>c)W_{>-N)$)yZFoN zZIQoI_rs)rG>(K1@8J3ZKiZZ@k(U4-@An08%dhm~u{7SH|J^?~_DuYds_jSutQC3s z^|~+iIjLZu&abAP9~6Hj7*F6I#tg(SHACZ>;zl%jwLh(LvaiW~r||nMdSfJV-d(Aj#8F^2k_^wK)ePyCbZS##62|2R zW*kFhQ9IKOBiv4fmHmxWKhCjWd}sahVCr^Wr*twSG9PGIPx`6hDRdY9r0P#^qWyBD z7z_07$4N4IIj5Z7MEJ8_DyPfqI8ej2`n=z@avmAy6j86E(wq1kkjh*m>FH}EU;2RT z@6+d}Ne}iTJBsjrV;w(k#abh`BL}@i^Qs5M@6CG9e;51fB+9L{&MWi6z~02`hX8Kr zhgs{qWm*&%~=UC1rK!3fys~m^-!?>X33GlkRHKdL8;fz{|z`rQLx&jhC|kZp(>@i}?HOVCt7QexYqU$3rOID0crn71~j2 zd`;vvkcYpF*5~hgXi@XB{tkaq`~*Kfo>-&z;myS#q;`vcMs(PN^@1<^a()5nb$UN? 
zG4ut-9{j%!f8Koy_w$mHdPL^k0X&|+0o>xRUIz`}wjPmnqX1qdezj23jTQ{A49V~3 zb^p!2I$EzmEDmJicr=lV?IiZch<|SOMe;f*0T>^lxQT_weVE%B_}@1D zQ9QCP7~jsIyoH~thrOeCve+$o55{dJy;>uk+)DdAYP9#}91rxUUKc5$M~xmc>vpYL zE;+sn>To={@ylI*C2?MoKk^fpkrAMF3gUMu=q^W#~gziQ`xccNYKda_1-t4^{Huxo#z_uT|`u6$hHmeaEQYW!Sh zbJYI$`?lI08IQt0lKTO*8SQy5kF&te+2c(59l+zqSpbiYvvxFJ_Wwljw6kP@|Fic0 zw4*?|eR#F*|EZCW?op;!p#N-oN&5qMJiP*VlwQjHE5;w_EgL@(G`t@o9)D2Y#{WLT zPlHhW*%^d)ppR+)pNv!E*BblNn~-=>ufyy4luhhUY0>-@J{Q?fnLKq`6-RqCs!c*=F<)I+QasR{nAFP zbKOsHp-bSeRkAZR+JTC8hS!a1pc9v8f4#h&k$!7?i~25Pf3DZ^=Yn~i#ZP@-TL6#e zX8^b5UiP&Ga2tQE^G;(gX*~?`Kzycg(g?NBa$(kOx)3*Ie{N3kQs7x}sqAZ8lh^x$ z`FxA<=Ts|>=8>PZ1Q8E(aaTJw2D(!$>qjzgx+M*xII06^UxtY1Fztes%sfXyx z{vF#*tncf6I|~uV;dZpRs=M(s%esxzid?g<4?qMUVU2HDA=g1$tkqoSRb#UT10j z439su158i%1K?jCZ%WBIIkWH1>}N3hWd;Gi1i7kR$Cv$tbB;He`9te}Q9nZSG@Ea= z?!Oeh8}QBMdxiZ`xRy}sAua!46VE>=J~Z66&W{h+@&FR1Fn-(<$R~7{_@3CQ((6)x z7g?8DH_LCSa$m&4pNpKFL-Vd-G>Yy~SdRM_9jvvHo+ElgF5Q1{C_dJ$a>_$`8{a^2 zu3C!Qvy_4#t?orPke}`^bS`dNzYOY_*2@|PBkyMZrLtS&(CKg=6f{HL;7kT z??aJ%B*#kiq0T2Lo%KYL-d)SzB(+8IOY40?cfpTyKxVxmdQWUL?!0jpy=h$|Cf-cP zb5DRCuK0F_UrzBk?Ifi#~!cs?(dgE z{U!MYe(b;nlhOY*Rq*-CdR&Q~+U(^;e$77owv9%gLN8-2!Pb{E=De8Em-J5h+|rkG zmOHcm$+TxRw+EfA5T|E-iCiRr!)+T&>K}i<7#bS#7il^%G926|Q{w(4XfE-GtoxIC z{PgJkNo0T7e{!#Z{{`Bj8#Irham({Q_gb{KoZN_>4#q*1fj%$SEw%+(zug)WW zx##xO>GM*eKk-HD4W>8V%zWI$eFgz1`!9AP@B_>`)J_dI_mkZ!?{k*>=sim2UT%^% zxtAMwtGSn(0=)&&!~Zwhzd5%jYHv+@SZ?{AaL}o$_FS#o^OXwnUZb9LzxpfE-&=Km z?R}O>=ttJqe1D8`{rjP{-He$G)BOh8m4{I2b4>0NhR(X( zIp{a6voCsB+ruXpPf?kD_4)(;-uiN9(6 zF6%Bzou&0bPVz7MB`{-r)*b&Pb;eJAKJ~}6|3!7iZ`0>f^NjzKb;i&0oe+H{>x|!~ z&#`&N|F$~gx9RiNI^(zL^FW>P+wH%<0{&NverSDHoyIrCU&7<>Unf%RpPH%&r<)wd{_~oQ|)@3_MwLH zpI>MEJTD6Asn6FLKke%YxBqi>#@}z@|7@M{Q+z$#{?F7If1ic_({;vg+v$IvXZ*ie zXZ#eG3e$hM&iHLR@xpn=|4((spRnlji8|w_d25*dYwC=@)55>H&iH8_9&Z1|QT%Qt z`$h5eEU2|wA9 z{UfMP)%E*hv*q(6ET0#bluyjpv91unV-u{-y6NqGrxM7>WkF_mE=szLCLu2z0q+pOAl9z-}aZ^RA>D5IQioU{@L<%eX5`K~YpnR41XYu2IS1JD|>m>irMCHE)_q;{y;;*Xwd*Cn4 zPt3Z2ZHKOk$PY4F`lgcn+xYGI(2MJg-){ev^Nj!UI^(y;@5cJzZ+Y?*^`p%{TYueD zpZ59kXXD>nBmPSI{T1_!{}1LF|D6@^KU!x$*p7!?QK9~7^@AOceEB@%{}UTO_Mh>( zD&~#wY25Shd=zR=Dc*DRi$zH zM-}R?Bv&?mJC1lyo$=fJxxGgGw%#Q_DI7<9bB*{b(dUhI#&6sGH`E!wz0ULcI^(zb z`??zOYyP&rm^H{zbmTGagWQ8 z^BU6QsUf<5r1glI4`9PI$0;*>KL~CVDM*|W-`{>A^5eb{i7Tpua{mbK1)=e&GVAz! zz+p2Vz(vz^Uf^ysA29c{I{N+*eUFNR@60_y<-FKfFH#JN90@*9#9TTU^6 zYd#kf-mhw}UzLbcFPbVr|K4eNC$Aqax@8t02IrFxI!+zX z{cES0?q84UXaD{;TkapI6W!lbg6=pk7Kx9lk3j zD|O=M=SuL?UhkP_e){@sDb};;JmfN*qrVUARY^H7IO6dBeE5fazNv2hq3s9#wa9^5 zr}xpXly_Yptt*Nja7G2WAV0VkJ6Z{Um3FkE{p_>k&YWMf^hlf8A7I+nHMK zdZPM!*8YQemOHA?fgNvqIXz)7S7E^z=W6x)rO|h!zf9+cAA$T=%!1<_BjdZC;V+&A z->Tso^z%B-w{ccJoMQt1&2~B|ywrJU$F&TNes?Y? 
[... GIT binary patch data omitted (base85-encoded blob, not human-readable) ...]

literal 0
HcmV?d00001

diff --git a/etc/multivm_bootloaders/vm_gateway/proved_batch.yul/proved_batch.yul.zbin b/etc/multivm_bootloaders/vm_gateway/proved_batch.yul/proved_batch.yul.zbin
new file mode 100644
index 0000000000000000000000000000000000000000..2506ce065d74fc6a610beac3843b6c147c63cf10
GIT binary patch
literal 71904

[... GIT binary patch data omitted (base85-encoded blob, not human-readable) ...]
zc|7~2X#*EL^>3Tx`*wFqYf>&?9^xa8h z3qVBm+cccpOD0G6j7I#jtM;Y~@a~b_0j#7S&;Rqw0$L`Ohy9Bdv+Jz=&gVXAKdW&c z}7u6`w*C>D9*xs@6tH&tpnLaWxTe`QEw(r=v^|CEnc5dHx z$&MPptYY{lewF-Ppu?xq(?6;_m8w2fIX(PV6EzG}kiQBN74b;~<@?Kr5m=5-j!qvz zJ|8h9I099p<C0(69%H2kP#Cz9xaa?Rz2b; zJPsa(@x(`~DQVi3h@bjk2`zIu0E6(0^Z~DSd>^9MRJ^l#+ z6M><9#olP;gx`_=d0`;hcS7~pqnkeXzn8}z*f-aod(3_myTAV2hjm_h_c;8`i0`RY zO`v9C&4QcjdATvT4SrmI?nlh;kiG^*?AA{4JW*+dsaWNTcyxDpbTZ_OKysx4;D^aS zW}SG2?bGE+P9TMbGS<`Iw*ON{?16O?Q}$LF-H!d+*ZqXY!J~=E%9*x^dJ6{ zKleS_aFX^%b<)bQ{t(XG-!;B}VqYcdMC~i5+ut6SFHX>OIih#QJu6Z z-D!m=Yp+qBO57T4zx9*l-#$>oKgRaZ)IY50r~c{81=J5XeLB^Zng!>ZcXZtnYY+F- zuC#5}(nsxQ(R=O2!n4)#87-{w`={T1xqdT2V` zfiQCl>?DP-SBwVh6NeJBX5kPHO&<(z5`G<;PLCfC?N6Uygn!m0@n03Q9*l(CikQI; zO5_KuI;&s!cko?eu0K~W<+Clnv=8Ht(dntmzFD-a;+6!Xfbu(7x8l{UU;KRMLVCn zrF0p7Zrf&BV+Y~2%TnbZ*_G)_CXl|Fp7aOKg9MFL5Qtw8tZ830n$G{b_NI$C94fKD z(S@jRc&Wd>(|u@VIgpg(sx{WnewEy+n{~ImXCe*JxhLh+7+AMRQ%aXp8UkKHmtdI)3(db{`;rA@c#qCLjL~% literal 0 HcmV?d00001 diff --git a/prover/Cargo.lock b/prover/Cargo.lock index e5b42f1601b..dbc3b3425e4 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8159,6 +8159,7 @@ dependencies = [ "zk_evm 0.141.0", "zk_evm 0.150.6", "zksync_contracts", + "zksync_mini_merkle_tree", "zksync_system_constants", "zksync_types", "zksync_utils", diff --git a/yarn.lock b/yarn.lock index 255bd901e03..58511dd1b9f 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1424,18 +1424,6 @@ resolved "https://registry.yarnpkg.com/@iarna/toml/-/toml-2.2.5.tgz#b32366c89b43c6f8cefbdefac778b9c828e3ba8c" integrity sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg== -"@isaacs/cliui@^8.0.2": - version "8.0.2" - resolved "https://registry.yarnpkg.com/@isaacs/cliui/-/cliui-8.0.2.tgz#b37667b7bc181c168782259bab42474fbf52b550" - integrity sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA== - dependencies: - string-width "^5.1.2" - string-width-cjs "npm:string-width@^4.2.0" - strip-ansi "^7.0.1" - strip-ansi-cjs "npm:strip-ansi@^6.0.1" - wrap-ansi "^8.1.0" - wrap-ansi-cjs "npm:wrap-ansi@^7.0.0" - "@istanbuljs/load-nyc-config@^1.0.0": version "1.1.0" resolved "https://registry.yarnpkg.com/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz#fd3db1d59ecf7cf121e80650bb86712f9b55eced" @@ -1727,24 +1715,15 @@ resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.5.0.tgz#40cb454fb187da4bb354f3acb48762a6657fcb36" integrity sha512-7LAgYYwoKWHeR+3CyWEvA3NKBKtt7ktcr7SX6ZPgbEYqHAdXH02vxJZGwNADtMWpyYm8h+fEQkpPIgErD4NhmA== dependencies: - "@matterlabs/hardhat-zksync-solc" "^1.0.5" - chalk "4.1.2" - ts-morph "^19.0.0" - -"@matterlabs/hardhat-zksync-deploy@^1.3.0": - version "1.3.0" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.3.0.tgz#5c2b723318ddf6c4d3929ec225401864ff54557a" - integrity sha512-4UHOgOwIBC4JA3W8DE9GHqbAuBhCPAjtM+Oew1aiYYGkIsPUAMYsH35+4I2FzJsYyE6mD6ATmoS/HfZweQHTlQ== - dependencies: - "@matterlabs/hardhat-zksync-solc" "^1.0.4" - chai "^4.3.6" - chalk "4.1.2" + "@matterlabs/hardhat-zksync-solc" "^1.2.0" + chai "^4.3.4" + chalk "^4.1.2" fs-extra "^11.2.0" - glob "^10.3.10" + glob "^10.4.1" lodash "^4.17.21" - sinon "^17.0.1" + sinon "^18.0.0" sinon-chai "^3.7.0" - ts-morph "^21.0.1" + ts-morph "^22.0.0" "@matterlabs/hardhat-zksync-node@^0.0.1-beta.7": version "0.0.1" @@ -1789,7 +1768,7 @@ chalk "4.1.2" dockerode "^3.3.4" -"@matterlabs/hardhat-zksync-solc@^1.0.4", "@matterlabs/hardhat-zksync-solc@^1.0.5", 
"@matterlabs/hardhat-zksync-solc@^1.1.4": +"@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4": version "1.1.4" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.1.4.tgz#04a2fad6fb6b6944c64ad969080ee65b9af3f617" integrity sha512-4/usbogh9neewR2/v8Dn2OzqVblZMUuT/iH2MyPZgPRZYQlL4SlZtMvokU9UQjZT6iSoaKCbbdWESHDHSzfUjA== @@ -1823,10 +1802,10 @@ sinon-chai "^3.7.0" undici "^6.18.2" -"@matterlabs/hardhat-zksync-verify@^0.4.0": - version "0.4.0" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.4.0.tgz#f812c19950022fc36728f3796f6bdae5633e2fcd" - integrity sha512-GPZmAumFl3ZMPKbECX7Qw8CriwZKWd1DlCRhoG/6YYc6mFy4+MXkF1XsHLMs5r34N+GDOfbVZVMeftIlJC96Kg== +"@matterlabs/hardhat-zksync-solc@^1.2.4": + version "1.2.5" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.2.5.tgz#fbeeabc3fea0dd232fa3c8cb31bd93c103eba11a" + integrity sha512-iZyznWl1Hoe/Z46hnUe1s2drBZBjJOS/eN+Ql2lIBX9B6NevBl9DYzkKzH5HEIMCLGnX9sWpRAJqUQJWy9UB6w== dependencies: "@nomiclabs/hardhat-docker" "^2.0.2" chai "^4.3.4" @@ -1871,20 +1850,20 @@ sinon "^18.0.0" sinon-chai "^3.7.0" -"@matterlabs/hardhat-zksync-vyper@^1.0.8": - version "1.0.8" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.0.8.tgz#d5bd496715a1e322b0bf3926b4146b4e18ab64ff" - integrity sha512-XR7rbfDuBG5/LZWYfhQTP9gD+U24hSJHDuZ9U55wgIfiQTOxPoztFwEbQNiC39vjT5MjP/Nv8/IDrlEBkaVCgw== +"@matterlabs/hardhat-zksync-vyper@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.1.0.tgz#b3fb304429e88a84b4abc3fe4e5a83b2f5e907bd" + integrity sha512-zDjHPeIuHRpumXiWZUbhoji4UJe09jTDRn4xnxsuVkLH7qLAm0VDFzCXYNMvEuySZSdhbSbekxJsH9Kunc5ycA== dependencies: - "@nomiclabs/hardhat-docker" "^2.0.0" - chai "^4.3.6" - chalk "4.1.2" + "@nomiclabs/hardhat-docker" "^2.0.2" + chai "^4.3.4" + chalk "^4.1.2" dockerode "^4.0.2" - fs-extra "^11.1.1" - semver "^7.5.4" - sinon "^17.0.1" + fs-extra "^11.2.0" + semver "^7.6.2" + sinon "^18.0.0" sinon-chai "^3.7.0" - undici "^5.14.0" + undici "^6.18.2" "@matterlabs/prettier-config@^1.0.3": version "1.0.3" @@ -2324,11 +2303,6 @@ resolved "https://registry.yarnpkg.com/@openzeppelin/contracts/-/contracts-4.9.6.tgz#2a880a24eb19b4f8b25adc2a5095f2aa27f39677" integrity sha512-xSmezSupL+y9VkHZJGDoCBpmnB2ogM13ccaYDWqJTfS3dbuHkgjuwDFUmaFauBCboQMGB/S5UqUl2y54X99BmA== -"@pkgjs/parseargs@^0.11.0": - version "0.11.0" - resolved "https://registry.yarnpkg.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz#a77ea742fab25775145434eb1d2328cf5013ac33" - integrity sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg== - "@pkgr/core@^0.1.0": version "0.1.1" resolved "https://registry.yarnpkg.com/@pkgr/core/-/core-0.1.1.tgz#1ec17e2edbec25c8306d424ecfbf13c7de1aaa31" @@ -2659,16 +2633,6 @@ mkdirp "^2.1.6" path-browserify "^1.0.1" -"@ts-morph/common@~0.22.0": - version "0.22.0" - resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.22.0.tgz#8951d451622a26472fbc3a227d6c3a90e687a683" - integrity sha512-HqNBuV/oIlMKdkLshXd1zKBqNQCsuPEsgQOkfFQ/eUKjRlwndXW1AjN9LVkBEIukm00gGXSRmfkl0Wv5VXLnlw== - dependencies: - fast-glob "^3.3.2" - minimatch "^9.0.3" - mkdirp "^3.0.1" - path-browserify "^1.0.1" - "@tsconfig/node10@^1.0.7": version "1.0.11" resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.11.tgz#6ee46400685f130e278128c7b38b7e031ff5b2f2" @@ 
-3341,11 +3305,6 @@ ansi-regex@^5.0.1: resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== -ansi-regex@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.0.1.tgz#3183e38fae9a65d7cb5e53945cd5897d0260a06a" - integrity sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA== - ansi-styles@^3.2.1: version "3.2.1" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" @@ -3365,11 +3324,6 @@ ansi-styles@^5.0.0: resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== -ansi-styles@^6.1.0: - version "6.2.1" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-6.2.1.tgz#0e62320cf99c21afff3b3012192546aacbfb05c5" - integrity sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug== - antlr4@^4.11.0: version "4.13.1" resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.1.tgz#1e0a1830a08faeb86217cb2e6c34716004e4253d" @@ -4481,7 +4435,7 @@ cross-spawn@^6.0.5: shebang-command "^1.2.0" which "^1.2.9" -cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3: +cross-spawn@^7.0.2, cross-spawn@^7.0.3: version "7.0.3" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== @@ -4784,11 +4738,6 @@ dotenv@^8.2.0: resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.6.0.tgz#061af664d19f7f4d8fc6e4ff9b584ce237adcb8b" integrity sha512-IrPdXQsk2BbzvCBGBOTmmSH5SodmqZNt4ERAZDmW4CT+tL8VtvinqywuANaFu4bOMWki16nqf0e4oC0QIaDr/g== -eastasianwidth@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb" - integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA== - ecc-jsbn@~0.1.1: version "0.1.2" resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" @@ -4855,11 +4804,6 @@ emoji-regex@^8.0.0: resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== -emoji-regex@^9.2.2: - version "9.2.2" - resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" - integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== - encoding-down@^6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/encoding-down/-/encoding-down-6.3.0.tgz#b1c4eb0e1728c146ecaef8e32963c549e76d082b" @@ -5830,14 +5774,6 @@ for-each@^0.3.3: dependencies: is-callable "^1.1.3" -foreground-child@^3.1.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.1.1.tgz#1d173e776d75d2772fed08efe4a0de1ea1b12d0d" - integrity sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg== - dependencies: - cross-spawn "^7.0.0" - signal-exit 
"^4.0.1" - forever-agent@~0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" @@ -6126,17 +6062,6 @@ glob@8.1.0, glob@^8.0.3: minimatch "^5.0.1" once "^1.3.0" -glob@^10.3.10: - version "10.3.16" - resolved "https://registry.yarnpkg.com/glob/-/glob-10.3.16.tgz#bf6679d5d51279c8cfae4febe0d051d2a4bf4c6f" - integrity sha512-JDKXl1DiuuHJ6fVS2FXjownaavciiHNUU4mOvV/B793RLh05vZL1rcPnCSaOgv1hDT6RDlY7AB7ZUvFYAtPgAw== - dependencies: - foreground-child "^3.1.0" - jackspeak "^3.1.2" - minimatch "^9.0.1" - minipass "^7.0.4" - path-scurry "^1.11.0" - glob@^5.0.15: version "5.0.15" resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" @@ -7049,15 +6974,6 @@ istanbul-reports@^3.1.3: html-escaper "^2.0.0" istanbul-lib-report "^3.0.0" -jackspeak@^3.1.2: - version "3.1.2" - resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-3.1.2.tgz#eada67ea949c6b71de50f1b09c92a961897b90ab" - integrity sha512-kWmLKn2tRtfYMF/BakihVVRzBKOxz4gJMiL2Rj91WnAB5TPZumSH99R/Yf1qE1u4uRimvCSJfm6hnxohXeEXjQ== - dependencies: - "@isaacs/cliui" "^8.0.2" - optionalDependencies: - "@pkgjs/parseargs" "^0.11.0" - jest-changed-files@^29.7.0: version "29.7.0" resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.7.0.tgz#1c06d07e77c78e1585d020424dedc10d6e17ac3a" @@ -7961,11 +7877,6 @@ lowercase-keys@^3.0.0: resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-3.0.0.tgz#c5e7d442e37ead247ae9db117a9d0a467c89d4f2" integrity sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ== -lru-cache@^10.2.0: - version "10.2.2" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.2.2.tgz#48206bc114c1252940c41b25b41af5b545aca878" - integrity sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ== - lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" @@ -8264,13 +8175,6 @@ minimatch@^7.4.3: dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.1, minimatch@^9.0.3: - version "9.0.4" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51" - integrity sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw== - dependencies: - brace-expansion "^2.0.1" - minimatch@~3.0.4: version "3.0.8" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.8.tgz#5e6a59bd11e2ab0de1cfb843eb2d82e546c321c1" @@ -8283,11 +8187,6 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8, minimist@~1. 
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== -"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.0.4: - version "7.1.1" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.1.tgz#f7f85aff59aa22f110b20e27692465cf3bf89481" - integrity sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA== - mkdirp-classic@^0.5.2: version "0.5.3" resolved "https://registry.yarnpkg.com/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz#fa10c9115cc6d8865be221ba47ee9bed78601113" @@ -8310,11 +8209,6 @@ mkdirp@^2.1.6: resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-2.1.6.tgz#964fbcb12b2d8c5d6fbc62a963ac95a273e2cc19" integrity sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A== -mkdirp@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-3.0.1.tgz#e44e4c5607fb279c168241713cc6e0fea9adcb50" - integrity sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg== - mnemonist@^0.38.0: version "0.38.5" resolved "https://registry.yarnpkg.com/mnemonist/-/mnemonist-0.38.5.tgz#4adc7f4200491237fe0fa689ac0b86539685cade" @@ -8780,16 +8674,6 @@ package-json@^8.1.0: registry-url "^6.0.0" semver "^7.3.7" -package-json@^8.1.0: - version "8.1.1" - resolved "https://registry.yarnpkg.com/package-json/-/package-json-8.1.1.tgz#3e9948e43df40d1e8e78a85485f1070bf8f03dc8" - integrity sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA== - dependencies: - got "^12.1.0" - registry-auth-token "^5.0.1" - registry-url "^6.0.0" - semver "^7.3.7" - parent-module@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" @@ -8855,14 +8739,6 @@ path-parse@^1.0.6, path-parse@^1.0.7: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -path-scurry@^1.11.0: - version "1.11.1" - resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2" - integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA== - dependencies: - lru-cache "^10.2.0" - minipass "^5.0.0 || ^6.0.2 || ^7.0.0" - path-to-regexp@^6.2.1: version "6.2.2" resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-6.2.2.tgz#324377a83e5049cbecadc5554d6a63a9a4866b36" @@ -9863,11 +9739,6 @@ signal-exit@^3.0.2, signal-exit@^3.0.3, signal-exit@^3.0.7: resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== -signal-exit@^4.0.1: - version "4.1.0" - resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04" - integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw== - sinon-chai@^3.7.0: version "3.7.0" resolved "https://registry.yarnpkg.com/sinon-chai/-/sinon-chai-3.7.0.tgz#cfb7dec1c50990ed18c153f1840721cf13139783" @@ -10199,15 +10070,6 @@ string-length@^4.0.1: char-regex "^1.0.2" strip-ansi "^6.0.0" 
-"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: - version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - string-width@^2.1.0, string-width@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" @@ -10216,14 +10078,14 @@ string-width@^2.1.0, string-width@^2.1.1: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" -string-width@^5.0.1, string-width@^5.1.2: - version "5.1.2" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" - integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA== +string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== dependencies: - eastasianwidth "^0.2.0" - emoji-regex "^9.2.2" - strip-ansi "^7.0.1" + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" string.prototype.padend@^3.0.0: version "3.1.6" @@ -10282,13 +10144,6 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - strip-ansi@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" @@ -10303,12 +10158,12 @@ strip-ansi@^5.1.0: dependencies: ansi-regex "^4.1.0" -strip-ansi@^7.0.1: - version "7.1.0" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" - integrity sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ== +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== dependencies: - ansi-regex "^6.0.1" + ansi-regex "^5.0.1" strip-bom@^3.0.0: version "3.0.0" @@ -10413,6 +10268,7 @@ synckit@^0.8.6: fast-glob "^3.3.2" hardhat "=2.22.2" preprocess "^3.2.0" + zksync-ethers "^5.9.0" table-layout@^1.0.2: version "1.0.2" @@ -10664,14 +10520,6 @@ ts-morph@^19.0.0: "@ts-morph/common" "~0.20.0" code-block-writer "^12.0.0" -ts-morph@^21.0.1: - version "21.0.1" - resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-21.0.1.tgz#712302a0f6e9dbf1aa8d9cf33a4386c4b18c2006" - integrity sha512-dbDtVdEAncKctzrVZ+Nr7kHpHkv+0JDJb2MjjpBaj8bFeCkePU9rHfMklmhuLFnpeq/EJZk2IhStY6NzqgjOkg== - dependencies: - "@ts-morph/common" "~0.22.0" - code-block-writer 
"^12.0.0" - ts-node@^10.1.0, ts-node@^10.7.0: version "10.9.2" resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.2.tgz#70f021c9e185bccdca820e26dc413805c101c71f" @@ -11152,7 +11000,7 @@ workerpool@6.2.1: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: +wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== @@ -11161,15 +11009,6 @@ workerpool@6.2.1: string-width "^4.1.0" strip-ansi "^6.0.0" -wrap-ansi@^8.1.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214" - integrity sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ== - dependencies: - ansi-styles "^6.1.0" - string-width "^5.0.1" - strip-ansi "^7.0.1" - wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" diff --git a/zkstack_cli/crates/config/src/contracts.rs b/zkstack_cli/crates/config/src/contracts.rs index e6676989e68..6d336b5cfc1 100644 --- a/zkstack_cli/crates/config/src/contracts.rs +++ b/zkstack_cli/crates/config/src/contracts.rs @@ -85,6 +85,7 @@ impl ContractsConfig { ) -> anyhow::Result<()> { self.bridges.shared.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy); self.bridges.erc20.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy); + self.l2.legacy_shared_bridge_addr = Some(initialize_bridges_output.l2_shared_bridge_proxy); Ok(()) } @@ -159,4 +160,5 @@ pub struct L2Contracts { pub default_l2_upgrader: Address, pub consensus_registry: Option

<Address>,
     pub multicall3: Option<Address>,
+    pub legacy_shared_bridge_addr: Option<Address>
, } From ee73a3973b0c65b1d4acef12e4b64db8f813e77d Mon Sep 17 00:00:00 2001 From: Daniyar Itegulov Date: Thu, 24 Oct 2024 20:51:48 +1100 Subject: [PATCH 123/140] feat(zkstack_cli): use docker-managed volumes (#3140) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Host-bound volumes never get cleaned even if you do `docker compose down -v`. Existing dev flows don't seem to rely on volumes being on the host machine, so this is PoC of how we can move to Docker-managed volumes. ## Why ❔ Avoid pesky bugs that prevent user from deleting `./volumes`, rely on Docker to persist and dispose of data as need be ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- .../workflows/build-contract-verifier-template.yml | 1 - .github/workflows/build-core-template.yml | 1 - .github/workflows/build-local-node-docker.yml | 1 - .github/workflows/build-prover-template.yml | 1 - .../workflows/build-witness-generator-template.yml | 1 - .github/workflows/ci-common-reusable.yml | 1 - .github/workflows/ci-prover-e2e.yml | 1 - .github/workflows/ci-prover-reusable.yml | 2 -- bin/ci_localnet_up | 1 - docker-compose-gpu-runner-cuda-12-0.yml | 8 ++++++-- docker-compose-runner-nightly.yml | 5 ++++- docker-compose-unit-tests.yml | 1 - docker-compose.yml | 13 ++++++++----- zkstack_cli/crates/common/src/docker.rs | 6 +++++- .../crates/zkstack/src/commands/containers.rs | 12 ------------ .../zkstack/src/commands/dev/commands/clean/mod.rs | 6 +----- .../crates/zkstack/src/commands/dev/messages.rs | 4 +--- 17 files changed, 25 insertions(+), 40 deletions(-) diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index e4d04b90410..1481e542de5 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -101,7 +101,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run pre_download_compilers.sh diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index fe1d2342764..15d4432191d 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -114,7 +114,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run pre_download_compilers.sh diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index 80142cb6005..cbb4239b572 100644 --- a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -50,7 +50,6 @@ jobs: - name: start-services run: | - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index 2dcb5dadb17..91de5dd51ec 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -75,7 +75,6 @@ jobs: - name: 
start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run sccache --start-server diff --git a/.github/workflows/build-witness-generator-template.yml b/.github/workflows/build-witness-generator-template.yml index 33d78b3cf2f..d9493f97cae 100644 --- a/.github/workflows/build-witness-generator-template.yml +++ b/.github/workflows/build-witness-generator-template.yml @@ -75,7 +75,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run sccache --start-server diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index 7d75fb224d6..ea91fc4a7cd 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -27,7 +27,6 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - name: Install zkstack diff --git a/.github/workflows/ci-prover-e2e.yml b/.github/workflows/ci-prover-e2e.yml index 105ae1f1485..b0b9caf888f 100644 --- a/.github/workflows/ci-prover-e2e.yml +++ b/.github/workflows/ci-prover-e2e.yml @@ -29,7 +29,6 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres ./volumes/reth/data docker-compose -f ${RUNNER_COMPOSE_FILE} --profile runner up -d --wait ci_run sccache --start-server diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 4154885549b..7f719b2240d 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -27,7 +27,6 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - name: Install zkstack @@ -68,7 +67,6 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - name: Install zkstack diff --git a/bin/ci_localnet_up b/bin/ci_localnet_up index 8673a909af7..c399de410d7 100755 --- a/bin/ci_localnet_up +++ b/bin/ci_localnet_up @@ -4,6 +4,5 @@ set -e cd $ZKSYNC_HOME -mkdir -p ./volumes/postgres ./volumes/reth/data run_retried docker-compose pull docker-compose --profile runner up -d --wait diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index c930fa376f5..bd91a5a5b0e 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -6,8 +6,8 @@ services: ports: - 127.0.0.1:8545:8545 volumes: - - type: bind - source: ./volumes/reth/data + - type: volume + source: reth-data target: /rethdata - type: bind source: ./etc/reth/chaindata @@ -69,3 +69,7 @@ services: environment: # We bind only to 127.0.0.1, so setting insecure password is acceptable here - POSTGRES_PASSWORD=notsecurepassword + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker-compose-runner-nightly.yml b/docker-compose-runner-nightly.yml index cadd1009f7a..4a854aa0b0a 100644 --- a/docker-compose-runner-nightly.yml +++ b/docker-compose-runner-nightly.yml @@ 
-1,4 +1,3 @@ -version: '3.2' services: zk: image: ghcr.io/matter-labs/zk-environment:latest2.0-lightweight-nightly @@ -15,3 +14,7 @@ services: extends: file: docker-compose.yml service: reth + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker-compose-unit-tests.yml b/docker-compose-unit-tests.yml index ddbc76bb196..b839be2d9f4 100644 --- a/docker-compose-unit-tests.yml +++ b/docker-compose-unit-tests.yml @@ -1,4 +1,3 @@ -version: '3.2' name: unit_tests services: # An instance of postgres configured to execute Rust unit-tests, tuned for performance. diff --git a/docker-compose.yml b/docker-compose.yml index 1e3a273ec9a..d8f40720fe8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,3 @@ -version: '3.2' services: reth: restart: always @@ -6,8 +5,8 @@ services: ports: - 127.0.0.1:8545:8545 volumes: - - type: bind - source: ./volumes/reth/data + - type: volume + source: reth-data target: /rethdata - type: bind source: ./etc/reth/chaindata @@ -22,8 +21,8 @@ services: ports: - 127.0.0.1:5432:5432 volumes: - - type: bind - source: ./volumes/postgres + - type: volume + source: postgres-data target: /var/lib/postgresql/data environment: # We bind only to 127.0.0.1, so setting insecure password is acceptable here @@ -56,3 +55,7 @@ services: profiles: - runner network_mode: host + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/zkstack_cli/crates/common/src/docker.rs b/zkstack_cli/crates/common/src/docker.rs index a5731808814..71e2040ee31 100644 --- a/zkstack_cli/crates/common/src/docker.rs +++ b/zkstack_cli/crates/common/src/docker.rs @@ -14,7 +14,11 @@ pub fn up(shell: &Shell, docker_compose_file: &str, detach: bool) -> anyhow::Res } pub fn down(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { - Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run()?) + Ok(Cmd::new(cmd!( + shell, + "docker compose -f {docker_compose_file} down -v" + )) + .run()?) 
} pub fn run(shell: &Shell, docker_image: &str, docker_args: Vec) -> anyhow::Result<()> { diff --git a/zkstack_cli/crates/zkstack/src/commands/containers.rs b/zkstack_cli/crates/zkstack/src/commands/containers.rs index 9c11cc2e3ef..8367289bd67 100644 --- a/zkstack_cli/crates/zkstack/src/commands/containers.rs +++ b/zkstack_cli/crates/zkstack/src/commands/containers.rs @@ -36,10 +36,6 @@ pub fn run(shell: &Shell, args: ContainersArgs) -> anyhow::Result<()> { } pub fn initialize_docker(shell: &Shell, ecosystem: &EcosystemConfig) -> anyhow::Result<()> { - if !shell.path_exists("volumes") { - create_docker_folders(shell)?; - }; - if !shell.path_exists(DOCKER_COMPOSE_FILE) { copy_dockerfile(shell, ecosystem.link_to_code.clone())?; }; @@ -75,14 +71,6 @@ pub fn start_containers(shell: &Shell, observability: bool) -> anyhow::Result<() Ok(()) } -fn create_docker_folders(shell: &Shell) -> anyhow::Result<()> { - shell.create_dir("volumes")?; - shell.create_dir("volumes/postgres")?; - shell.create_dir("volumes/reth")?; - shell.create_dir("volumes/reth/data")?; - Ok(()) -} - fn copy_dockerfile(shell: &Shell, link_to_code: PathBuf) -> anyhow::Result<()> { let docker_compose_file = link_to_code.join(DOCKER_COMPOSE_FILE); diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs index 4cb419ce7a4..0929f5e4623 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs @@ -5,8 +5,7 @@ use config::{EcosystemConfig, DOCKER_COMPOSE_FILE}; use xshell::Shell; use crate::commands::dev::messages::{ - MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_CLEANED, - MSG_DOCKER_COMPOSE_DOWN, MSG_DOCKER_COMPOSE_REMOVE_VOLUMES, + MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_DOWN, }; #[derive(Subcommand, Debug)] @@ -35,9 +34,6 @@ pub fn run(shell: &Shell, args: CleanCommands) -> anyhow::Result<()> { pub fn containers(shell: &Shell) -> anyhow::Result<()> { logger::info(MSG_DOCKER_COMPOSE_DOWN); docker::down(shell, DOCKER_COMPOSE_FILE)?; - logger::info(MSG_DOCKER_COMPOSE_REMOVE_VOLUMES); - shell.remove_path("volumes")?; - logger::info(MSG_DOCKER_COMPOSE_CLEANED); Ok(()) } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs index a38fff5a178..3d31497b7eb 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs @@ -157,9 +157,7 @@ pub(super) const MSG_UPGRADE_TEST_RUN_INFO: &str = "Running upgrade test"; pub(super) const MSG_UPGRADE_TEST_RUN_SUCCESS: &str = "Upgrade test ran successfully"; // Cleaning related messages -pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down"; -pub(super) const MSG_DOCKER_COMPOSE_REMOVE_VOLUMES: &str = "docker compose remove volumes"; -pub(super) const MSG_DOCKER_COMPOSE_CLEANED: &str = "docker compose network cleaned"; +pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down -v"; pub(super) const MSG_CONTRACTS_CLEANING: &str = "Removing contracts building and deployment artifacts"; pub(super) const MSG_CONTRACTS_CLEANING_FINISHED: &str = From 1eb69d467802d07f3fc6502de97ff04a69f952fc Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 24 Oct 2024 15:13:12 +0200 Subject: [PATCH 124/140] feat(proof-data-handler): add first processed batch option (#3112) MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add an option to the proof data handler to allow the first verified batch to be set. ## Why ❔ To be able to skip some batches if we need to. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/lib/config/src/configs/mod.rs | 2 +- .../config/src/configs/proof_data_handler.rs | 33 ++++++++++++++++++- core/lib/config/src/testonly.rs | 5 ++- core/lib/dal/src/tee_proof_generation_dal.rs | 4 +-- core/lib/env_config/src/proof_data_handler.rs | 14 ++++++-- .../protobuf_config/src/proof_data_handler.rs | 16 ++++++--- .../src/proto/config/prover.proto | 3 +- core/node/proof_data_handler/src/lib.rs | 2 +- .../src/tee_request_processor.rs | 6 ++-- core/node/proof_data_handler/src/tests.rs | 12 +++++-- 10 files changed, 78 insertions(+), 19 deletions(-) diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index a8d136d632e..b3a7c291343 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -22,7 +22,7 @@ pub use self::{ genesis::GenesisConfig, object_store::ObjectStoreConfig, observability::{ObservabilityConfig, OpentelemetryConfig}, - proof_data_handler::ProofDataHandlerConfig, + proof_data_handler::{ProofDataHandlerConfig, TeeConfig}, prover_job_monitor::ProverJobMonitorConfig, pruning::PruningConfig, secrets::{DatabaseSecrets, L1Secrets, Secrets}, diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs index de7f6969b05..1094b1bb180 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -1,12 +1,43 @@ use std::time::Duration; use serde::Deserialize; +use zksync_basic_types::L1BatchNumber; + +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct TeeConfig { + /// If true, the TEE support is enabled. + pub tee_support: bool, + /// All batches before this one are considered to be processed. + pub first_tee_processed_batch: L1BatchNumber, +} + +impl Default for TeeConfig { + fn default() -> Self { + TeeConfig { + tee_support: Self::default_tee_support(), + first_tee_processed_batch: Self::default_first_tee_processed_batch(), + } + } +} + +impl TeeConfig { + pub fn default_tee_support() -> bool { + false + } + + pub fn default_first_tee_processed_batch() -> L1BatchNumber { + L1BatchNumber(0) + } +} #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ProofDataHandlerConfig { pub http_port: u16, pub proof_generation_timeout_in_secs: u16, - pub tee_support: bool, + #[serde(skip)] + // ^ Filled in separately in `Self::from_env()`. 
We cannot use `serde(flatten)` because it + // doesn't work with `envy`: https://github.com/softprops/envy/issues/26 + pub tee_config: TeeConfig, } impl ProofDataHandlerConfig { diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index ce681cc0cc4..3bf4609bb70 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -677,7 +677,10 @@ impl Distribution for EncodeDist { configs::ProofDataHandlerConfig { http_port: self.sample(rng), proof_generation_timeout_in_secs: self.sample(rng), - tee_support: self.sample(rng), + tee_config: configs::TeeConfig { + tee_support: self.sample(rng), + first_tee_processed_batch: L1BatchNumber(rng.gen()), + }, } } } diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index bde07f73280..755d0276910 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -32,10 +32,10 @@ impl TeeProofGenerationDal<'_, '_> { &mut self, tee_type: TeeType, processing_timeout: Duration, - min_batch_number: Option, + min_batch_number: L1BatchNumber, ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); - let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); + let min_batch_number = i64::from(min_batch_number.0); sqlx::query!( r#" WITH upsert AS ( diff --git a/core/lib/env_config/src/proof_data_handler.rs b/core/lib/env_config/src/proof_data_handler.rs index f69aa1d6dc5..b5bfda4544e 100644 --- a/core/lib/env_config/src/proof_data_handler.rs +++ b/core/lib/env_config/src/proof_data_handler.rs @@ -4,12 +4,18 @@ use crate::{envy_load, FromEnv}; impl FromEnv for ProofDataHandlerConfig { fn from_env() -> anyhow::Result { - envy_load("proof_data_handler", "PROOF_DATA_HANDLER_") + Ok(Self { + tee_config: envy_load("proof_data_handler.tee", "PROOF_DATA_HANDLER_")?, + ..envy_load("proof_data_handler", "PROOF_DATA_HANDLER_")? 
+ }) } } #[cfg(test)] mod tests { + use zksync_basic_types::L1BatchNumber; + use zksync_config::configs::TeeConfig; + use super::*; use crate::test_utils::EnvMutex; @@ -19,7 +25,10 @@ mod tests { ProofDataHandlerConfig { http_port: 3320, proof_generation_timeout_in_secs: 18000, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(1337), + }, } } @@ -29,6 +38,7 @@ mod tests { PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS="18000" PROOF_DATA_HANDLER_HTTP_PORT="3320" PROOF_DATA_HANDLER_TEE_SUPPORT="true" + PROOF_DATA_HANDLER_FIRST_TEE_PROCESSED_BATCH="1337" "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/protobuf_config/src/proof_data_handler.rs b/core/lib/protobuf_config/src/proof_data_handler.rs index 4b7bd2fd7c3..a587c702633 100644 --- a/core/lib/protobuf_config/src/proof_data_handler.rs +++ b/core/lib/protobuf_config/src/proof_data_handler.rs @@ -1,6 +1,7 @@ use anyhow::Context as _; use zksync_config::configs; use zksync_protobuf::{repr::ProtoRepr, required}; +use zksync_types::L1BatchNumber; use crate::proto::prover as proto; @@ -14,9 +15,15 @@ impl ProtoRepr for proto::ProofDataHandler { proof_generation_timeout_in_secs: required(&self.proof_generation_timeout_in_secs) .and_then(|x| Ok((*x).try_into()?)) .context("proof_generation_timeout_in_secs")?, - tee_support: required(&self.tee_support) - .copied() - .context("tee_support")?, + tee_config: configs::TeeConfig { + tee_support: self + .tee_support + .unwrap_or_else(configs::TeeConfig::default_tee_support), + first_tee_processed_batch: self + .first_tee_processed_batch + .map(|x| L1BatchNumber(x as u32)) + .unwrap_or_else(configs::TeeConfig::default_first_tee_processed_batch), + }, }) } @@ -24,7 +31,8 @@ impl ProtoRepr for proto::ProofDataHandler { Self { http_port: Some(this.http_port.into()), proof_generation_timeout_in_secs: Some(this.proof_generation_timeout_in_secs.into()), - tee_support: Some(this.tee_support), + tee_support: Some(this.tee_config.tee_support), + first_tee_processed_batch: Some(this.tee_config.first_tee_processed_batch.0 as u64), } } } diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index 4fe3861183b..92ba770a756 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -107,5 +107,6 @@ message WitnessVectorGenerator { message ProofDataHandler { optional uint32 http_port = 1; // required; u16 optional uint32 proof_generation_timeout_in_secs = 2; // required; s - optional bool tee_support = 3; // required + optional bool tee_support = 3; // optional + optional uint64 first_tee_processed_batch = 4; // optional } diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 661c76d2000..e014fca15d7 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -94,7 +94,7 @@ fn create_proof_processing_router( ), ); - if config.tee_support { + if config.tee_config.tee_support { let get_tee_proof_gen_processor = TeeRequestProcessor::new(blob_store, connection_pool, config.clone(), l2_chain_id); let submit_tee_proof_processor = get_tee_proof_gen_processor.clone(); diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 800dede23c7..8e06d0c26bc 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ 
b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -47,7 +47,7 @@ impl TeeRequestProcessor { ) -> Result>, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); - let mut min_batch_number: Option = None; + let mut min_batch_number = self.config.tee_config.first_tee_processed_batch; let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; let result = loop { @@ -72,7 +72,7 @@ impl TeeRequestProcessor { None => Some((l1_batch_number, l1_batch_number)), }; self.unlock_batch(l1_batch_number, request.tee_type).await?; - min_batch_number = Some(min_batch_number.unwrap_or(l1_batch_number) + 1); + min_batch_number = l1_batch_number + 1; } Err(err) => { self.unlock_batch(l1_batch_number, request.tee_type).await?; @@ -156,7 +156,7 @@ impl TeeRequestProcessor { async fn lock_batch_for_proving( &self, tee_type: TeeType, - min_batch_number: Option, + min_batch_number: L1BatchNumber, ) -> Result, RequestProcessorError> { self.pool .connection_tagged("tee_request_processor") diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index a10044cacd9..63ea087a81c 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -7,7 +7,7 @@ use axum::{ use serde_json::json; use tower::ServiceExt; use zksync_basic_types::L2ChainId; -use zksync_config::configs::ProofDataHandlerConfig; +use zksync_config::configs::{ProofDataHandlerConfig, TeeConfig}; use zksync_dal::{ConnectionPool, CoreDal}; use zksync_object_store::MockObjectStore; use zksync_prover_interface::api::SubmitTeeProofRequest; @@ -25,7 +25,10 @@ async fn request_tee_proof_inputs() { ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(0), + }, }, L1BatchCommitmentMode::Rollup, L2ChainId::default(), @@ -80,7 +83,10 @@ async fn submit_tee_proof() { ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(0), + }, }, L1BatchCommitmentMode::Rollup, L2ChainId::default(), From 8089b78b3f2cdbe8d0a23e9b8412a8022d78ada2 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 24 Oct 2024 15:25:00 +0200 Subject: [PATCH 125/140] fix(consensus): payload encoding protected by protocol_version (#3168) Changing payload encoding without protocol version change would invalidate consensus signatures. 
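For reviewers skimming the diff, here is a minimal, self-contained sketch of the rule this patch enforces: the optional `pubdata_params` field is only ever encoded when the protocol version allows it and the value is non-default, so the byte encoding of already-signed pre-gateway payloads never changes. `PubdataParams` and the pre-gateway flag below are stand-ins for the real types changed in `core/lib/dal/src/consensus/conv.rs`, and the snippet assumes the `anyhow` crate; it is an illustration, not part of the patch.

```rust
use anyhow::ensure;

/// Stand-in for `zksync_types::commitment::PubdataParams`.
#[derive(Clone, Copy, Debug, Default, PartialEq)]
struct PubdataParams {
    l2_da_validator_address: [u8; 20],
    pubdata_type: u8,
}

/// Encode `pubdata_params` only when it is non-default, and reject non-default
/// values for pre-gateway versions, so the signed encoding of old payloads is
/// byte-for-byte unchanged.
fn encoded_pubdata_params(
    is_pre_gateway: bool,
    params: PubdataParams,
) -> anyhow::Result<Option<PubdataParams>> {
    if is_pre_gateway {
        ensure!(
            params == PubdataParams::default(),
            "pubdata_params should have the default value in pre-gateway protocol_version"
        );
    }
    Ok((params != PubdataParams::default()).then_some(params))
}

fn main() -> anyhow::Result<()> {
    // A pre-gateway payload must not carry a non-default value at all.
    assert!(encoded_pubdata_params(
        true,
        PubdataParams { pubdata_type: 1, ..Default::default() }
    )
    .is_err());
    // A default value is encoded as an absent field in any version.
    assert_eq!(encoded_pubdata_params(false, PubdataParams::default())?, None);
    Ok(())
}
```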
--- .github/workflows/ci-core-reusable.yml | 4 ++ core/lib/dal/src/consensus/conv.rs | 83 ++++++++++++++++-------- core/lib/dal/src/consensus/mod.rs | 2 +- core/lib/dal/src/consensus/tests.rs | 34 ++++++---- core/lib/dal/src/models/storage_sync.rs | 2 +- core/node/consensus/src/storage/store.rs | 9 +-- zkstack_cli/rust-toolchain | 2 +- 7 files changed, 85 insertions(+), 51 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 9aaa476d740..c79e3431576 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -8,6 +8,10 @@ on: required: false default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' +env: + RUST_BACKTRACE: 1 + PASSED_ENV_VARS: RUST_BACKTRACE + jobs: lint: name: lint diff --git a/core/lib/dal/src/consensus/conv.rs b/core/lib/dal/src/consensus/conv.rs index 2b8488dd0c2..f0948adfd1d 100644 --- a/core/lib/dal/src/consensus/conv.rs +++ b/core/lib/dal/src/consensus/conv.rs @@ -2,7 +2,7 @@ use anyhow::{anyhow, Context as _}; use zksync_concurrency::net; use zksync_consensus_roles::{attester, node}; -use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; +use zksync_protobuf::{read_optional_repr, read_required, required, ProtoFmt, ProtoRepr}; use zksync_types::{ abi, commitment::{L1BatchCommitmentMode, PubdataParams}, @@ -104,6 +104,31 @@ impl ProtoFmt for AttestationStatus { } } +impl ProtoRepr for proto::PubdataParams { + type Type = PubdataParams; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + l2_da_validator_address: required(&self.l2_da_validator_address) + .and_then(|a| parse_h160(a)) + .context("l2_da_validator_address")?, + pubdata_type: required(&self.pubdata_type) + .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?)) + .context("pubdata_type")? + .parse(), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + l2_da_validator_address: Some(this.l2_da_validator_address.as_bytes().into()), + pubdata_type: Some( + proto::L1BatchCommitDataGeneratorMode::new(&this.pubdata_type) as i32, + ), + } + } +} + impl ProtoFmt for Payload { type Proto = proto::Payload; @@ -137,21 +162,7 @@ impl ProtoFmt for Payload { } } - let pubdata_params = if let Some(pubdata_params) = &r.pubdata_params { - Some(PubdataParams { - l2_da_validator_address: required(&pubdata_params.l2_da_validator_address) - .and_then(|a| parse_h160(a)) - .context("l2_da_validator_address")?, - pubdata_type: required(&pubdata_params.pubdata_type) - .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?)) - .context("pubdata_type")? - .parse(), - }) - } else { - None - }; - - Ok(Self { + let this = Self { protocol_version, hash: required(&r.hash) .and_then(|h| parse_h256(h)) @@ -169,11 +180,32 @@ impl ProtoFmt for Payload { .context("operator_address")?, transactions, last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, - pubdata_params, - }) + pubdata_params: read_optional_repr(&r.pubdata_params) + .context("pubdata_params")? 
+ .unwrap_or_default(), + }; + if this.protocol_version.is_pre_gateway() { + anyhow::ensure!( + this.pubdata_params == PubdataParams::default(), + "pubdata_params should have the default value in pre-gateway protocol_version" + ); + } + if this.pubdata_params == PubdataParams::default() { + anyhow::ensure!( + r.pubdata_params.is_none(), + "default pubdata_params should be encoded as None" + ); + } + Ok(this) } fn build(&self) -> Self::Proto { + if self.protocol_version.is_pre_gateway() { + assert_eq!( + self.pubdata_params, PubdataParams::default(), + "BUG DETECTED: pubdata_params should have the default value in pre-gateway protocol_version" + ); + } let mut x = Self::Proto { protocol_version: Some((self.protocol_version as u16).into()), hash: Some(self.hash.as_bytes().into()), @@ -188,16 +220,11 @@ impl ProtoFmt for Payload { transactions: vec![], transactions_v25: vec![], last_in_batch: Some(self.last_in_batch), - pubdata_params: self - .pubdata_params - .map(|pubdata_params| proto::PubdataParams { - l2_da_validator_address: Some( - pubdata_params.l2_da_validator_address.as_bytes().into(), - ), - pubdata_type: Some(proto::L1BatchCommitDataGeneratorMode::new( - &pubdata_params.pubdata_type, - ) as i32), - }), + pubdata_params: if self.pubdata_params == PubdataParams::default() { + None + } else { + Some(ProtoRepr::build(&self.pubdata_params)) + }, }; match self.protocol_version { v if v >= ProtocolVersionId::Version25 => { diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index c7e46b2cf1b..96efc634835 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -48,7 +48,7 @@ pub struct Payload { pub operator_address: Address, pub transactions: Vec, pub last_in_batch: bool, - pub pubdata_params: Option, + pub pubdata_params: PubdataParams, } impl Payload { diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index c9fd91748b2..df6ee24bfa9 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -1,7 +1,7 @@ use std::fmt::Debug; use rand::Rng; -use zksync_concurrency::ctx; +use zksync_concurrency::{ctx, testonly::abort_on_panic}; use zksync_protobuf::{ repr::{decode, encode}, testonly::{test_encode, test_encode_all_formats, FmtConv}, @@ -53,19 +53,24 @@ fn payload(rng: &mut impl Rng, protocol_version: ProtocolVersionId) -> Payload { }) .collect(), last_in_batch: rng.gen(), - pubdata_params: Some(PubdataParams { - pubdata_type: match rng.gen_range(0..2) { - 0 => L1BatchCommitmentMode::Rollup, - _ => L1BatchCommitmentMode::Validium, - }, - l2_da_validator_address: rng.gen(), - }), + pubdata_params: if protocol_version.is_pre_gateway() { + PubdataParams::default() + } else { + PubdataParams { + pubdata_type: match rng.gen_range(0..2) { + 0 => L1BatchCommitmentMode::Rollup, + _ => L1BatchCommitmentMode::Validium, + }, + l2_da_validator_address: rng.gen(), + } + }, } } /// Tests struct <-> proto struct conversions. #[test] fn test_encoding() { + abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); test_encode_all_formats::>(rng); @@ -78,10 +83,15 @@ fn test_encoding() { encode_decode::( mock_protocol_upgrade_transaction().into(), ); - let p = payload(rng, ProtocolVersionId::Version24); - test_encode(rng, &p); - let p = payload(rng, ProtocolVersionId::Version25); - test_encode(rng, &p); + // Test encoding in the current and all the future versions. + for v in ProtocolVersionId::latest() as u16.. 
{ + let Ok(v) = ProtocolVersionId::try_from(v) else { + break; + }; + tracing::info!("version {v}"); + let p = payload(rng, v); + test_encode(rng, &p); + } } fn encode_decode(msg: P::Type) diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 0eb65a606d1..3f80f52c56e 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -139,7 +139,7 @@ impl SyncBlock { operator_address: self.fee_account_address, transactions, last_in_batch: self.last_in_batch, - pubdata_params: Some(self.pubdata_params), + pubdata_params: self.pubdata_params, } } } diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 53be2fc63c7..4dce9041a10 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -28,13 +28,6 @@ fn to_fetched_block( .context("Integer overflow converting block number")?, ); let payload = Payload::decode(payload).context("Payload::decode()")?; - let pubdata_params = if payload.protocol_version.is_pre_gateway() { - payload.pubdata_params.unwrap_or_default() - } else { - payload - .pubdata_params - .context("Missing `pubdata_params` for post-gateway payload")? - }; Ok(FetchedBlock { number, l1_batch_number: payload.l1_batch_number, @@ -45,7 +38,7 @@ fn to_fetched_block( l1_gas_price: payload.l1_gas_price, l2_fair_gas_price: payload.l2_fair_gas_price, fair_pubdata_price: payload.fair_pubdata_price, - pubdata_params, + pubdata_params: payload.pubdata_params, virtual_blocks: payload.virtual_blocks, operator_address: payload.operator_address, transactions: payload diff --git a/zkstack_cli/rust-toolchain b/zkstack_cli/rust-toolchain index dbd41264aa9..03c040b91f1 100644 --- a/zkstack_cli/rust-toolchain +++ b/zkstack_cli/rust-toolchain @@ -1 +1 @@ -1.81.0 +nightly-2024-08-01 From ffa18e1d84a4bb1ca9b897fbc0a55b9e3ef0964c Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 24 Oct 2024 17:40:00 +0300 Subject: [PATCH 126/140] feat(zk_toolbox): Add EVM emulator option to `zkstack` CLI (#3139) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Allows to enable EVM emulation support from `zkstack chain create` by specifying the `--evm-emulator` command-line arg or answering the corresponding prompt. The prompt only activates if the EVM emulator bytecode hash is specified in the template genesis config (currently, it's not); more generally, it is impossible to create a chain with EVM emulation support if its bytecode hash is unknown. ## Why ❔ Part of efforts to enable EVM emulation. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
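For illustration, a hedged sketch of the flag/prompt resolution described above (hypothetical helper; the actual implementation is in `ChainCreateArgs::fill_values_with_prompt` in the diff below). The prompt is only shown when the template genesis config contains an EVM emulator hash, and explicitly requesting the emulator without such a hash is an error:

```rust
// Hypothetical, simplified resolution of the `--evm-emulator` option.
fn resolve_evm_emulator(
    cli_flag: Option<bool>,
    template_has_emulator_hash: bool,
    prompt_user: impl FnOnce() -> bool,
) -> Result<bool, String> {
    let enabled = cli_flag.unwrap_or_else(|| {
        // Only prompt when enabling the emulator is possible at all.
        template_has_emulator_hash && prompt_user()
    });
    if enabled && !template_has_emulator_hash {
        return Err(
            "cannot enable the EVM emulator: the template genesis config contains no emulator hash"
                .into(),
        );
    }
    Ok(enabled)
}
```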
--- etc/env/file_based/genesis.yaml | 2 +- zkstack_cli/crates/config/src/chain.rs | 4 ++ zkstack_cli/crates/config/src/ecosystem.rs | 7 +++- zkstack_cli/crates/config/src/genesis.rs | 15 +++++++- .../crates/zkstack/completion/_zkstack.zsh | 2 + .../crates/zkstack/completion/zkstack.fish | 2 + .../crates/zkstack/completion/zkstack.sh | 12 +++++- .../zkstack/src/commands/chain/args/create.rs | 38 ++++++++++++++++++- .../src/commands/chain/build_transactions.rs | 2 +- .../zkstack/src/commands/chain/create.rs | 3 ++ .../src/commands/chain/init/configs.rs | 2 +- .../src/commands/ecosystem/args/create.rs | 8 +++- .../zkstack/src/commands/ecosystem/common.rs | 2 +- zkstack_cli/crates/zkstack/src/messages.rs | 5 +++ 14 files changed, 93 insertions(+), 11 deletions(-) diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index 1b154b9e9ea..9617b011d2c 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -12,5 +12,5 @@ prover: dummy_verifier: true genesis_protocol_semantic_version: 0.25.0 l1_batch_commit_data_generator_mode: Rollup -# Uncomment to enable EVM emulation (requires to run genesis) +# TODO: uncomment once EVM emulator is present in the `contracts` submodule # evm_emulator_hash: 0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91 diff --git a/zkstack_cli/crates/config/src/chain.rs b/zkstack_cli/crates/config/src/chain.rs index 6c82d6ef3c3..c8fa0717dff 100644 --- a/zkstack_cli/crates/config/src/chain.rs +++ b/zkstack_cli/crates/config/src/chain.rs @@ -40,6 +40,8 @@ pub struct ChainConfigInternal { pub wallet_creation: WalletCreation, #[serde(skip_serializing_if = "Option::is_none")] pub legacy_bridge: Option, + #[serde(default)] // for backward compatibility + pub evm_emulator: bool, } /// Chain configuration file. 
This file is created in the chain @@ -61,6 +63,7 @@ pub struct ChainConfig { pub wallet_creation: WalletCreation, pub shell: OnceCell, pub legacy_bridge: Option, + pub evm_emulator: bool, } impl Serialize for ChainConfig { @@ -157,6 +160,7 @@ impl ChainConfig { base_token: self.base_token.clone(), wallet_creation: self.wallet_creation, legacy_bridge: self.legacy_bridge, + evm_emulator: self.evm_emulator, } } } diff --git a/zkstack_cli/crates/config/src/ecosystem.rs b/zkstack_cli/crates/config/src/ecosystem.rs index 79cb1c4ea27..c67aebf2a46 100644 --- a/zkstack_cli/crates/config/src/ecosystem.rs +++ b/zkstack_cli/crates/config/src/ecosystem.rs @@ -178,6 +178,7 @@ impl EcosystemConfig { .artifacts_path .unwrap_or_else(|| self.get_chain_artifacts_path(name)), legacy_bridge: config.legacy_bridge, + evm_emulator: config.evm_emulator, }) } @@ -232,7 +233,11 @@ impl EcosystemConfig { } pub fn get_default_configs_path(&self) -> PathBuf { - self.link_to_code.join(CONFIGS_PATH) + Self::default_configs_path(&self.link_to_code) + } + + pub fn default_configs_path(link_to_code: &Path) -> PathBuf { + link_to_code.join(CONFIGS_PATH) } /// Path to the predefined ecosystem configs diff --git a/zkstack_cli/crates/config/src/genesis.rs b/zkstack_cli/crates/config/src/genesis.rs index 933252541f4..2d9ac7fcdc6 100644 --- a/zkstack_cli/crates/config/src/genesis.rs +++ b/zkstack_cli/crates/config/src/genesis.rs @@ -1,5 +1,6 @@ use std::path::Path; +use anyhow::Context as _; use xshell::Shell; use zksync_basic_types::L1ChainId; pub use zksync_config::GenesisConfig; @@ -11,11 +12,23 @@ use crate::{ ChainConfig, }; -pub fn update_from_chain_config(genesis: &mut GenesisConfig, config: &ChainConfig) { +pub fn update_from_chain_config( + genesis: &mut GenesisConfig, + config: &ChainConfig, +) -> anyhow::Result<()> { genesis.l2_chain_id = config.chain_id; // TODO(EVM-676): for now, the settlement layer is always the same as the L1 network genesis.l1_chain_id = L1ChainId(config.l1_network.chain_id()); genesis.l1_batch_commit_data_generator_mode = config.l1_batch_commit_data_generator_mode; + genesis.evm_emulator_hash = if config.evm_emulator { + Some(genesis.evm_emulator_hash.context( + "impossible to initialize a chain with EVM emulator: the template genesis config \ + does not contain EVM emulator hash", + )?) 
+ } else { + None + }; + Ok(()) } impl FileConfigWithDefaultName for GenesisConfig { diff --git a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh index b985f5b9334..4df431754c8 100644 --- a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh +++ b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh @@ -81,6 +81,7 @@ in-file\:"Specify file with wallets"))' \ '--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ '--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ '--set-as-default=[Set as default chain]' \ +'--evm-emulator=[Enable EVM emulator]' \ '--start-containers=[Start reth and postgres containers after creation]' \ '--chain=[Chain to use]:CHAIN: ' \ '--legacy-bridge[]' \ @@ -241,6 +242,7 @@ in-file\:"Specify file with wallets"))' \ '--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ '--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ '--set-as-default=[Set as default chain]' \ +'--evm-emulator=[Enable EVM emulator]' \ '--chain=[Chain to use]:CHAIN: ' \ '--legacy-bridge[]' \ '-v[Verbose mode]' \ diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.fish b/zkstack_cli/crates/zkstack/completion/zkstack.fish index f90bcf2c4ac..a1261082e6f 100644 --- a/zkstack_cli/crates/zkstack/completion/zkstack.fish +++ b/zkstack_cli/crates/zkstack/completion/zkstack.fish @@ -73,6 +73,7 @@ complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_se complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l evm-emulator -d 'Enable EVM emulator' -r -f -a "{true\t'',false\t''}" complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l start-containers -d 'Start reth and postgres containers after creation' -r -f -a "{true\t'',false\t''}" complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l legacy-bridge @@ -156,6 +157,7 @@ complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_s complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l evm-emulator -d 'Enable EVM emulator' -r -f -a "{true\t'',false\t''}" complete -c 
zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l legacy-bridge complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -s v -l verbose -d 'Verbose mode' diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.sh b/zkstack_cli/crates/zkstack/completion/zkstack.sh index d21480bba2c..7cdb20ae9aa 100644 --- a/zkstack_cli/crates/zkstack/completion/zkstack.sh +++ b/zkstack_cli/crates/zkstack/completion/zkstack.sh @@ -1162,7 +1162,7 @@ _zkstack() { return 0 ;; zkstack__chain__create) - opts="-v -h --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --verbose --chain --ignore-prerequisites --help" + opts="-v -h --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --evm-emulator --verbose --chain --ignore-prerequisites --help" if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) return 0 @@ -1219,6 +1219,10 @@ _zkstack() { COMPREPLY=($(compgen -W "true false" -- "${cur}")) return 0 ;; + --evm-emulator) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; --chain) COMPREPLY=($(compgen -f "${cur}")) return 0 @@ -4643,7 +4647,7 @@ _zkstack() { return 0 ;; zkstack__ecosystem__create) - opts="-v -h --ecosystem-name --l1-network --link-to-code --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --start-containers --verbose --chain --ignore-prerequisites --help" + opts="-v -h --ecosystem-name --l1-network --link-to-code --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --evm-emulator --start-containers --verbose --chain --ignore-prerequisites --help" if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) return 0 @@ -4715,6 +4719,10 @@ _zkstack() { COMPREPLY=($(compgen -W "true false" -- "${cur}")) return 0 ;; + --evm-emulator) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; --start-containers) COMPREPLY=($(compgen -W "true false" -- "${cur}")) return 0 diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs index ccf64ad27ac..ae08d4712b3 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs @@ -1,14 +1,22 @@ -use std::{path::PathBuf, str::FromStr}; +use std::{ + path::{Path, PathBuf}, + str::FromStr, +}; use anyhow::{bail, Context}; use clap::{Parser, ValueEnum, ValueHint}; use common::{Prompt, PromptConfirm, PromptSelect}; -use config::forge_interface::deploy_ecosystem::output::Erc20Token; +use config::{ + forge_interface::deploy_ecosystem::output::Erc20Token, traits::ReadConfigWithBasePath, + EcosystemConfig, +}; 
use serde::{Deserialize, Serialize}; use slugify_rs::slugify; use strum::{Display, EnumIter, IntoEnumIterator}; use types::{BaseToken, L1BatchCommitmentMode, L1Network, ProverMode, WalletCreation}; +use xshell::Shell; use zksync_basic_types::H160; +use zksync_config::GenesisConfig; use crate::{ defaults::L2_CHAIN_ID, @@ -18,6 +26,7 @@ use crate::{ MSG_BASE_TOKEN_PRICE_DENOMINATOR_PROMPT, MSG_BASE_TOKEN_PRICE_NOMINATOR_HELP, MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT, MSG_BASE_TOKEN_SELECTION_PROMPT, MSG_CHAIN_ID_HELP, MSG_CHAIN_ID_PROMPT, MSG_CHAIN_ID_VALIDATOR_ERR, MSG_CHAIN_NAME_PROMPT, + MSG_EVM_EMULATOR_HASH_MISSING_ERR, MSG_EVM_EMULATOR_HELP, MSG_EVM_EMULATOR_PROMPT, MSG_L1_BATCH_COMMIT_DATA_GENERATOR_MODE_PROMPT, MSG_L1_COMMIT_DATA_GENERATOR_MODE_HELP, MSG_NUMBER_VALIDATOR_GREATHER_THAN_ZERO_ERR, MSG_NUMBER_VALIDATOR_NOT_ZERO_ERR, MSG_PROVER_MODE_HELP, MSG_PROVER_VERSION_PROMPT, MSG_SET_AS_DEFAULT_HELP, @@ -67,14 +76,18 @@ pub struct ChainCreateArgs { pub(crate) set_as_default: Option, #[clap(long, default_value = "false")] pub(crate) legacy_bridge: bool, + #[arg(long, help = MSG_EVM_EMULATOR_HELP, default_missing_value = "true", num_args = 0..=1)] + evm_emulator: Option, } impl ChainCreateArgs { pub fn fill_values_with_prompt( self, + shell: &Shell, number_of_chains: u32, l1_network: &L1Network, possible_erc20: Vec, + link_to_code: &Path, ) -> anyhow::Result { let mut chain_name = self .chain_name @@ -211,6 +224,25 @@ impl ChainCreateArgs { } }; + let default_genesis_config = GenesisConfig::read_with_base_path( + shell, + EcosystemConfig::default_configs_path(link_to_code), + ) + .context("failed reading genesis config")?; + let has_evm_emulation_support = default_genesis_config.evm_emulator_hash.is_some(); + let evm_emulator = self.evm_emulator.unwrap_or_else(|| { + if !has_evm_emulation_support { + false + } else { + PromptConfirm::new(MSG_EVM_EMULATOR_PROMPT) + .default(false) + .ask() + } + }); + if !has_evm_emulation_support && evm_emulator { + bail!(MSG_EVM_EMULATOR_HASH_MISSING_ERR); + } + let set_as_default = self.set_as_default.unwrap_or_else(|| { PromptConfirm::new(MSG_SET_AS_DEFAULT_PROMPT) .default(true) @@ -227,6 +259,7 @@ impl ChainCreateArgs { base_token, set_as_default, legacy_bridge: self.legacy_bridge, + evm_emulator, }) } } @@ -242,6 +275,7 @@ pub struct ChainCreateArgsFinal { pub base_token: BaseToken, pub set_as_default: bool, pub legacy_bridge: bool, + pub evm_emulator: bool, } #[derive(Debug, Clone, EnumIter, Display, PartialEq, Eq)] diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs index 5f1be15231b..d3953c65659 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs @@ -42,7 +42,7 @@ pub(crate) async fn run(args: BuildTransactionsArgs, shell: &Shell) -> anyhow::R logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, &chain_config); + update_from_chain_config(&mut genesis_config, &chain_config)?; // Copy ecosystem contracts let mut contracts_config = config diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs index 48a320ec27e..bdf5711e321 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/create.rs +++ 
b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs @@ -30,9 +30,11 @@ fn create( let tokens = ecosystem_config.get_erc20_tokens(); let args = args .fill_values_with_prompt( + shell, ecosystem_config.list_of_chains().len() as u32, &ecosystem_config.l1_network, tokens, + &ecosystem_config.link_to_code, ) .context(MSG_ARGS_VALIDATOR_ERR)?; @@ -89,6 +91,7 @@ pub(crate) fn create_chain_inner( wallet_creation: args.wallet_creation, shell: OnceCell::from(shell.clone()), legacy_bridge, + evm_emulator: args.evm_emulator, }; create_wallets( diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs index 82986d9b41a..31c5c681e7d 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs @@ -81,7 +81,7 @@ pub async fn init_configs( // Initialize genesis config let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, chain_config); + update_from_chain_config(&mut genesis_config, chain_config)?; genesis_config.save_with_base_path(shell, &chain_config.configs)?; // Initialize contracts config diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs index 14cb5206f6a..6b6c1236d36 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs @@ -71,7 +71,13 @@ impl EcosystemCreateArgs { // Make the only chain as a default one self.chain.set_as_default = Some(true); - let chain = self.chain.fill_values_with_prompt(0, &l1_network, vec![])?; + let chain = self.chain.fill_values_with_prompt( + shell, + 0, + &l1_network, + vec![], + Path::new(&link_to_code), + )?; let start_containers = self.start_containers.unwrap_or_else(|| { PromptConfirm::new(MSG_START_CONTAINERS_PROMPT) diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs index 42b8f79b97e..00d937bba29 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs @@ -28,7 +28,7 @@ pub async fn deploy_l1( let deploy_config_path = DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.input(&config.link_to_code); let default_genesis_config = GenesisConfig::read_with_base_path(shell, config.get_default_configs_path()) - .context("Context")?; + .context("failed reading genesis config")?; let wallets_config = config.get_wallets()?; // For deploying ecosystem we only need genesis batch params diff --git a/zkstack_cli/crates/zkstack/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs index b9786dc4d8d..516194ef721 100644 --- a/zkstack_cli/crates/zkstack/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -156,6 +156,7 @@ pub(super) const MSG_BASE_TOKEN_ADDRESS_HELP: &str = "Base token address"; pub(super) const MSG_BASE_TOKEN_PRICE_NOMINATOR_HELP: &str = "Base token nominator"; pub(super) const MSG_BASE_TOKEN_PRICE_DENOMINATOR_HELP: &str = "Base token denominator"; pub(super) const MSG_SET_AS_DEFAULT_HELP: &str = "Set as default chain"; +pub(super) const MSG_EVM_EMULATOR_HELP: &str = "Enable EVM emulator"; pub(super) const MSG_CHAIN_NAME_PROMPT: &str = "What do you want to name the chain?"; pub(super) const MSG_CHAIN_ID_PROMPT: &str = "What's the chain id?"; pub(super) const MSG_WALLET_CREATION_PROMPT: &str = 
"Select how do you want to create the wallet"; @@ -170,6 +171,7 @@ pub(super) const MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT: &str = pub(super) const MSG_BASE_TOKEN_PRICE_DENOMINATOR_PROMPT: &str = "What is the base token price denominator?"; pub(super) const MSG_SET_AS_DEFAULT_PROMPT: &str = "Set this chain as default?"; +pub(super) const MSG_EVM_EMULATOR_PROMPT: &str = "Enable EVM emulator?"; pub(super) const MSG_WALLET_PATH_INVALID_ERR: &str = "Invalid path"; pub(super) const MSG_NUMBER_VALIDATOR_NOT_ZERO_ERR: &str = "Number is not zero"; pub(super) const MSG_NUMBER_VALIDATOR_GREATHER_THAN_ZERO_ERR: &str = @@ -184,6 +186,9 @@ pub(super) const MSG_WALLET_CREATION_VALIDATOR_ERR: &str = "Localhost wallet is not supported for external networks"; pub(super) const MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND: &str = "Token Multiplier Setter not found. Specify it in a wallet config"; +pub(super) const MSG_EVM_EMULATOR_HASH_MISSING_ERR: &str = + "Impossible to initialize a chain with EVM emulator: the template genesis config \ + does not contain EVM emulator hash"; /// Chain genesis related messages pub(super) const MSG_L1_SECRETS_MUST_BE_PRESENTED: &str = "L1 secret must be presented"; From a5028da65608898ad41c6a4fd5c6ec4c28a45703 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 24 Oct 2024 17:50:09 +0200 Subject: [PATCH 127/140] fix(consensus): better logging of errors (#3170) Added more debug information in case of payload mismatch. Improved formatting of errors in the node_framework. --- core/lib/dal/src/consensus_dal/mod.rs | 12 ++++----- core/node/consensus/src/en.rs | 11 +++++--- core/node/consensus/src/mn.rs | 7 ++--- core/node/consensus/src/storage/connection.rs | 2 +- core/node/consensus/src/storage/store.rs | 4 +-- core/node/node_framework/src/service/error.rs | 27 ++++++++++++++++--- core/node/node_framework/src/service/mod.rs | 2 +- 7 files changed, 42 insertions(+), 23 deletions(-) diff --git a/core/lib/dal/src/consensus_dal/mod.rs b/core/lib/dal/src/consensus_dal/mod.rs index 4516434868c..a091421d857 100644 --- a/core/lib/dal/src/consensus_dal/mod.rs +++ b/core/lib/dal/src/consensus_dal/mod.rs @@ -69,8 +69,8 @@ pub struct ConsensusDal<'a, 'c> { pub enum InsertCertificateError { #[error("corresponding payload is missing")] MissingPayload, - #[error("certificate doesn't match the payload")] - PayloadMismatch, + #[error("certificate doesn't match the payload, payload = {0:?}")] + PayloadMismatch(Payload), #[error(transparent)] Dal(#[from] DalError), #[error(transparent)] @@ -528,7 +528,7 @@ impl ConsensusDal<'_, '_> { .await? .ok_or(E::MissingPayload)?; if header.payload != want_payload.encode().hash() { - return Err(E::PayloadMismatch); + return Err(E::PayloadMismatch(want_payload)); } sqlx::query!( r#" @@ -634,7 +634,7 @@ impl ConsensusDal<'_, '_> { pub async fn insert_batch_certificate( &mut self, cert: &attester::BatchQC, - ) -> Result<(), InsertCertificateError> { + ) -> anyhow::Result<()> { let cfg = self .global_config() .await @@ -652,9 +652,7 @@ impl ConsensusDal<'_, '_> { .context("batch()")? 
.context("batch is missing")?, ); - if cert.message.hash != hash { - return Err(InsertCertificateError::PayloadMismatch); - } + anyhow::ensure!(cert.message.hash == hash, "hash mismatch"); cert.verify(cfg.genesis.hash(), &committee) .context("cert.verify()")?; sqlx::query!( diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 8158cc5aeb2..5e9aadc8f37 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -222,7 +222,11 @@ impl EN { let mut next = attester::BatchNumber(0); loop { let status = loop { - match self.fetch_attestation_status(ctx).await { + match self + .fetch_attestation_status(ctx) + .await + .wrap("fetch_attestation_status()") + { Err(err) => tracing::warn!("{err:#}"), Ok(status) => { if status.genesis != cfg.genesis.hash() { @@ -439,7 +443,7 @@ impl EN { }); while end.map_or(true, |end| queue.next() < end) { let block = recv.recv(ctx).await?.join(ctx).await?; - queue.send(block).await?; + queue.send(block).await.context("queue.send()")?; } Ok(()) }) @@ -448,7 +452,8 @@ impl EN { if first < queue.next() { self.pool .wait_for_payload(ctx, queue.next().prev().unwrap()) - .await?; + .await + .wrap("wait_for_payload()")?; } Ok(()) } diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 5abbdc3503b..2a280b2f161 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -10,7 +10,7 @@ use zksync_dal::consensus_dal; use crate::{ config, registry, - storage::{ConnectionPool, InsertCertificateError, Store}, + storage::{ConnectionPool, Store}, }; /// Task running a consensus validator for the main node. @@ -179,10 +179,7 @@ async fn run_attestation_controller( .wrap("connection()")? .insert_batch_certificate(ctx, &qc) .await - .map_err(|err| match err { - InsertCertificateError::Canceled(err) => ctx::Error::Canceled(err), - InsertCertificateError::Inner(err) => ctx::Error::Internal(err.into()), - })?; + .wrap("insert_batch_certificate()")?; } } .await; diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index c30398498a9..6ec5794e968 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -158,7 +158,7 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, cert: &attester::BatchQC, - ) -> Result<(), super::InsertCertificateError> { + ) -> ctx::Result<()> { Ok(ctx .wait(self.0.consensus_dal().insert_batch_certificate(cert)) .await??) diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 4dce9041a10..154509e97b1 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -255,9 +255,7 @@ impl StoreRunner { Err(InsertCertificateError::Canceled(err)) => { return Err(ctx::Error::Canceled(err)) } - Err(InsertCertificateError::Inner(err)) => { - return Err(ctx::Error::Internal(anyhow::Error::from(err))) - } + Err(err) => Err(err).context("insert_block_certificate()")?, } } diff --git a/core/node/node_framework/src/service/error.rs b/core/node/node_framework/src/service/error.rs index 890cc6b7d4b..66a1c13e873 100644 --- a/core/node/node_framework/src/service/error.rs +++ b/core/node/node_framework/src/service/error.rs @@ -1,20 +1,41 @@ +use std::fmt; + use crate::{task::TaskId, wiring_layer::WiringError}; /// An error that can occur during the task lifecycle. 
#[derive(Debug, thiserror::Error)] pub enum TaskError { - #[error("Task {0} failed: {1}")] + #[error("Task {0} failed: {1:#}")] TaskFailed(TaskId, anyhow::Error), #[error("Task {0} panicked: {1}")] TaskPanicked(TaskId, String), #[error("Shutdown for task {0} timed out")] TaskShutdownTimedOut(TaskId), - #[error("Shutdown hook {0} failed: {1}")] + #[error("Shutdown hook {0} failed: {1:#}")] ShutdownHookFailed(TaskId, anyhow::Error), #[error("Shutdown hook {0} timed out")] ShutdownHookTimedOut(TaskId), } +/// Wrapper of a list of errors with a reasonable formatting. +pub struct TaskErrors(pub Vec); + +impl From> for TaskErrors { + fn from(errs: Vec) -> Self { + Self(errs) + } +} + +impl fmt::Debug for TaskErrors { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0 + .iter() + .map(|err| format!("{err:#}")) + .collect::>() + .fmt(f) + } +} + /// An error that can occur during the service lifecycle. #[derive(Debug, thiserror::Error)] pub enum ZkStackServiceError { @@ -25,5 +46,5 @@ pub enum ZkStackServiceError { #[error("One or more wiring layers failed to initialize: {0:?}")] Wiring(Vec<(String, WiringError)>), #[error("One or more tasks failed: {0:?}")] - Task(Vec), + Task(TaskErrors), } diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs index b6d42009354..00e50f7dc3b 100644 --- a/core/node/node_framework/src/service/mod.rs +++ b/core/node/node_framework/src/service/mod.rs @@ -171,7 +171,7 @@ impl ZkStackService { if self.errors.is_empty() { Ok(()) } else { - Err(ZkStackServiceError::Task(self.errors)) + Err(ZkStackServiceError::Task(self.errors.into())) } } From 1ffd22ffbe710469de0e7f27c6aae29453ec6d3e Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Fri, 25 Oct 2024 08:44:37 +0200 Subject: [PATCH 128/140] fix(tee_prover): add prometheus pull listener (#3169) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add a prometheus pull listener. ## Why ❔ To get the metrics out of the zk_tee_prover ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
Signed-off-by: Harald Hoyer --- core/bin/zksync_tee_prover/src/main.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/core/bin/zksync_tee_prover/src/main.rs b/core/bin/zksync_tee_prover/src/main.rs index 70c6f888185..aa0881011da 100644 --- a/core/bin/zksync_tee_prover/src/main.rs +++ b/core/bin/zksync_tee_prover/src/main.rs @@ -45,11 +45,12 @@ fn main() -> anyhow::Result<()> { .add_layer(SigintHandlerLayer) .add_layer(TeeProverLayer::new(tee_prover_config)); - if let Some(gateway) = prometheus_config.gateway_endpoint() { - let exporter_config = - PrometheusExporterConfig::push(gateway, prometheus_config.push_interval()); - builder.add_layer(PrometheusExporterLayer(exporter_config)); - } + let exporter_config = if let Some(gateway) = prometheus_config.gateway_endpoint() { + PrometheusExporterConfig::push(gateway, prometheus_config.push_interval()) + } else { + PrometheusExporterConfig::pull(prometheus_config.listener_port) + }; + builder.add_layer(PrometheusExporterLayer(exporter_config)); builder.build().run(observability_guard)?; Ok(()) From 3815252790fd0e9094f308b58dfde3a8b1a82277 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 25 Oct 2024 11:19:05 +0300 Subject: [PATCH 129/140] feat(metadata-calculator): Add debug endpoints for tree API (#3167) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Adds `/debug/nodes` and `/debug/stale-keys` endpoints for tree API to debug tree-related incidents. - Allows to run tree API on EN without a tree. ## Why ❔ Allows investigating tree-related incidents easier. Allowing to run tree API without a tree potentially improves UX / DevEx. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
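For illustration, nodes in the debug API are addressed by `<version>:<nibbles>` strings (e.g. `0:` for the root at tree version 0, or `3:deadbeef0` for a deeper node), matching the new `Display`/`FromStr` implementations for `NodeKey` in the diff below. A hedged, simplified parser for this format (the real one lives in `core/lib/merkle_tree/src/types/internal.rs`):

```rust
/// Hypothetical helper mirroring the `<version>:<nibbles>` node key format.
/// Nibbles are at most 64 hex digits (one per half-byte of the 32-byte key).
fn parse_node_key(s: &str) -> Result<(u64, String), String> {
    let (version, nibbles) = s
        .split_once(':')
        .ok_or_else(|| "node key must contain a `:` delimiter".to_string())?;
    let version: u64 = version
        .parse()
        .map_err(|_| "invalid key version".to_string())?;
    if nibbles.len() > 64 || !nibbles.bytes().all(|b| b.is_ascii_hexdigit()) {
        return Err("nibbles must be at most 64 hex digits".to_string());
    }
    Ok((version, nibbles.to_string()))
}
```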
--- core/bin/external_node/src/node_builder.rs | 39 ++++- core/bin/external_node/src/tests/mod.rs | 39 +---- core/lib/merkle_tree/src/domain.rs | 27 ++- core/lib/merkle_tree/src/errors.rs | 2 + core/lib/merkle_tree/src/lib.rs | 2 +- core/lib/merkle_tree/src/storage/rocksdb.rs | 27 ++- .../merkle_tree/src/storage/serialization.rs | 59 +++++-- core/lib/merkle_tree/src/types/internal.rs | 88 ++++++++-- core/lib/merkle_tree/src/types/mod.rs | 2 +- .../merkle_tree/tests/integration/domain.rs | 25 +++ .../src/api_server/metrics.rs | 2 + .../metadata_calculator/src/api_server/mod.rs | 161 +++++++++++++++++- .../src/api_server/tests.rs | 56 ++++++ core/node/metadata_calculator/src/helpers.rs | 56 +++++- core/node/metadata_calculator/src/lib.rs | 53 ++++++ .../layers/metadata_calculator.rs | 67 +++++++- 16 files changed, 617 insertions(+), 88 deletions(-) diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 883f3f8a5fa..b7f6f803902 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -11,7 +11,9 @@ use zksync_config::{ }, PostgresConfig, }; -use zksync_metadata_calculator::{MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig}; +use zksync_metadata_calculator::{ + MerkleTreeReaderConfig, MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, +}; use zksync_node_api_server::web3::Namespace; use zksync_node_framework::{ implementations::layers::{ @@ -25,7 +27,7 @@ use zksync_node_framework::{ logs_bloom_backfill::LogsBloomBackfillLayer, main_node_client::MainNodeClientLayer, main_node_fee_params_fetcher::MainNodeFeeParamsFetcherLayer, - metadata_calculator::MetadataCalculatorLayer, + metadata_calculator::{MetadataCalculatorLayer, TreeApiServerLayer}, node_storage_init::{ external_node_strategy::{ExternalNodeInitStrategyLayer, SnapshotRecoveryConfig}, NodeStorageInitializerLayer, @@ -385,6 +387,29 @@ impl ExternalNodeBuilder { Ok(self) } + fn add_isolated_tree_api_layer(mut self) -> anyhow::Result { + let reader_config = MerkleTreeReaderConfig { + db_path: self.config.required.merkle_tree_path.clone(), + max_open_files: self.config.optional.merkle_tree_max_open_files, + multi_get_chunk_size: self.config.optional.merkle_tree_multi_get_chunk_size, + block_cache_capacity: self.config.optional.merkle_tree_block_cache_size(), + include_indices_and_filters_in_block_cache: self + .config + .optional + .merkle_tree_include_indices_and_filters_in_block_cache, + }; + let api_config = MerkleTreeApiConfig { + port: self + .config + .tree_component + .api_port + .context("should contain tree api port")?, + }; + self.node + .add_layer(TreeApiServerLayer::new(reader_config, api_config)); + Ok(self) + } + fn add_tx_sender_layer(mut self) -> anyhow::Result { let postgres_storage_config = PostgresStorageCachesConfig { factory_deps_cache_size: self.config.optional.factory_deps_cache_size() as u64, @@ -607,11 +632,11 @@ impl ExternalNodeBuilder { self = self.add_metadata_calculator_layer(with_tree_api)?; } Component::TreeApi => { - anyhow::ensure!( - components.contains(&Component::Tree), - "Merkle tree API cannot be started without a tree component" - ); - // Do nothing, will be handled by the `Tree` component. + if components.contains(&Component::Tree) { + // Do nothing, will be handled by the `Tree` component. 
+ } else { + self = self.add_isolated_tree_api_layer()?; + } } Component::TreeFetcher => { self = self.add_tree_data_fetcher_layer()?; diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index c5dd88748e5..59aceea819f 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -17,7 +17,7 @@ mod utils; const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); const POLL_INTERVAL: Duration = Duration::from_millis(100); -#[test_casing(3, ["all", "core", "api"])] +#[test_casing(4, ["all", "core", "api", "core,tree_api"])] #[tokio::test] #[tracing::instrument] // Add args to the test logs async fn external_node_basics(components_str: &'static str) { @@ -170,40 +170,3 @@ async fn running_tree_without_core_is_not_allowed() { err ); } - -#[tokio::test] -async fn running_tree_api_without_tree_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging - let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("core,tree_api").await; - - let l2_client = utils::mock_l2_client(&env); - let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address()); - - let node_handle = tokio::task::spawn_blocking(move || { - std::thread::spawn(move || { - let mut node = ExternalNodeBuilder::new(env.config)?; - inject_test_layers( - &mut node, - env.sigint_receiver, - env.app_health_sender, - eth_client, - l2_client, - ); - - // We're only interested in the error, so we drop the result. - node.build(env.components.0.into_iter().collect()).map(drop) - }) - .join() - .unwrap() - }); - - // Check that we cannot build the node without the core component. - let result = node_handle.await.expect("Building the node panicked"); - let err = result.expect_err("Building the node with tree api but without tree should fail"); - assert!( - err.to_string() - .contains("Merkle tree API cannot be started without a tree component"), - "Unexpected errror: {}", - err - ); -} diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index a4d577fc3ba..bb69bda209c 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -9,10 +9,11 @@ use crate::{ consistency::ConsistencyError, storage::{PatchSet, Patched, RocksDBWrapper}, types::{ - Key, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, ValueHash, - TREE_DEPTH, + Key, NodeKey, RawNode, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, + ValueHash, TREE_DEPTH, }, BlockOutput, HashTree, MerkleTree, MerkleTreePruner, MerkleTreePrunerHandle, NoVersionError, + PruneDatabase, }; impl TreeInstruction { @@ -444,6 +445,28 @@ impl ZkSyncTreeReader { self.0.entries_with_proofs(version, keys) } + /// Returns raw nodes for the specified `keys`. + pub fn raw_nodes(&self, keys: &[NodeKey]) -> Vec> { + let raw_nodes = self.0.db.raw_nodes(keys).into_iter(); + raw_nodes + .zip(keys) + .map(|(slice, key)| { + let slice = slice?; + Some(if key.is_empty() { + RawNode::deserialize_root(&slice) + } else { + RawNode::deserialize(&slice) + }) + }) + .collect() + } + + /// Returns raw stale keys obsoleted in the specified version of the tree. + pub fn raw_stale_keys(&self, l1_batch_number: L1BatchNumber) -> Vec { + let version = u64::from(l1_batch_number.0); + self.0.db.stale_keys(version) + } + /// Verifies consistency of the tree at the specified L1 batch number. 
/// /// # Errors diff --git a/core/lib/merkle_tree/src/errors.rs b/core/lib/merkle_tree/src/errors.rs index b8130717f93..c187ce4977b 100644 --- a/core/lib/merkle_tree/src/errors.rs +++ b/core/lib/merkle_tree/src/errors.rs @@ -22,6 +22,8 @@ pub enum DeserializeErrorKind { /// Bit mask specifying a child kind in an internal tree node is invalid. #[error("invalid bit mask specifying a child kind in an internal tree node")] InvalidChildKind, + #[error("data left after deserialization")] + Leftovers, /// Missing required tag in the tree manifest. #[error("missing required tag `{0}` in tree manifest")] diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 6f9da59cf0e..824f23eaf52 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -82,7 +82,7 @@ mod utils; pub mod unstable { pub use crate::{ errors::DeserializeError, - types::{Manifest, Node, NodeKey, ProfiledTreeOperation, Root}, + types::{Manifest, Node, NodeKey, ProfiledTreeOperation, RawNode, Root}, }; } diff --git a/core/lib/merkle_tree/src/storage/rocksdb.rs b/core/lib/merkle_tree/src/storage/rocksdb.rs index 711ccaa6137..22335c82940 100644 --- a/core/lib/merkle_tree/src/storage/rocksdb.rs +++ b/core/lib/merkle_tree/src/storage/rocksdb.rs @@ -53,6 +53,23 @@ impl NamedColumnFamily for MerkleTreeColumnFamily { type LocalProfiledOperation = RefCell>>; +/// Unifies keys that can be used to load raw data from RocksDB. +pub(crate) trait ToDbKey: Sync { + fn to_db_key(&self) -> Vec; +} + +impl ToDbKey for NodeKey { + fn to_db_key(&self) -> Vec { + NodeKey::to_db_key(*self) + } +} + +impl ToDbKey for (NodeKey, bool) { + fn to_db_key(&self) -> Vec { + NodeKey::to_db_key(self.0) + } +} + /// Main [`Database`] implementation wrapping a [`RocksDB`] reference. /// /// # Cloning @@ -112,7 +129,7 @@ impl RocksDBWrapper { .expect("Failed reading from RocksDB") } - fn raw_nodes(&self, keys: &NodeKeys) -> Vec>> { + pub(crate) fn raw_nodes(&self, keys: &[T]) -> Vec>> { // Propagate the currently profiled operation to rayon threads used in the parallel iterator below. let profiled_operation = self .profiled_operation @@ -126,7 +143,7 @@ impl RocksDBWrapper { let _guard = profiled_operation .as_ref() .and_then(ProfiledOperation::start_profiling); - let keys = chunk.iter().map(|(key, _)| key.to_db_key()); + let keys = chunk.iter().map(ToDbKey::to_db_key); let results = self.db.multi_get_cf(MerkleTreeColumnFamily::Tree, keys); results .into_iter() @@ -144,9 +161,9 @@ impl RocksDBWrapper { // If we didn't succeed with the patch set, or the key version is old, // access the underlying storage. 
let node = if is_leaf { - LeafNode::deserialize(raw_node).map(Node::Leaf) + LeafNode::deserialize(raw_node, false).map(Node::Leaf) } else { - InternalNode::deserialize(raw_node).map(Node::Internal) + InternalNode::deserialize(raw_node, false).map(Node::Internal) }; node.map_err(|err| { err.with_context(if is_leaf { @@ -187,7 +204,7 @@ impl Database for RocksDBWrapper { let Some(raw_root) = self.raw_node(&NodeKey::empty(version).to_db_key()) else { return Ok(None); }; - Root::deserialize(&raw_root) + Root::deserialize(&raw_root, false) .map(Some) .map_err(|err| err.with_context(ErrorContext::Root(version))) } diff --git a/core/lib/merkle_tree/src/storage/serialization.rs b/core/lib/merkle_tree/src/storage/serialization.rs index f21fece94e0..d0c573fd817 100644 --- a/core/lib/merkle_tree/src/storage/serialization.rs +++ b/core/lib/merkle_tree/src/storage/serialization.rs @@ -5,7 +5,7 @@ use std::{collections::HashMap, str}; use crate::{ errors::{DeserializeError, DeserializeErrorKind, ErrorContext}, types::{ - ChildRef, InternalNode, Key, LeafNode, Manifest, Node, Root, TreeTags, ValueHash, + ChildRef, InternalNode, Key, LeafNode, Manifest, Node, RawNode, Root, TreeTags, ValueHash, HASH_SIZE, KEY_SIZE, }, }; @@ -15,7 +15,7 @@ use crate::{ const LEB128_SIZE_ESTIMATE: usize = 3; impl LeafNode { - pub(super) fn deserialize(bytes: &[u8]) -> Result { + pub(super) fn deserialize(bytes: &[u8], strict: bool) -> Result { if bytes.len() < KEY_SIZE + HASH_SIZE { return Err(DeserializeErrorKind::UnexpectedEof.into()); } @@ -26,6 +26,10 @@ impl LeafNode { let leaf_index = leb128::read::unsigned(&mut bytes).map_err(|err| { DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafIndex) })?; + if strict && !bytes.is_empty() { + return Err(DeserializeErrorKind::Leftovers.into()); + } + Ok(Self { full_key, value_hash, @@ -105,7 +109,7 @@ impl ChildRef { } impl InternalNode { - pub(super) fn deserialize(bytes: &[u8]) -> Result { + pub(super) fn deserialize(bytes: &[u8], strict: bool) -> Result { if bytes.len() < 4 { let err = DeserializeErrorKind::UnexpectedEof; return Err(err.with_context(ErrorContext::ChildrenMask)); @@ -134,6 +138,9 @@ impl InternalNode { } bitmap >>= 2; } + if strict && !bytes.is_empty() { + return Err(DeserializeErrorKind::Leftovers.into()); + } Ok(this) } @@ -161,8 +168,36 @@ impl InternalNode { } } +impl RawNode { + pub(crate) fn deserialize(bytes: &[u8]) -> Self { + Self { + raw: bytes.to_vec(), + leaf: LeafNode::deserialize(bytes, true).ok(), + internal: InternalNode::deserialize(bytes, true).ok(), + } + } + + pub(crate) fn deserialize_root(bytes: &[u8]) -> Self { + let root = Root::deserialize(bytes, true).ok(); + let node = root.and_then(|root| match root { + Root::Empty => None, + Root::Filled { node, .. } => Some(node), + }); + let (leaf, internal) = match node { + None => (None, None), + Some(Node::Leaf(leaf)) => (Some(leaf), None), + Some(Node::Internal(node)) => (None, Some(node)), + }; + Self { + raw: bytes.to_vec(), + leaf, + internal, + } + } +} + impl Root { - pub(super) fn deserialize(mut bytes: &[u8]) -> Result { + pub(super) fn deserialize(mut bytes: &[u8], strict: bool) -> Result { let leaf_count = leb128::read::unsigned(&mut bytes).map_err(|err| { DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafCount) })?; @@ -172,11 +207,11 @@ impl Root { // Try both the leaf and internal node serialization; in some cases, a single leaf // may still be persisted as an internal node. 
Since serialization of an internal node with a single child // is always shorter than that a leaf, the order (first leaf, then internal node) is chosen intentionally. - LeafNode::deserialize(bytes) + LeafNode::deserialize(bytes, strict) .map(Node::Leaf) - .or_else(|_| InternalNode::deserialize(bytes).map(Node::Internal))? + .or_else(|_| InternalNode::deserialize(bytes, strict).map(Node::Internal))? } - _ => Node::Internal(InternalNode::deserialize(bytes)?), + _ => Node::Internal(InternalNode::deserialize(bytes, strict)?), }; Ok(Self::new(leaf_count, node)) } @@ -440,7 +475,7 @@ mod tests { assert_eq!(buffer[64], 42); // leaf index assert_eq!(buffer.len(), 65); - let leaf_copy = LeafNode::deserialize(&buffer).unwrap(); + let leaf_copy = LeafNode::deserialize(&buffer, true).unwrap(); assert_eq!(leaf_copy, leaf); } @@ -471,7 +506,7 @@ mod tests { let child_count = bitmap.count_ones(); assert_eq!(child_count, 2); - let node_copy = InternalNode::deserialize(&buffer).unwrap(); + let node_copy = InternalNode::deserialize(&buffer, true).unwrap(); assert_eq!(node_copy, node); } @@ -482,7 +517,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer, [0]); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } @@ -494,7 +529,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer[0], 1); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } @@ -506,7 +541,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer[0], 2); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } } diff --git a/core/lib/merkle_tree/src/types/internal.rs b/core/lib/merkle_tree/src/types/internal.rs index 399f6c840a3..2db075d9221 100644 --- a/core/lib/merkle_tree/src/types/internal.rs +++ b/core/lib/merkle_tree/src/types/internal.rs @@ -2,7 +2,9 @@ //! some of these types are declared as public and can be even exported using the `unstable` module. //! Still, logically these types are private, so adding them to new public APIs etc. is a logical error. -use std::{collections::HashMap, fmt, num::NonZeroU64}; +use std::{collections::HashMap, fmt, num::NonZeroU64, str::FromStr}; + +use anyhow::Context; use crate::{ hasher::{HashTree, InternalNodeCache}, @@ -276,6 +278,34 @@ impl fmt::Debug for Nibbles { } } +impl FromStr for Nibbles { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + anyhow::ensure!(s.len() <= KEY_SIZE * 2, "too many nibbles"); + let mut bytes = NibblesBytes::default(); + for (i, byte) in s.bytes().enumerate() { + let nibble = match byte { + b'0'..=b'9' => byte - b'0', + b'A'..=b'F' => byte - b'A' + 10, + b'a'..=b'f' => byte - b'a' + 10, + _ => anyhow::bail!("unexpected nibble: {byte:?}"), + }; + + assert!(nibble < 16); + if i % 2 == 0 { + bytes[i / 2] = nibble * 16; + } else { + bytes[i / 2] += nibble; + } + } + Ok(Self { + nibble_count: s.len(), + bytes, + }) + } +} + /// Versioned key in a radix-16 Merkle tree. 
#[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct NodeKey { @@ -283,12 +313,31 @@ pub struct NodeKey { pub(crate) nibbles: Nibbles, } -impl fmt::Debug for NodeKey { +impl fmt::Display for NodeKey { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { write!(formatter, "{}:{}", self.version, self.nibbles) } } +impl fmt::Debug for NodeKey { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, formatter) + } +} + +impl FromStr for NodeKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let (version, nibbles) = s + .split_once(':') + .context("node key does not contain `:` delimiter")?; + let version = version.parse().context("invalid key version")?; + let nibbles = nibbles.parse().context("invalid nibbles")?; + Ok(Self { version, nibbles }) + } +} + impl NodeKey { pub(crate) const fn empty(version: u64) -> Self { Self { @@ -331,19 +380,13 @@ impl NodeKey { } } -impl fmt::Display for NodeKey { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(formatter, "{}:{}", self.version, self.nibbles) - } -} - /// Leaf node of the tree. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] pub struct LeafNode { - pub(crate) full_key: Key, - pub(crate) value_hash: ValueHash, - pub(crate) leaf_index: u64, + pub full_key: Key, + pub value_hash: ValueHash, + pub leaf_index: u64, } impl LeafNode { @@ -364,7 +407,7 @@ impl LeafNode { /// Reference to a child in an [`InternalNode`]. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] -pub(crate) struct ChildRef { +pub struct ChildRef { pub hash: ValueHash, pub version: u64, pub is_leaf: bool, @@ -449,7 +492,7 @@ impl InternalNode { self.cache.get_or_insert(cache) } - pub(crate) fn children(&self) -> impl Iterator + '_ { + pub fn children(&self) -> impl Iterator + '_ { self.children.iter() } @@ -510,6 +553,17 @@ impl From for Node { } } +/// Raw node fetched from a database. +#[derive(Debug)] +pub struct RawNode { + /// Bytes for a serialized node. + pub raw: Vec, + /// Leaf if a node can be deserialized into it. + pub leaf: Option, + /// Internal node if a node can be deserialized into it. + pub internal: Option, +} + /// Root node of the tree. Besides a [`Node`], contains the general information about the tree /// (e.g., the number of leaves). 
#[derive(Debug, Clone)] @@ -614,15 +668,23 @@ mod tests { fn nibbles_and_node_key_display() { let nibbles = Nibbles::new(&TEST_KEY, 5); assert_eq!(nibbles.to_string(), "deadb"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let nibbles = Nibbles::new(&TEST_KEY, 6); assert_eq!(nibbles.to_string(), "deadbe"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let nibbles = Nibbles::new(&TEST_KEY, 9); assert_eq!(nibbles.to_string(), "deadbeef0"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let node_key = nibbles.with_version(3); assert_eq!(node_key.to_string(), "3:deadbeef0"); + let restored: NodeKey = node_key.to_string().parse().unwrap(); + assert_eq!(restored, node_key); } #[test] diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs index 807ae023876..63db4b318b2 100644 --- a/core/lib/merkle_tree/src/types/mod.rs +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -6,7 +6,7 @@ pub(crate) use self::internal::{ ChildRef, Nibbles, NibblesBytes, StaleNodeKey, TreeTags, HASH_SIZE, KEY_SIZE, TREE_DEPTH, }; pub use self::internal::{ - InternalNode, LeafNode, Manifest, Node, NodeKey, ProfiledTreeOperation, Root, + InternalNode, LeafNode, Manifest, Node, NodeKey, ProfiledTreeOperation, RawNode, Root, }; mod internal; diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs index abd3dbbcd3f..fa7ec4cfde3 100644 --- a/core/lib/merkle_tree/tests/integration/domain.rs +++ b/core/lib/merkle_tree/tests/integration/domain.rs @@ -68,6 +68,31 @@ fn basic_workflow() { tree.verify_consistency(L1BatchNumber(0)).unwrap(); assert_eq!(tree.root_hash(), expected_root_hash); assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(1)); + + let keys = ["0:", "0:0"].map(|key| key.parse().unwrap()); + let raw_nodes = tree.reader().raw_nodes(&keys); + assert_eq!(raw_nodes.len(), 2); + let raw_root = raw_nodes[0].as_ref().unwrap(); + assert!(!raw_root.raw.is_empty()); + assert!(raw_root.internal.is_some()); + assert!(raw_root.leaf.is_none()); + + let raw_node = raw_nodes[1].as_ref().unwrap(); + assert!(!raw_node.raw.is_empty()); + assert!(raw_node.leaf.is_none()); + let raw_node = raw_node.internal.as_ref().unwrap(); + + let (nibble, _) = raw_node + .children() + .find(|(_, child_ref)| child_ref.is_leaf) + .unwrap(); + let leaf_key = format!("0:0{nibble:x}").parse().unwrap(); + let raw_nodes = tree.reader().raw_nodes(&[leaf_key]); + assert_eq!(raw_nodes.len(), 1); + let raw_leaf = raw_nodes.into_iter().next().unwrap().expect("no leaf"); + assert!(!raw_leaf.raw.is_empty()); + assert!(raw_leaf.leaf.is_some()); + assert!(raw_leaf.internal.is_none()); } #[test] diff --git a/core/node/metadata_calculator/src/api_server/metrics.rs b/core/node/metadata_calculator/src/api_server/metrics.rs index d185861d07c..92f948e0970 100644 --- a/core/node/metadata_calculator/src/api_server/metrics.rs +++ b/core/node/metadata_calculator/src/api_server/metrics.rs @@ -9,6 +9,8 @@ use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics pub(super) enum MerkleTreeApiMethod { Info, GetProofs, + GetNodes, + GetStaleKeys, } /// Metrics for Merkle tree API. 
diff --git a/core/node/metadata_calculator/src/api_server/mod.rs b/core/node/metadata_calculator/src/api_server/mod.rs index 6f46e8aeea8..4612d859a3d 100644 --- a/core/node/metadata_calculator/src/api_server/mod.rs +++ b/core/node/metadata_calculator/src/api_server/mod.rs @@ -1,6 +1,6 @@ //! Primitive Merkle tree API used internally to fetch proofs. -use std::{fmt, future::Future, net::SocketAddr, pin::Pin}; +use std::{collections::HashMap, fmt, future::Future, net::SocketAddr, pin::Pin}; use anyhow::Context as _; use async_trait::async_trait; @@ -10,12 +10,16 @@ use axum::{ response::{IntoResponse, Response}, routing, Json, Router, }; -use serde::{Deserialize, Serialize}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use tokio::sync::watch; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_health_check::{CheckHealth, Health, HealthStatus}; -use zksync_merkle_tree::NoVersionError; -use zksync_types::{L1BatchNumber, H256, U256}; +use zksync_merkle_tree::{ + unstable::{NodeKey, RawNode}, + NoVersionError, ValueHash, +}; +use zksync_types::{web3, L1BatchNumber, H256, U256}; +use zksync_utils::u256_to_h256; use self::metrics::{MerkleTreeApiMethod, API_METRICS}; use crate::{AsyncTreeReader, LazyAsyncTreeReader, MerkleTreeInfo}; @@ -77,6 +81,117 @@ impl TreeEntryWithProof { } } +#[derive(Debug, PartialEq, Eq, Hash)] +struct HexNodeKey(NodeKey); + +impl Serialize for HexNodeKey { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_str(&self.0.to_string()) + } +} + +impl<'de> Deserialize<'de> for HexNodeKey { + fn deserialize>(deserializer: D) -> Result { + struct HexNodeKeyVisitor; + + impl de::Visitor<'_> for HexNodeKeyVisitor { + type Value = HexNodeKey; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("hex-encoded versioned key like `123:c0ffee`") + } + + fn visit_str(self, v: &str) -> Result { + v.parse().map(HexNodeKey).map_err(de::Error::custom) + } + } + + deserializer.deserialize_str(HexNodeKeyVisitor) + } +} + +#[derive(Debug, Serialize)] +struct ApiLeafNode { + full_key: H256, + value_hash: H256, + leaf_index: u64, +} + +#[derive(Debug, Serialize)] +struct ApiChildRef { + hash: ValueHash, + version: u64, + is_leaf: bool, +} + +#[derive(Debug, Serialize)] +#[serde(transparent)] +struct ApiInternalNode(HashMap); + +#[derive(Debug, Serialize)] +struct ApiRawNode { + raw: web3::Bytes, + #[serde(skip_serializing_if = "Option::is_none")] + leaf: Option, + #[serde(skip_serializing_if = "Option::is_none")] + internal: Option, +} + +impl From for ApiRawNode { + fn from(node: RawNode) -> Self { + Self { + raw: web3::Bytes(node.raw), + leaf: node.leaf.map(|leaf| ApiLeafNode { + full_key: u256_to_h256(leaf.full_key), + value_hash: leaf.value_hash, + leaf_index: leaf.leaf_index, + }), + internal: node.internal.map(|internal| { + ApiInternalNode( + internal + .children() + .map(|(nibble, child_ref)| { + let nibble = if nibble < 10 { + b'0' + nibble + } else { + b'a' + nibble - 10 + }; + ( + char::from(nibble), + ApiChildRef { + hash: child_ref.hash, + version: child_ref.version, + is_leaf: child_ref.is_leaf, + }, + ) + }) + .collect(), + ) + }), + } + } +} + +#[derive(Debug, Deserialize)] +struct TreeNodesRequest { + keys: Vec, +} + +#[derive(Debug, Serialize)] +struct TreeNodesResponse { + nodes: HashMap, +} + +#[derive(Debug, Deserialize)] +struct StaleKeysRequest { + l1_batch_number: L1BatchNumber, +} + +#[derive(Debug, Serialize)] +struct StaleKeysResponse { + stale_keys: Vec, +} + /// 
Server-side tree API error. #[derive(Debug)] enum TreeApiServerError { @@ -343,6 +458,35 @@ impl AsyncTreeReader { Ok(Json(response)) } + async fn get_nodes_handler( + State(this): State, + Json(request): Json, + ) -> Json { + let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetNodes].start(); + let keys: Vec<_> = request.keys.iter().map(|key| key.0).collect(); + let nodes = this.clone().raw_nodes(keys).await; + let nodes = request + .keys + .into_iter() + .zip(nodes) + .filter_map(|(key, node)| Some((key, node?.into()))) + .collect(); + let response = TreeNodesResponse { nodes }; + latency.observe(); + Json(response) + } + + async fn get_stale_keys_handler( + State(this): State, + Json(request): Json, + ) -> Json { + let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetStaleKeys].start(); + let stale_keys = this.clone().raw_stale_keys(request.l1_batch_number).await; + let stale_keys = stale_keys.into_iter().map(HexNodeKey).collect(); + latency.observe(); + Json(StaleKeysResponse { stale_keys }) + } + async fn create_api_server( self, bind_address: &SocketAddr, @@ -353,6 +497,11 @@ impl AsyncTreeReader { let app = Router::new() .route("/", routing::get(Self::info_handler)) .route("/proofs", routing::post(Self::get_proofs_handler)) + .route("/debug/nodes", routing::post(Self::get_nodes_handler)) + .route( + "/debug/stale-keys", + routing::post(Self::get_stale_keys_handler), + ) .with_state(self); let listener = tokio::net::TcpListener::bind(bind_address) @@ -369,8 +518,8 @@ impl AsyncTreeReader { } tracing::info!("Stop signal received, Merkle tree API server is shutting down"); }) - .await - .context("Merkle tree API server failed")?; + .await + .context("Merkle tree API server failed")?; tracing::info!("Merkle tree API server shut down"); Ok(()) diff --git a/core/node/metadata_calculator/src/api_server/tests.rs b/core/node/metadata_calculator/src/api_server/tests.rs index 42a3152e6b5..815522a4cd8 100644 --- a/core/node/metadata_calculator/src/api_server/tests.rs +++ b/core/node/metadata_calculator/src/api_server/tests.rs @@ -72,11 +72,67 @@ async fn merkle_tree_api() { assert_eq!(err.version_count, 6); assert_eq!(err.missing_version, 10); + let raw_nodes_response = api_client + .inner + .post(format!("http://{local_addr}/debug/nodes")) + .json(&serde_json::json!({ "keys": ["0:", "0:0"] })) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + let raw_nodes_response: serde_json::Value = raw_nodes_response.json().await.unwrap(); + assert_raw_nodes_response(&raw_nodes_response); + + let raw_stale_keys_response = api_client + .inner + .post(format!("http://{local_addr}/debug/stale-keys")) + .json(&serde_json::json!({ "l1_batch_number": 1 })) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + let raw_stale_keys_response: serde_json::Value = raw_stale_keys_response.json().await.unwrap(); + assert_raw_stale_keys_response(&raw_stale_keys_response); + // Stop the calculator and the tree API server. 
stop_sender.send_replace(true); api_server_task.await.unwrap().unwrap(); } +fn assert_raw_nodes_response(response: &serde_json::Value) { + let response = response.as_object().expect("not an object"); + let response = response["nodes"].as_object().expect("not an object"); + let root = response["0:"].as_object().expect("not an object"); + assert!( + root.len() == 2 && root.contains_key("internal") && root.contains_key("raw"), + "{root:#?}" + ); + let root = root["internal"].as_object().expect("not an object"); + for key in root.keys() { + assert_eq!(key.len(), 1, "{key}"); + let key = key.as_bytes()[0]; + assert_matches!(key, b'0'..=b'9' | b'a'..=b'f'); + } + + let node = response["0:0"].as_object().expect("not an object"); + assert!( + node.len() == 2 && node.contains_key("internal") && node.contains_key("raw"), + "{node:#?}" + ); +} + +fn assert_raw_stale_keys_response(response: &serde_json::Value) { + let response = response.as_object().expect("not an object"); + let stale_keys = response["stale_keys"].as_array().expect("not an array"); + assert!(!stale_keys.is_empty()); // At least the root is always obsoleted + for stale_key in stale_keys { + let stale_key = stale_key.as_str().expect("not a string"); + stale_key.parse::().unwrap(); + } +} + #[tokio::test] async fn api_client_connection_error() { // Use an address that will definitely fail on a timeout. diff --git a/core/node/metadata_calculator/src/helpers.rs b/core/node/metadata_calculator/src/helpers.rs index b6989afb179..3f370afaf77 100644 --- a/core/node/metadata_calculator/src/helpers.rs +++ b/core/node/metadata_calculator/src/helpers.rs @@ -22,6 +22,7 @@ use zksync_health_check::{CheckHealth, Health, HealthStatus, ReactiveHealthCheck use zksync_merkle_tree::{ domain::{TreeMetadata, ZkSyncTree, ZkSyncTreeReader}, recovery::{MerkleTreeRecovery, PersistenceThreadHandle}, + unstable::{NodeKey, RawNode}, Database, Key, MerkleTreeColumnFamily, NoVersionError, RocksDBWrapper, TreeEntry, TreeEntryWithProof, TreeInstruction, }; @@ -35,7 +36,7 @@ use zksync_types::{ use super::{ metrics::{LoadChangesStage, TreeUpdateStage, METRICS}, pruning::PruningHandles, - MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, + MerkleTreeReaderConfig, MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, }; /// General information about the Merkle tree. @@ -176,6 +177,40 @@ fn create_db_sync(config: &MetadataCalculatorConfig) -> anyhow::Result anyhow::Result { + tokio::task::spawn_blocking(move || { + let MerkleTreeReaderConfig { + db_path, + max_open_files, + multi_get_chunk_size, + block_cache_capacity, + include_indices_and_filters_in_block_cache, + } = config; + + tracing::info!( + "Initializing Merkle tree database at `{db_path}` (max open files: {max_open_files:?}) with {multi_get_chunk_size} multi-get chunk size, \ + {block_cache_capacity}B block cache (indices & filters included: {include_indices_and_filters_in_block_cache:?})" + ); + let mut db = RocksDB::with_options( + db_path.as_ref(), + RocksDBOptions { + block_cache_capacity: Some(block_cache_capacity), + include_indices_and_filters_in_block_cache, + max_open_files, + ..RocksDBOptions::default() + } + )?; + if cfg!(test) { + db = db.with_sync_writes(); + } + Ok(RocksDBWrapper::from(db)) + }) + .await + .context("panicked creating Merkle tree RocksDB")? +} + /// Wrapper around the "main" tree implementation used by [`MetadataCalculator`]. /// /// Async methods provided by this wrapper are not cancel-safe! 
This is probably not an issue; @@ -307,6 +342,13 @@ pub struct AsyncTreeReader { } impl AsyncTreeReader { + pub(super) fn new(db: RocksDBWrapper, mode: MerkleTreeMode) -> anyhow::Result { + Ok(Self { + inner: ZkSyncTreeReader::new(db)?, + mode, + }) + } + fn downgrade(&self) -> WeakAsyncTreeReader { WeakAsyncTreeReader { db: self.inner.db().clone().into_inner().downgrade(), @@ -366,6 +408,18 @@ impl AsyncTreeReader { .await .unwrap() } + + pub(crate) async fn raw_nodes(self, keys: Vec) -> Vec> { + tokio::task::spawn_blocking(move || self.inner.raw_nodes(&keys)) + .await + .unwrap() + } + + pub(crate) async fn raw_stale_keys(self, l1_batch_number: L1BatchNumber) -> Vec { + tokio::task::spawn_blocking(move || self.inner.raw_stale_keys(l1_batch_number)) + .await + .unwrap() + } } /// Version of async tree reader that holds a weak reference to RocksDB. Used in [`MerkleTreeHealthCheck`]. diff --git a/core/node/metadata_calculator/src/lib.rs b/core/node/metadata_calculator/src/lib.rs index 451090694b2..5c64330a0e7 100644 --- a/core/node/metadata_calculator/src/lib.rs +++ b/core/node/metadata_calculator/src/lib.rs @@ -27,6 +27,7 @@ pub use self::{ helpers::{AsyncTreeReader, LazyAsyncTreeReader, MerkleTreeInfo}, pruning::MerkleTreePruningTask, }; +use crate::helpers::create_readonly_db; pub mod api_server; mod helpers; @@ -264,3 +265,55 @@ impl MetadataCalculator { .await } } + +/// Configuration of [`TreeReaderTask`]. +#[derive(Debug, Clone)] +pub struct MerkleTreeReaderConfig { + /// Filesystem path to the RocksDB instance that stores the tree. + pub db_path: String, + /// Maximum number of files concurrently opened by RocksDB. Useful to fit into OS limits; can be used + /// as a rudimentary way to control RAM usage of the tree. + pub max_open_files: Option, + /// Chunk size for multi-get operations. Can speed up loading data for the Merkle tree on some environments, + /// but the effects vary wildly depending on the setup (e.g., the filesystem used). + pub multi_get_chunk_size: usize, + /// Capacity of RocksDB block cache in bytes. Reasonable values range from ~100 MiB to several GB. + pub block_cache_capacity: usize, + /// If specified, RocksDB indices and Bloom filters will be managed by the block cache, rather than + /// being loaded entirely into RAM on the RocksDB initialization. The block cache capacity should be increased + /// correspondingly; otherwise, RocksDB performance can significantly degrade. + pub include_indices_and_filters_in_block_cache: bool, +} + +/// Alternative to [`MetadataCalculator`] that provides readonly access to the Merkle tree. +#[derive(Debug)] +pub struct TreeReaderTask { + config: MerkleTreeReaderConfig, + tree_reader: watch::Sender>, +} + +impl TreeReaderTask { + /// Creates a new task with the provided configuration. + pub fn new(config: MerkleTreeReaderConfig) -> Self { + Self { + config, + tree_reader: watch::channel(None).0, + } + } + + /// Returns a reference to the tree reader. + pub fn tree_reader(&self) -> LazyAsyncTreeReader { + LazyAsyncTreeReader(self.tree_reader.subscribe()) + } + + /// Runs this task. The task exits on error, or when the tree reader is successfully initialized. + pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let db = tokio::select! 
{ + db_result = create_readonly_db(self.config) => db_result?, + _ = stop_receiver.changed() => return Ok(()), + }; + let reader = AsyncTreeReader::new(db, MerkleTreeMode::Lightweight)?; + self.tree_reader.send_replace(Some(reader)); + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 827ec69d942..4092ee6dcd5 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -7,7 +7,8 @@ use std::{ use anyhow::Context as _; use zksync_config::configs::{api::MerkleTreeApiConfig, database::MerkleTreeMode}; use zksync_metadata_calculator::{ - LazyAsyncTreeReader, MerkleTreePruningTask, MetadataCalculator, MetadataCalculatorConfig, + LazyAsyncTreeReader, MerkleTreePruningTask, MerkleTreeReaderConfig, MetadataCalculator, + MetadataCalculatorConfig, TreeReaderTask, }; use zksync_storage::RocksDB; @@ -19,7 +20,7 @@ use crate::{ web3_api::TreeApiClientResource, }, service::{ShutdownHook, StopReceiver}, - task::{Task, TaskId}, + task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, FromContext, IntoContext, }; @@ -205,3 +206,65 @@ impl Task for MerkleTreePruningTask { (*self).run(stop_receiver.0).await } } + +/// Mutually exclusive with [`MetadataCalculatorLayer`]. +#[derive(Debug)] +pub struct TreeApiServerLayer { + config: MerkleTreeReaderConfig, + api_config: MerkleTreeApiConfig, +} + +impl TreeApiServerLayer { + pub fn new(config: MerkleTreeReaderConfig, api_config: MerkleTreeApiConfig) -> Self { + Self { config, api_config } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct TreeApiServerOutput { + tree_api_client: TreeApiClientResource, + #[context(task)] + tree_reader_task: TreeReaderTask, + #[context(task)] + tree_api_task: TreeApiTask, +} + +#[async_trait::async_trait] +impl WiringLayer for TreeApiServerLayer { + type Input = (); + type Output = TreeApiServerOutput; + + fn layer_name(&self) -> &'static str { + "tree_api_server" + } + + async fn wire(self, (): Self::Input) -> Result { + let tree_reader_task = TreeReaderTask::new(self.config); + let bind_addr = (Ipv4Addr::UNSPECIFIED, self.api_config.port).into(); + let tree_api_task = TreeApiTask { + bind_addr, + tree_reader: tree_reader_task.tree_reader(), + }; + Ok(TreeApiServerOutput { + tree_api_client: TreeApiClientResource(Arc::new(tree_reader_task.tree_reader())), + tree_api_task, + tree_reader_task, + }) + } +} + +#[async_trait::async_trait] +impl Task for TreeReaderTask { + fn kind(&self) -> TaskKind { + TaskKind::OneshotTask + } + + fn id(&self) -> TaskId { + "merkle_tree_reader_task".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} From c76da16efc769243a02c6e859376182d95ab941d Mon Sep 17 00:00:00 2001 From: Lyova Potyomkin Date: Fri, 25 Oct 2024 13:43:56 +0300 Subject: [PATCH 130/140] fix: extend allowed storage slots for validation as per EIP-7562 (#3166) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ [EIP-7562](https://eips.ethereum.org/EIPS/eip-7562#validation-rules) allows reading `keccak256(address || x) + n` where `x` is `bytes32` and `n` is `0..128`. This PR adds support for the `+ n` as we didn't have it before. 
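For illustration, here is a minimal standalone sketch of the range check this PR introduces (it mirrors the `BTreeSet::range` lookup in the diff below, but uses `u128` slots instead of the real `U256`/`H256` types, and a hypothetical `allowed_bases` set standing in for `auxilary_allowed_slots`):

```rust
use std::collections::BTreeSet;

const MAX_ALLOWED_SLOT_OFFSET: u128 = 127;

/// Returns `true` if `slot == base + n` for some tracked base slot and `n` in `0..128`.
fn is_semantically_related(allowed_bases: &BTreeSet<u128>, slot: u128) -> bool {
    // Equivalent to asking whether some tracked base lies in `[slot - 127, slot]`.
    let from = slot.saturating_sub(MAX_ALLOWED_SLOT_OFFSET);
    allowed_bases.range(from..=slot).next().is_some()
}

fn main() {
    let mut allowed = BTreeSet::new();
    allowed.insert(1_000u128); // stands in for keccak256(address || x)
    assert!(is_semantically_related(&allowed, 1_000)); // n = 0
    assert!(is_semantically_related(&allowed, 1_127)); // n = 127: still allowed
    assert!(!is_semantically_related(&allowed, 1_128)); // n = 128: rejected
}
```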
## Why ❔ To support reading larger-than-1-slot structs from mappings, during validation ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --------- Co-authored-by: Vlad Bochok <41153528+vladbochok@users.noreply.github.com> --- core/lib/multivm/src/tracers/validator/mod.rs | 22 ++++++++++++++----- etc/env/base/chain.toml | 4 ++-- etc/env/base/contracts.toml | 4 ++-- 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index a1573f24c66..057551a9efe 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -1,4 +1,8 @@ -use std::{collections::HashSet, marker::PhantomData, sync::Arc}; +use std::{ + collections::{BTreeSet, HashSet}, + marker::PhantomData, + sync::Arc, +}; use once_cell::sync::OnceCell; use zksync_system_constants::{ @@ -8,7 +12,7 @@ use zksync_system_constants::{ use zksync_types::{ vm::VmVersion, web3::keccak256, AccountTreeId, Address, StorageKey, H256, U256, }; -use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; +use zksync_utils::{address_to_u256, be_bytes_to_safe_address, u256_to_h256}; use self::types::{NewTrustedValidationItems, ValidationTracerMode}; use crate::{ @@ -32,7 +36,7 @@ mod vm_virtual_blocks; #[derive(Debug, Clone)] pub struct ValidationTracer { validation_mode: ValidationTracerMode, - auxilary_allowed_slots: HashSet, + auxilary_allowed_slots: BTreeSet, user_address: Address, #[allow(dead_code)] @@ -51,6 +55,8 @@ pub struct ValidationTracer { type ValidationRoundResult = Result; impl ValidationTracer { + const MAX_ALLOWED_SLOT_OFFSET: u32 = 127; + pub fn new( params: ValidationParams, vm_version: VmVersion, @@ -131,9 +137,15 @@ impl ValidationTracer { } // The user is allowed to touch its own slots or slots semantically related to him. 
+ let from = u256_to_h256(key.saturating_sub(Self::MAX_ALLOWED_SLOT_OFFSET.into())); + let to = u256_to_h256(key); let valid_users_slot = address == self.user_address - || u256_to_account_address(&key) == self.user_address - || self.auxilary_allowed_slots.contains(&u256_to_h256(key)); + || key == address_to_u256(&self.user_address) + || self + .auxilary_allowed_slots + .range(from..=to) + .next() + .is_some(); if valid_users_slot { return true; } diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 903696e3a81..6d1fdae53ce 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -90,8 +90,8 @@ fee_model_version = "V2" validation_computational_gas_limit = 300000 save_call_traces = true -bootloader_hash = "0x010008c37ecadea8b003884eb9d81fdfb7161b3b309504e5318f15da19c500d8" -default_aa_hash = "0x0100055da70d970f98ca4677a4b2fcecef5354f345cc5c6d13a78339e5fd87a9" +bootloader_hash = "0x010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b678" +default_aa_hash = "0x0100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe30" protective_reads_persistence_enabled = false diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index dbadbbc2c77..735da993058 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -26,8 +26,8 @@ RECURSION_NODE_LEVEL_VK_HASH = "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a2 RECURSION_LEAF_LEVEL_VK_HASH = "0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c" GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" -GENESIS_ROOT = "0x28a7e67393021f957572495f8fdadc2c477ae3f4f413ae18c16cff6ee65680e2" -GENESIS_BATCH_COMMITMENT = "0xc57085380434970021d87774b377ce1bb12f5b6064af11595e70011965747def" +GENESIS_ROOT = "0x7275936e5a0063b159d5d22734931fea07871e8d57e564d61ef56e4a6ee23e5c" +GENESIS_BATCH_COMMITMENT = "0xf5f9a5abe62e8a6e0cb2d34d27435c3e5a8fbd7e2e54ca1d108fc58cb86c708a" PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 GENESIS_ROLLUP_LEAF_INDEX = "54" From 8e75d4b812b21bc26e2c38ceeb711a8a530d7bc2 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 25 Oct 2024 16:31:15 +0300 Subject: [PATCH 131/140] feat(api): Integrate new VM into API server (no tracers) (#3033) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Integrates the new VM into API server for 3 use cases: gas estimation, calls, and tx execution (without the validation stage). ## Why ❔ These use cases do not require tracers and could benefit from faster VM execution (particularly gas estimation, which runs the VM multiple times). ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. 
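As a rough sketch of the dispatch rule this adds to the API-side executor (the real logic is `MainOneshotExecutor::select_fast_vm_mode` further down in the diff; the free function and boolean arguments here are simplifications for illustration):

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum FastVmMode {
    Old,
    New,
    Shadow,
}

/// Picks the VM for a oneshot request. The fast VM is only used when no call tracer is
/// requested and the protocol version is recent enough for it; otherwise the request
/// silently falls back to the legacy VM.
fn select_vm_mode(configured: FastVmMode, trace_calls: bool, version_supported: bool) -> FastVmMode {
    if trace_calls || !version_supported {
        FastVmMode::Old
    } else {
        configured
    }
}

fn main() {
    assert_eq!(select_vm_mode(FastVmMode::New, false, true), FastVmMode::New);
    // `debug_traceCall`-style requests keep using the legacy VM.
    assert_eq!(select_vm_mode(FastVmMode::New, true, true), FastVmMode::Old);
    // So do requests against protocol versions the fast VM does not support.
    assert_eq!(select_vm_mode(FastVmMode::Shadow, false, false), FastVmMode::Old);
}
```

Operators opt in via the new `EXPERIMENTAL_VM_API_FAST_VM_MODE` env variable (or the matching `api_fast_vm_mode` file-based config field); when unset, the legacy VM is used.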
--------- Co-authored-by: Joonatan Saarhelo --- .github/workflows/ci-core-reusable.yml | 10 +- Cargo.lock | 1 + core/bin/zksync_server/src/node_builder.rs | 8 +- core/lib/config/src/configs/experimental.rs | 5 + core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/vm_runner.rs | 2 + .../src/versions/testonly/l1_tx_execution.rs | 49 ++- .../versions/vm_fast/tests/l1_tx_execution.rs | 8 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 27 +- .../vm_latest/tests/l1_tx_execution.rs | 8 +- core/lib/multivm/src/vm_instance.rs | 2 +- core/lib/protobuf_config/src/experimental.rs | 17 +- .../src/proto/config/experimental.proto | 1 + core/lib/vm_executor/Cargo.toml | 1 + core/lib/vm_executor/src/oneshot/metrics.rs | 16 +- core/lib/vm_executor/src/oneshot/mod.rs | 331 ++++++++++++------ core/lib/vm_executor/src/oneshot/tests.rs | 107 ++++++ core/lib/vm_executor/src/testonly.rs | 32 +- core/lib/vm_interface/src/storage/mod.rs | 2 + .../lib/vm_interface/src/storage/overrides.rs | 70 ++++ core/lib/vm_interface/src/types/inputs/mod.rs | 2 +- .../src/execution_sandbox/execute.rs | 18 +- .../src/execution_sandbox/storage.rs | 148 +++----- .../src/execution_sandbox/validate.rs | 7 +- core/node/api_server/src/testonly.rs | 29 +- core/node/api_server/src/tx_sender/mod.rs | 8 + .../src/tx_sender/tests/gas_estimation.rs | 38 +- .../api_server/src/tx_sender/tests/mod.rs | 3 +- core/node/api_server/src/web3/tests/vm.rs | 5 +- core/node/consensus/src/vm.rs | 5 +- .../layers/web3_api/tx_sender.rs | 13 +- .../overrides/tests/integration.yaml | 4 + .../overrides/tests/loadtest-new.yaml | 4 + .../overrides/tests/loadtest-old.yaml | 1 + 34 files changed, 721 insertions(+), 262 deletions(-) create mode 100644 core/lib/vm_executor/src/oneshot/tests.rs create mode 100644 core/lib/vm_interface/src/storage/overrides.rs create mode 100644 etc/env/file_based/overrides/tests/integration.yaml diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index c79e3431576..fb43133868b 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -105,7 +105,7 @@ jobs: - name: Loadtest configuration run: | - echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 21000 || 16000 }} >> .env + echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 30000 || 16000 }} >> .env echo ACCOUNTS_AMOUNT="100" >> .env echo MAX_INFLIGHT_TXS="10" >> .env echo SYNC_API_REQUESTS_LIMIT="15" >> .env @@ -360,12 +360,16 @@ jobs: - name: Run servers run: | + # Override config for part of chains to test the default config as well + ci_run zkstack dev config-writer --path etc/env/file_based/overrides/tests/integration.yaml --chain era + ci_run zkstack dev config-writer --path etc/env/file_based/overrides/tests/integration.yaml --chain validium + ci_run zkstack server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & ci_run zkstack server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & ci_run zkstack server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & ci_run zkstack server --ignore-prerequisites --chain consensus \ - --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ - &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & + --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ + 
&> ${{ env.SERVER_LOGS_DIR }}/consensus.log & ci_run sleep 5 diff --git a/Cargo.lock b/Cargo.lock index 64ae0a9a12f..de2c2d6c9b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11351,6 +11351,7 @@ dependencies = [ "assert_matches", "async-trait", "once_cell", + "test-casing", "tokio", "tracing", "vise", diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index e2bd487f22b..19edef6e4ee 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -309,10 +309,12 @@ impl MainNodeBuilder { latest_values_cache_size: rpc_config.latest_values_cache_size() as u64, latest_values_max_block_lag: rpc_config.latest_values_max_block_lag(), }; + let vm_config = try_load_config!(self.configs.experimental_vm_config); // On main node we always use master pool sink. self.node.add_layer(MasterPoolSinkLayer); - self.node.add_layer(TxSenderLayer::new( + + let layer = TxSenderLayer::new( TxSenderConfig::new( &sk_config, &rpc_config, @@ -323,7 +325,9 @@ impl MainNodeBuilder { ), postgres_storage_caches_config, rpc_config.vm_concurrency_limit(), - )); + ); + let layer = layer.with_vm_mode(vm_config.api_fast_vm_mode); + self.node.add_layer(layer); Ok(self) } diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs index 618cfd3d388..a87a221ef22 100644 --- a/core/lib/config/src/configs/experimental.rs +++ b/core/lib/config/src/configs/experimental.rs @@ -106,4 +106,9 @@ pub struct ExperimentalVmConfig { /// the new VM doesn't produce call traces and can diverge from the old VM! #[serde(default)] pub state_keeper_fast_vm_mode: FastVmMode, + + /// Fast VM mode to use in the API server. Currently, some operations are not supported by the fast VM (e.g., `debug_traceCall` + /// or transaction validation), so the legacy VM will always be used for them. 
+ #[serde(default)] + pub api_fast_vm_mode: FastVmMode, } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 3bf4609bb70..f8e53e33042 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -336,6 +336,7 @@ impl Distribution for EncodeDist { configs::ExperimentalVmConfig { playground: self.sample(rng), state_keeper_fast_vm_mode: gen_fast_vm_mode(rng), + api_fast_vm_mode: gen_fast_vm_mode(rng), } } } diff --git a/core/lib/env_config/src/vm_runner.rs b/core/lib/env_config/src/vm_runner.rs index 730a79dd340..0a29d1256bd 100644 --- a/core/lib/env_config/src/vm_runner.rs +++ b/core/lib/env_config/src/vm_runner.rs @@ -55,6 +55,7 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=new + EXPERIMENTAL_VM_API_FAST_VM_MODE=shadow EXPERIMENTAL_VM_PLAYGROUND_FAST_VM_MODE=shadow EXPERIMENTAL_VM_PLAYGROUND_DB_PATH=/db/vm_playground EXPERIMENTAL_VM_PLAYGROUND_FIRST_PROCESSED_BATCH=123 @@ -64,6 +65,7 @@ mod tests { let config = ExperimentalVmConfig::from_env().unwrap(); assert_eq!(config.state_keeper_fast_vm_mode, FastVmMode::New); + assert_eq!(config.api_fast_vm_mode, FastVmMode::Shadow); assert_eq!(config.playground.fast_vm_mode, FastVmMode::Shadow); assert_eq!(config.playground.db_path.unwrap(), "/db/vm_playground"); assert_eq!(config.playground.first_processed_batch, L1BatchNumber(123)); diff --git a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs index e98a8385f02..37a2bf2bec2 100644 --- a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs @@ -1,3 +1,4 @@ +use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::l1_messenger_contract; use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; @@ -5,13 +6,17 @@ use zksync_test_account::TxType; use zksync_types::{ get_code_key, get_known_code_key, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Execute, ExecuteTransactionCommon, U256, + Address, Execute, ExecuteTransactionCommon, U256, }; use zksync_utils::{h256_to_u256, u256_to_h256}; -use super::{read_test_contract, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; +use super::{ + read_test_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm, BASE_SYSTEM_CONTRACTS, +}; use crate::{ - interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + interface::{ + ExecutionResult, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, VmRevertReason, + }, utils::StorageWritesDeduplicator, }; @@ -180,3 +185,41 @@ pub(crate) fn test_l1_tx_execution_high_gas_limit() { assert!(res.result.is_failed(), "The transaction should've failed"); } + +pub(crate) fn test_l1_tx_execution_gas_estimation_with_low_gas() { + let counter_contract = read_test_contract(); + let counter_address = Address::repeat_byte(0x11); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::EstimateFee) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_contract, + counter_address, + )]) + .with_rich_accounts(1) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let mut tx = account.get_test_contract_transaction( + counter_address, + false, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + let ExecuteTransactionCommon::L1(data) = &mut tx.common_data else { + unreachable!(); + }; + // This 
gas limit is chosen so that transaction starts getting executed by the bootloader, but then runs out of gas + // before its execution result is posted. + data.gas_limit = 15_000.into(); + + vm.vm.push_transaction(tx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); + assert_matches!( + &res.result, + ExecutionResult::Revert { output: VmRevertReason::General { msg, .. } } + if msg.contains("reverted with empty reason") + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 0174eeffd7e..f0295702017 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -1,6 +1,7 @@ use crate::{ versions::testonly::l1_tx_execution::{ - test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, + test_l1_tx_execution, test_l1_tx_execution_gas_estimation_with_low_gas, + test_l1_tx_execution_high_gas_limit, }, vm_fast::Vm, }; @@ -14,3 +15,8 @@ fn l1_tx_execution() { fn l1_tx_execution_high_gas_limit() { test_l1_tx_execution_high_gas_limit::>(); } + +#[test] +fn l1_tx_execution_gas_estimation_with_low_gas() { + test_l1_tx_execution_gas_estimation_with_low_gas::>(); +} diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index a2114a33948..6ebc4b9c571 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -51,8 +51,8 @@ use crate::{ }, vm_latest::{ constants::{ - get_vm_hook_params_start_position, get_vm_hook_position, OPERATOR_REFUNDS_OFFSET, - TX_GAS_LIMIT_OFFSET, VM_HOOK_PARAMS_COUNT, + get_result_success_first_slot, get_vm_hook_params_start_position, get_vm_hook_position, + OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET, VM_HOOK_PARAMS_COUNT, }, MultiVMSubversion, }, @@ -213,7 +213,22 @@ impl Vm { } Hook::TxHasEnded => { if let VmExecutionMode::OneTx = execution_mode { - break (last_tx_result.take().unwrap(), false); + // The bootloader may invoke `TxHasEnded` hook without posting a tx result previously. One case when this can happen + // is estimating gas for L1 transactions, if a transaction runs out of gas during execution. + let tx_result = last_tx_result.take().unwrap_or_else(|| { + let tx_has_failed = self.get_tx_result().is_zero(); + if tx_has_failed { + let output = VmRevertReason::General { + msg: "Transaction reverted with empty reason. 
Possibly out of gas" + .to_string(), + data: vec![], + }; + ExecutionResult::Revert { output } + } else { + ExecutionResult::Success { output: vec![] } + } + }); + break (tx_result, false); } } Hook::AskOperatorForRefund => { @@ -361,6 +376,12 @@ impl Vm { .unwrap() } + fn get_tx_result(&self) -> U256 { + let tx_idx = self.bootloader_state.current_tx(); + let slot = get_result_success_first_slot(VM_VERSION) as usize + tx_idx; + self.read_word_from_bootloader_heap(slot) + } + fn get_debug_log(&self) -> (String, String) { let hook_params = self.get_hook_params(); let mut msg = u256_to_h256(hook_params[0]).as_bytes().to_vec(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 4b7429c2829..3b8a01dbc80 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -1,6 +1,7 @@ use crate::{ versions::testonly::l1_tx_execution::{ - test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, + test_l1_tx_execution, test_l1_tx_execution_gas_estimation_with_low_gas, + test_l1_tx_execution_high_gas_limit, }, vm_latest::{HistoryEnabled, Vm}, }; @@ -14,3 +15,8 @@ fn l1_tx_execution() { fn l1_tx_execution_high_gas_limit() { test_l1_tx_execution_high_gas_limit::>(); } + +#[test] +fn l1_tx_execution_gas_estimation_with_low_gas() { + test_l1_tx_execution_gas_estimation_with_low_gas::>(); +} diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 5ff27046377..e2f72bd2411 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -234,7 +234,7 @@ pub type ShadowedFastVm = ShadowVm< /// Fast VM variants. #[derive(Debug)] -pub enum FastVmInstance { +pub enum FastVmInstance { /// Fast VM running in isolation. Fast(crate::vm_fast::Vm, Tr>), /// Fast VM shadowed by the latest legacy VM. diff --git a/core/lib/protobuf_config/src/experimental.rs b/core/lib/protobuf_config/src/experimental.rs index 63fa0ca51eb..750dc7b04f0 100644 --- a/core/lib/protobuf_config/src/experimental.rs +++ b/core/lib/protobuf_config/src/experimental.rs @@ -7,6 +7,14 @@ use zksync_protobuf::{repr::ProtoRepr, required}; use crate::{proto::experimental as proto, read_optional_repr}; +fn parse_vm_mode(raw: Option) -> anyhow::Result { + Ok(raw + .map(proto::FastVmMode::try_from) + .transpose() + .context("fast_vm_mode")? + .map_or_else(FastVmMode::default, |mode| mode.parse())) +} + impl ProtoRepr for proto::Db { type Type = configs::ExperimentalDBConfig; @@ -105,12 +113,8 @@ impl ProtoRepr for proto::Vm { fn read(&self) -> anyhow::Result { Ok(Self::Type { playground: read_optional_repr(&self.playground).unwrap_or_default(), - state_keeper_fast_vm_mode: self - .state_keeper_fast_vm_mode - .map(proto::FastVmMode::try_from) - .transpose() - .context("fast_vm_mode")? 
- .map_or_else(FastVmMode::default, |mode| mode.parse()), + state_keeper_fast_vm_mode: parse_vm_mode(self.state_keeper_fast_vm_mode)?, + api_fast_vm_mode: parse_vm_mode(self.api_fast_vm_mode)?, }) } @@ -120,6 +124,7 @@ impl ProtoRepr for proto::Vm { state_keeper_fast_vm_mode: Some( proto::FastVmMode::new(this.state_keeper_fast_vm_mode).into(), ), + api_fast_vm_mode: Some(proto::FastVmMode::new(this.api_fast_vm_mode).into()), } } } diff --git a/core/lib/protobuf_config/src/proto/config/experimental.proto b/core/lib/protobuf_config/src/proto/config/experimental.proto index 5e1d045ca67..87af8d3835c 100644 --- a/core/lib/protobuf_config/src/proto/config/experimental.proto +++ b/core/lib/protobuf_config/src/proto/config/experimental.proto @@ -37,4 +37,5 @@ message VmPlayground { message Vm { optional VmPlayground playground = 1; // optional optional FastVmMode state_keeper_fast_vm_mode = 2; // optional; if not set, fast VM is not used + optional FastVmMode api_fast_vm_mode = 3; // optional; if not set, fast VM is not used } diff --git a/core/lib/vm_executor/Cargo.toml b/core/lib/vm_executor/Cargo.toml index a967aaa969a..06a531252c5 100644 --- a/core/lib/vm_executor/Cargo.toml +++ b/core/lib/vm_executor/Cargo.toml @@ -26,3 +26,4 @@ vise.workspace = true [dev-dependencies] assert_matches.workspace = true +test-casing.workspace = true diff --git a/core/lib/vm_executor/src/oneshot/metrics.rs b/core/lib/vm_executor/src/oneshot/metrics.rs index 475463300f1..13a832ee3c8 100644 --- a/core/lib/vm_executor/src/oneshot/metrics.rs +++ b/core/lib/vm_executor/src/oneshot/metrics.rs @@ -50,7 +50,7 @@ pub(super) fn report_vm_memory_metrics( tx_id: &str, memory_metrics: &VmMemoryMetrics, vm_execution_took: Duration, - storage_metrics: &StorageViewStats, + storage_stats: &StorageViewStats, ) { MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner); MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history); @@ -65,10 +65,18 @@ pub(super) fn report_vm_memory_metrics( MEMORY_METRICS .storage_view_cache_size - .observe(storage_metrics.cache_size); + .observe(storage_stats.cache_size); MEMORY_METRICS .full - .observe(memory_metrics.full_size() + storage_metrics.cache_size); + .observe(memory_metrics.full_size() + storage_stats.cache_size); - STORAGE_METRICS.observe(&format!("Tx {tx_id}"), vm_execution_took, storage_metrics); + report_vm_storage_metrics(tx_id, vm_execution_took, storage_stats); +} + +pub(super) fn report_vm_storage_metrics( + tx_id: &str, + vm_execution_took: Duration, + storage_stats: &StorageViewStats, +) { + STORAGE_METRICS.observe(&format!("Tx {tx_id}"), vm_execution_took, storage_stats); } diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index 5f9e4dd3c6f..154c838f824 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -17,23 +17,26 @@ use once_cell::sync::OnceCell; use zksync_multivm::{ interface::{ executor::{OneshotExecutor, TransactionValidator}, - storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, + storage::{ReadStorage, StorageView, StorageWithOverrides}, tracer::{ValidationError, ValidationParams}, - ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, + utils::{DivergenceHandler, ShadowVm}, + Call, ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, - VmInterface, + VmFactory, 
VmInterface, }, - tracers::{CallTracer, StorageInvocations, ValidationTracer}, + is_supported_by_fast_vm, + tracers::{CallTracer, StorageInvocations, TracerDispatcher, ValidationTracer}, utils::adjust_pubdata_price_for_tx, - vm_latest::HistoryDisabled, + vm_latest::{HistoryDisabled, HistoryEnabled}, zk_evm_latest::ethereum_types::U256, - LegacyVmInstance, MultiVMTracer, + FastVmInstance, HistoryMode, LegacyVmInstance, MultiVMTracer, }; use zksync_types::{ block::pack_block_info, get_nonce_key, l2::L2Tx, utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, + vm::FastVmMode, AccountTreeId, Nonce, StorageKey, Transaction, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, }; @@ -54,10 +57,14 @@ mod contracts; mod env; mod metrics; mod mock; +#[cfg(test)] +mod tests; /// Main [`OneshotExecutor`] implementation used by the API server. -#[derive(Debug, Default)] +#[derive(Debug)] pub struct MainOneshotExecutor { + fast_vm_mode: FastVmMode, + panic_on_divergence: bool, missed_storage_invocation_limit: usize, execution_latency_histogram: Option<&'static vise::Histogram>, } @@ -67,11 +74,28 @@ impl MainOneshotExecutor { /// The limit is applied for calls and gas estimations, but not during transaction validation. pub fn new(missed_storage_invocation_limit: usize) -> Self { Self { + fast_vm_mode: FastVmMode::Old, + panic_on_divergence: false, missed_storage_invocation_limit, execution_latency_histogram: None, } } + /// Sets the fast VM mode used by this executor. + pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { + if !matches!(fast_vm_mode, FastVmMode::Old) { + tracing::warn!( + "Running new VM with modes {fast_vm_mode:?}; this can lead to incorrect node behavior" + ); + } + self.fast_vm_mode = fast_vm_mode; + } + + /// Causes the VM to panic on divergence whenever it executes in the shadow mode. By default, a divergence is logged on `ERROR` level. + pub fn panic_on_divergence(&mut self) { + self.panic_on_divergence = true; + } + /// Sets a histogram for measuring VM execution latency. 
pub fn set_execution_latency_histogram( &mut self, @@ -79,19 +103,31 @@ impl MainOneshotExecutor { ) { self.execution_latency_histogram = Some(histogram); } + + fn select_fast_vm_mode( + &self, + env: &OneshotEnv, + tracing_params: &OneshotTracingParams, + ) -> FastVmMode { + if tracing_params.trace_calls || !is_supported_by_fast_vm(env.system.version) { + FastVmMode::Old // the fast VM doesn't support call tracing or old protocol versions + } else { + self.fast_vm_mode + } + } } #[async_trait] -impl OneshotExecutor for MainOneshotExecutor +impl OneshotExecutor> for MainOneshotExecutor where S: ReadStorage + Send + 'static, { async fn inspect_transaction_with_bytecode_compression( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, args: TxExecutionArgs, - params: OneshotTracingParams, + tracing_params: OneshotTracingParams, ) -> anyhow::Result { let missed_storage_invocation_limit = match env.system.execution_mode { // storage accesses are not limited for tx validation @@ -100,35 +136,24 @@ where self.missed_storage_invocation_limit } }; - let execution_latency_histogram = self.execution_latency_histogram; + let sandbox = VmSandbox { + fast_vm_mode: self.select_fast_vm_mode(&env, &tracing_params), + panic_on_divergence: self.panic_on_divergence, + storage, + env, + execution_args: args, + execution_latency_histogram: self.execution_latency_histogram, + }; tokio::task::spawn_blocking(move || { - let mut tracers = vec![]; - let mut calls_result = Arc::>::default(); - if params.trace_calls { - tracers.push(CallTracer::new(calls_result.clone()).into_tracer_pointer()); - } - tracers.push( - StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer(), - ); - - let executor = VmSandbox::new(storage, env, args, execution_latency_histogram); - let mut result = executor.apply(|vm, transaction| { - let (compression_result, tx_result) = vm - .inspect_transaction_with_bytecode_compression( - &mut tracers.into(), - transaction, - true, - ); - OneshotTransactionExecutionResult { - tx_result: Box::new(tx_result), - compression_result: compression_result.map(drop), - call_traces: vec![], - } - }); - - result.call_traces = Arc::make_mut(&mut calls_result).take().unwrap_or_default(); - result + sandbox.execute_in_vm(|vm, transaction| { + vm.inspect_transaction_with_bytecode_compression( + missed_storage_invocation_limit, + tracing_params, + transaction, + true, + ) + }) }) .await .context("VM execution panicked") @@ -136,13 +161,13 @@ where } #[async_trait] -impl TransactionValidator for MainOneshotExecutor +impl TransactionValidator> for MainOneshotExecutor where S: ReadStorage + Send + 'static, { async fn validate_transaction( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, @@ -152,23 +177,28 @@ where "Unexpected execution mode for tx validation: {:?} (expected `VerifyExecute`)", env.system.execution_mode ); - let execution_latency_histogram = self.execution_latency_histogram; + + let sandbox = VmSandbox { + fast_vm_mode: FastVmMode::Old, + panic_on_divergence: self.panic_on_divergence, + storage, + env, + execution_args: TxExecutionArgs::for_validation(tx), + execution_latency_histogram: self.execution_latency_histogram, + }; tokio::task::spawn_blocking(move || { let (validation_tracer, mut validation_result) = ValidationTracer::::new( validation_params, - env.system.version.into(), + sandbox.env.system.version.into(), ); let tracers = vec![validation_tracer.into_tracer_pointer()]; - let executor = 
VmSandbox::new( - storage, - env, - TxExecutionArgs::for_validation(tx), - execution_latency_histogram, - ); - let exec_result = executor.apply(|vm, transaction| { + let exec_result = sandbox.execute_in_vm(|vm, transaction| { + let Vm::Legacy(vm) = vm else { + unreachable!("Fast VM is never used for validation yet"); + }; vm.push_transaction(transaction); vm.inspect(&mut tracers.into(), InspectExecutionMode::OneTx) }); @@ -188,70 +218,99 @@ where } #[derive(Debug)] -struct VmSandbox { - vm: Box>, - storage_view: StoragePtr>, - transaction: Transaction, - execution_latency_histogram: Option<&'static vise::Histogram>, +enum Vm { + Legacy(LegacyVmInstance), + Fast(FastVmInstance), } -impl VmSandbox { - /// This method is blocking. - fn new( - storage: S, - mut env: OneshotEnv, - execution_args: TxExecutionArgs, - execution_latency_histogram: Option<&'static vise::Histogram>, - ) -> Self { - let mut storage_view = StorageView::new(storage); - Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block); - - let protocol_version = env.system.version; - if execution_args.adjust_pubdata_price { - env.l1_batch.fee_input = adjust_pubdata_price_for_tx( - env.l1_batch.fee_input, - execution_args.transaction.gas_per_pubdata_byte_limit(), - env.l1_batch.enforced_base_fee.map(U256::from), - protocol_version.into(), - ); +impl Vm { + fn inspect_transaction_with_bytecode_compression( + &mut self, + missed_storage_invocation_limit: usize, + params: OneshotTracingParams, + tx: Transaction, + with_compression: bool, + ) -> OneshotTransactionExecutionResult { + let mut calls_result = Arc::>::default(); + let (compression_result, tx_result) = match self { + Self::Legacy(vm) => { + let mut tracers = Self::create_legacy_tracers( + missed_storage_invocation_limit, + params.trace_calls.then(|| calls_result.clone()), + ); + vm.inspect_transaction_with_bytecode_compression(&mut tracers, tx, with_compression) + } + Self::Fast(vm) => { + assert!( + !params.trace_calls, + "Call tracing is not supported by fast VM yet" + ); + let legacy_tracers = Self::create_legacy_tracers::( + missed_storage_invocation_limit, + None, + ); + let mut full_tracer = (legacy_tracers.into(), ()); + vm.inspect_transaction_with_bytecode_compression( + &mut full_tracer, + tx, + with_compression, + ) + } }; - let storage_view = storage_view.to_rc_ptr(); - let vm = Box::new(LegacyVmInstance::new_with_specific_version( - env.l1_batch, - env.system, - storage_view.clone(), - protocol_version.into_api_vm_version(), - )); + OneshotTransactionExecutionResult { + tx_result: Box::new(tx_result), + compression_result: compression_result.map(drop), + call_traces: Arc::make_mut(&mut calls_result).take().unwrap_or_default(), + } + } - Self { - vm, - storage_view, - transaction: execution_args.transaction, - execution_latency_histogram, + fn create_legacy_tracers( + missed_storage_invocation_limit: usize, + calls_result: Option>>>, + ) -> TracerDispatcher, H> { + let mut tracers = vec![]; + if let Some(calls_result) = calls_result { + tracers.push(CallTracer::new(calls_result).into_tracer_pointer()); } + tracers + .push(StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer()); + tracers.into() } +} +/// Full parameters necessary to instantiate a VM for oneshot execution. 
+#[derive(Debug)] +struct VmSandbox { + fast_vm_mode: FastVmMode, + panic_on_divergence: bool, + storage: StorageWithOverrides, + env: OneshotEnv, + execution_args: TxExecutionArgs, + execution_latency_histogram: Option<&'static vise::Histogram>, +} + +impl VmSandbox { /// This method is blocking. - fn setup_storage_view( - storage_view: &mut StorageView, + fn setup_storage( + storage: &mut StorageWithOverrides, execution_args: &TxExecutionArgs, current_block: Option, ) { let storage_view_setup_started_at = Instant::now(); if let Some(nonce) = execution_args.enforced_nonce { let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account()); - let full_nonce = storage_view.read_value(&nonce_key); + let full_nonce = storage.read_value(&nonce_key); let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + storage.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); } let payer = execution_args.transaction.payer(); let balance_key = storage_key_for_eth_balance(&payer); - let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); + let mut current_balance = h256_to_u256(storage.read_value(&balance_key)); current_balance += execution_args.added_balance; - storage_view.set_value(balance_key, u256_to_h256(current_balance)); + storage.set_value(balance_key, u256_to_h256(current_balance)); // Reset L2 block info if necessary. if let Some(current_block) = current_block { @@ -261,13 +320,13 @@ impl VmSandbox { ); let l2_block_info = pack_block_info(current_block.number.into(), current_block.timestamp); - storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); + storage.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); let l2_block_txs_rolling_hash_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ); - storage_view.set_value( + storage.set_value( l2_block_txs_rolling_hash_key, current_block.txs_rolling_hash, ); @@ -280,30 +339,90 @@ impl VmSandbox { } } - pub(super) fn apply(mut self, apply_fn: F) -> T - where - F: FnOnce(&mut LegacyVmInstance, Transaction) -> T, - { + /// This method is blocking. 
+ fn execute_in_vm( + mut self, + action: impl FnOnce(&mut Vm>, Transaction) -> T, + ) -> T { + Self::setup_storage( + &mut self.storage, + &self.execution_args, + self.env.current_block, + ); + + let protocol_version = self.env.system.version; + let mode = self.env.system.execution_mode; + if self.execution_args.adjust_pubdata_price { + self.env.l1_batch.fee_input = adjust_pubdata_price_for_tx( + self.env.l1_batch.fee_input, + self.execution_args.transaction.gas_per_pubdata_byte_limit(), + self.env.l1_batch.enforced_base_fee.map(U256::from), + protocol_version.into(), + ); + }; + + let transaction = self.execution_args.transaction; let tx_id = format!( "{:?}-{}", - self.transaction.initiator_account(), - self.transaction.nonce().unwrap_or(Nonce(0)) + transaction.initiator_account(), + transaction.nonce().unwrap_or(Nonce(0)) ); + let storage_view = StorageView::new(self.storage).to_rc_ptr(); + let mut vm = match self.fast_vm_mode { + FastVmMode::Old => Vm::Legacy(LegacyVmInstance::new_with_specific_version( + self.env.l1_batch, + self.env.system, + storage_view.clone(), + protocol_version.into_api_vm_version(), + )), + FastVmMode::New => Vm::Fast(FastVmInstance::fast( + self.env.l1_batch, + self.env.system, + storage_view.clone(), + )), + FastVmMode::Shadow => { + let mut vm = + ShadowVm::new(self.env.l1_batch, self.env.system, storage_view.clone()); + if !self.panic_on_divergence { + let transaction = format!("{:?}", transaction); + let handler = DivergenceHandler::new(move |errors, _| { + tracing::error!(transaction, ?mode, "{errors}"); + }); + vm.set_divergence_handler(handler); + } + Vm::Fast(FastVmInstance::Shadowed(vm)) + } + }; + let started_at = Instant::now(); - let result = apply_fn(&mut *self.vm, self.transaction); + let result = action(&mut vm, transaction); let vm_execution_took = started_at.elapsed(); if let Some(histogram) = self.execution_latency_histogram { histogram.observe(vm_execution_took); } - let memory_metrics = self.vm.record_vm_memory_metrics(); - metrics::report_vm_memory_metrics( - &tx_id, - &memory_metrics, - vm_execution_took, - &self.storage_view.borrow().stats(), - ); + + match &vm { + Vm::Legacy(vm) => { + let memory_metrics = vm.record_vm_memory_metrics(); + metrics::report_vm_memory_metrics( + &tx_id, + &memory_metrics, + vm_execution_took, + &storage_view.borrow().stats(), + ); + } + Vm::Fast(_) => { + // The new VM implementation doesn't have the same memory model as old ones, so it doesn't report memory metrics, + // only storage-related ones. + metrics::report_vm_storage_metrics( + &format!("Tx {tx_id}"), + vm_execution_took, + &storage_view.borrow().stats(), + ); + } + } result } } diff --git a/core/lib/vm_executor/src/oneshot/tests.rs b/core/lib/vm_executor/src/oneshot/tests.rs new file mode 100644 index 00000000000..65d2ff3727c --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/tests.rs @@ -0,0 +1,107 @@ +//! Oneshot executor tests. 
+ +use assert_matches::assert_matches; +use test_casing::{test_casing, Product}; +use zksync_multivm::interface::storage::InMemoryStorage; +use zksync_types::{ProtocolVersionId, H256}; +use zksync_utils::bytecode::hash_bytecode; + +use super::*; +use crate::testonly::{ + create_l2_transaction, default_l1_batch_env, default_system_env, FAST_VM_MODES, +}; + +const EXEC_MODES: [TxExecutionMode; 3] = [ + TxExecutionMode::EstimateFee, + TxExecutionMode::EthCall, + TxExecutionMode::VerifyExecute, +]; + +#[test] +fn selecting_vm_for_execution() { + let mut executor = MainOneshotExecutor::new(usize::MAX); + executor.set_fast_vm_mode(FastVmMode::New); + + for exec_mode in EXEC_MODES { + let env = OneshotEnv { + system: default_system_env(exec_mode), + l1_batch: default_l1_batch_env(1), + current_block: None, + }; + let mode = executor.select_fast_vm_mode(&env, &OneshotTracingParams::default()); + assert_matches!(mode, FastVmMode::New); + + // Tracing calls is not supported by the new VM. + let mode = executor.select_fast_vm_mode(&env, &OneshotTracingParams { trace_calls: true }); + assert_matches!(mode, FastVmMode::Old); + + // Old protocol versions are not supported either. + let mut old_env = env.clone(); + old_env.system.version = ProtocolVersionId::Version22; + let mode = executor.select_fast_vm_mode(&old_env, &OneshotTracingParams::default()); + assert_matches!(mode, FastVmMode::Old); + } +} + +#[test] +fn setting_up_nonce_and_balance_in_storage() { + let mut storage = StorageWithOverrides::new(InMemoryStorage::default()); + let tx = create_l2_transaction(1_000_000_000.into(), Nonce(1)); + let execution_args = TxExecutionArgs::for_gas_estimate(tx.clone().into()); + VmSandbox::setup_storage(&mut storage, &execution_args, None); + + // Check the overridden nonce and balance. 
+ let nonce_key = get_nonce_key(&tx.initiator_account()); + assert_eq!(storage.read_value(&nonce_key), H256::from_low_u64_be(1)); + let balance_key = storage_key_for_eth_balance(&tx.initiator_account()); + let expected_added_balance = tx.common_data.fee.gas_limit * tx.common_data.fee.max_fee_per_gas; + assert_eq!( + storage.read_value(&balance_key), + u256_to_h256(expected_added_balance) + ); + + let mut storage = InMemoryStorage::default(); + storage.set_value(balance_key, H256::from_low_u64_be(2_000_000_000)); + let mut storage = StorageWithOverrides::new(storage); + VmSandbox::setup_storage(&mut storage, &execution_args, None); + + assert_eq!( + storage.read_value(&balance_key), + u256_to_h256(expected_added_balance + U256::from(2_000_000_000)) + ); +} + +#[test_casing(9, Product((EXEC_MODES, FAST_VM_MODES)))] +#[tokio::test] +async fn inspecting_transfer(exec_mode: TxExecutionMode, fast_vm_mode: FastVmMode) { + let tx = create_l2_transaction(1_000_000_000.into(), Nonce(0)); + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + storage.set_value( + storage_key_for_eth_balance(&tx.initiator_account()), + u256_to_h256(u64::MAX.into()), + ); + let storage = StorageWithOverrides::new(storage); + + let l1_batch = default_l1_batch_env(1); + let env = OneshotEnv { + system: default_system_env(exec_mode), + current_block: Some(StoredL2BlockEnv { + number: l1_batch.first_l2_block.number - 1, + timestamp: l1_batch.first_l2_block.timestamp - 1, + txs_rolling_hash: H256::zero(), + }), + l1_batch, + }; + let args = TxExecutionArgs::for_gas_estimate(tx.into()); + let tracing = OneshotTracingParams::default(); + + let mut executor = MainOneshotExecutor::new(usize::MAX); + executor.set_fast_vm_mode(fast_vm_mode); + let result = executor + .inspect_transaction_with_bytecode_compression(storage, env, args, tracing) + .await + .unwrap(); + result.compression_result.unwrap(); + let exec_result = result.tx_result.result; + assert!(!exec_result.is_failed(), "{exec_result:?}"); +} diff --git a/core/lib/vm_executor/src/testonly.rs b/core/lib/vm_executor/src/testonly.rs index 5bcd604a432..2fa7f075db7 100644 --- a/core/lib/vm_executor/src/testonly.rs +++ b/core/lib/vm_executor/src/testonly.rs @@ -2,11 +2,14 @@ use once_cell::sync::Lazy; use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + utils::derive_base_fee_and_gas_per_pubdata, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + zk_evm_latest::ethereum_types::U256, }; use zksync_types::{ - block::L2BlockHasher, fee_model::BatchFeeInput, vm::FastVmMode, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, + block::L2BlockHasher, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, + transaction_request::PaymasterParams, vm::FastVmMode, Address, K256PrivateKey, L1BatchNumber, + L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, }; static BASE_SYSTEM_CONTRACTS: Lazy = @@ -43,3 +46,28 @@ pub(crate) fn default_l1_batch_env(number: u32) -> L1BatchEnv { fee_input: BatchFeeInput::sensible_l1_pegged_default(), } } + +pub(crate) fn create_l2_transaction(value: U256, nonce: Nonce) -> L2Tx { + let (max_fee_per_gas, gas_per_pubdata_limit) = derive_base_fee_and_gas_per_pubdata( + BatchFeeInput::sensible_l1_pegged_default(), + ProtocolVersionId::latest().into(), + ); + let fee = Fee { + gas_limit: 10_000_000.into(), + max_fee_per_gas: max_fee_per_gas.into(), + max_priority_fee_per_gas: 0_u64.into(), + 
gas_per_pubdata_limit: gas_per_pubdata_limit.into(), + }; + L2Tx::new_signed( + Some(Address::random()), + vec![], + nonce, + fee, + value, + L2ChainId::default(), + &K256PrivateKey::random(), + vec![], + PaymasterParams::default(), + ) + .unwrap() +} diff --git a/core/lib/vm_interface/src/storage/mod.rs b/core/lib/vm_interface/src/storage/mod.rs index 6cdcd33db68..aade56ca5d9 100644 --- a/core/lib/vm_interface/src/storage/mod.rs +++ b/core/lib/vm_interface/src/storage/mod.rs @@ -5,11 +5,13 @@ use zksync_types::{get_known_code_key, StorageKey, StorageValue, H256}; pub use self::{ // Note, that `test_infra` of the bootloader tests relies on this value to be exposed in_memory::{InMemoryStorage, IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID}, + overrides::StorageWithOverrides, snapshot::{StorageSnapshot, StorageWithSnapshot}, view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewStats}, }; mod in_memory; +mod overrides; mod snapshot; mod view; diff --git a/core/lib/vm_interface/src/storage/overrides.rs b/core/lib/vm_interface/src/storage/overrides.rs new file mode 100644 index 00000000000..ad5a3d8624f --- /dev/null +++ b/core/lib/vm_interface/src/storage/overrides.rs @@ -0,0 +1,70 @@ +//! VM storage functionality specifically used in the VM sandbox. + +use std::{ + collections::{HashMap, HashSet}, + fmt, +}; + +use zksync_types::{AccountTreeId, StorageKey, StorageValue, H256}; + +use super::ReadStorage; + +/// A storage view that allows to override some of the storage values. +#[derive(Debug)] +pub struct StorageWithOverrides { + storage_handle: S, + overridden_slots: HashMap, + overridden_factory_deps: HashMap>, + empty_accounts: HashSet, +} + +impl StorageWithOverrides { + /// Creates a new storage view based on the underlying storage. + pub fn new(storage: S) -> Self { + Self { + storage_handle: storage, + overridden_slots: HashMap::new(), + overridden_factory_deps: HashMap::new(), + empty_accounts: HashSet::new(), + } + } + + pub fn set_value(&mut self, key: StorageKey, value: StorageValue) { + self.overridden_slots.insert(key, value); + } + + pub fn store_factory_dep(&mut self, hash: H256, code: Vec) { + self.overridden_factory_deps.insert(hash, code); + } + + pub fn insert_erased_account(&mut self, account: AccountTreeId) { + self.empty_accounts.insert(account); + } +} + +impl ReadStorage for StorageWithOverrides { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + if let Some(value) = self.overridden_slots.get(key) { + return *value; + } + if self.empty_accounts.contains(key.account()) { + return H256::zero(); + } + self.storage_handle.read_value(key) + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.storage_handle.is_write_initial(key) + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.overridden_factory_deps + .get(&hash) + .cloned() + .or_else(|| self.storage_handle.load_factory_dep(hash)) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.storage_handle.get_enumeration_index(key) + } +} diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs index cb80ba7c138..83f87f0fe1d 100644 --- a/core/lib/vm_interface/src/types/inputs/mod.rs +++ b/core/lib/vm_interface/src/types/inputs/mod.rs @@ -15,7 +15,7 @@ mod l2_block; mod system_env; /// Full environment for oneshot transaction / call execution. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct OneshotEnv { /// System environment. 
pub system: SystemEnv, diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index bdd57462588..7958b5ed3c1 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -8,7 +8,7 @@ use tokio::runtime::Handle; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{ executor::{OneshotExecutor, TransactionValidator}, - storage::ReadStorage, + storage::{ReadStorage, StorageWithOverrides}, tracer::{ValidationError, ValidationParams}, Call, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, TransactionExecutionMetrics, TxExecutionArgs, VmExecutionResultAndLogs, @@ -20,11 +20,10 @@ use zksync_types::{ use zksync_vm_executor::oneshot::{MainOneshotExecutor, MockOneshotExecutor}; use super::{ - storage::StorageWithOverrides, vm_metrics::{self, SandboxStage}, BlockArgs, VmPermit, SANDBOX_METRICS, }; -use crate::tx_sender::SandboxExecutorOptions; +use crate::{execution_sandbox::storage::apply_state_override, tx_sender::SandboxExecutorOptions}; /// Action that can be executed by [`SandboxExecutor`]. #[derive(Debug)] @@ -109,6 +108,9 @@ impl SandboxExecutor { missed_storage_invocation_limit: usize, ) -> Self { let mut executor = MainOneshotExecutor::new(missed_storage_invocation_limit); + executor.set_fast_vm_mode(options.fast_vm_mode); + #[cfg(test)] + executor.panic_on_divergence(); executor .set_execution_latency_histogram(&SANDBOX_METRICS.sandbox[&SandboxStage::Execution]); Self { @@ -151,7 +153,7 @@ impl SandboxExecutor { .await?; let state_override = state_override.unwrap_or_default(); - let storage = StorageWithOverrides::new(storage, &state_override); + let storage = apply_state_override(storage, &state_override); let (execution_args, tracing_params) = action.into_parts(); let result = self .inspect_transaction_with_bytecode_compression( @@ -246,13 +248,13 @@ impl SandboxExecutor { } #[async_trait] -impl OneshotExecutor for SandboxExecutor +impl OneshotExecutor> for SandboxExecutor where S: ReadStorage + Send + 'static, { async fn inspect_transaction_with_bytecode_compression( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, args: TxExecutionArgs, tracing_params: OneshotTracingParams, @@ -283,13 +285,13 @@ where } #[async_trait] -impl TransactionValidator for SandboxExecutor +impl TransactionValidator> for SandboxExecutor where S: ReadStorage + Send + 'static, { async fn validate_transaction( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, diff --git a/core/node/api_server/src/execution_sandbox/storage.rs b/core/node/api_server/src/execution_sandbox/storage.rs index bf775d48490..c80356f6e36 100644 --- a/core/node/api_server/src/execution_sandbox/storage.rs +++ b/core/node/api_server/src/execution_sandbox/storage.rs @@ -1,127 +1,67 @@ //! VM storage functionality specifically used in the VM sandbox. 
-use std::{ - collections::{HashMap, HashSet}, - fmt, -}; - -use zksync_multivm::interface::storage::ReadStorage; +use zksync_multivm::interface::storage::{ReadStorage, StorageWithOverrides}; use zksync_types::{ api::state_override::{OverrideState, StateOverride}, get_code_key, get_known_code_key, get_nonce_key, utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, - AccountTreeId, StorageKey, StorageValue, H256, + AccountTreeId, StorageKey, H256, }; use zksync_utils::{h256_to_u256, u256_to_h256}; -/// A storage view that allows to override some of the storage values. -#[derive(Debug)] -pub(super) struct StorageWithOverrides { - storage_handle: S, - overridden_slots: HashMap, - overridden_factory_deps: HashMap>, - overridden_accounts: HashSet, -} - -impl StorageWithOverrides { - /// Creates a new storage view based on the underlying storage. - pub(super) fn new(storage: S, state_override: &StateOverride) -> Self { - let mut this = Self { - storage_handle: storage, - overridden_slots: HashMap::new(), - overridden_factory_deps: HashMap::new(), - overridden_accounts: HashSet::new(), - }; - this.apply_state_override(state_override); - this - } - - fn apply_state_override(&mut self, state_override: &StateOverride) { - for (account, overrides) in state_override.iter() { - if let Some(balance) = overrides.balance { - let balance_key = storage_key_for_eth_balance(account); - self.overridden_slots - .insert(balance_key, u256_to_h256(balance)); - } +/// This method is blocking. +pub(super) fn apply_state_override( + storage: S, + state_override: &StateOverride, +) -> StorageWithOverrides { + let mut storage = StorageWithOverrides::new(storage); + for (account, overrides) in state_override.iter() { + if let Some(balance) = overrides.balance { + let balance_key = storage_key_for_eth_balance(account); + storage.set_value(balance_key, u256_to_h256(balance)); + } - if let Some(nonce) = overrides.nonce { - let nonce_key = get_nonce_key(account); - let full_nonce = self.read_value(&nonce_key); - let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); - let new_full_nonce = u256_to_h256(nonces_to_full_nonce(nonce, deployment_nonce)); - self.overridden_slots.insert(nonce_key, new_full_nonce); - } + if let Some(nonce) = overrides.nonce { + let nonce_key = get_nonce_key(account); + let full_nonce = storage.read_value(&nonce_key); + let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); + let new_full_nonce = u256_to_h256(nonces_to_full_nonce(nonce, deployment_nonce)); + storage.set_value(nonce_key, new_full_nonce); + } - if let Some(code) = &overrides.code { - let code_key = get_code_key(account); - let code_hash = code.hash(); - self.overridden_slots.insert(code_key, code_hash); - let known_code_key = get_known_code_key(&code_hash); - self.overridden_slots - .insert(known_code_key, H256::from_low_u64_be(1)); - self.store_factory_dep(code_hash, code.clone().into_bytes()); - } + if let Some(code) = &overrides.code { + let code_key = get_code_key(account); + let code_hash = code.hash(); + storage.set_value(code_key, code_hash); + let known_code_key = get_known_code_key(&code_hash); + storage.set_value(known_code_key, H256::from_low_u64_be(1)); + storage.store_factory_dep(code_hash, code.clone().into_bytes()); + } - match &overrides.state { - Some(OverrideState::State(state)) => { - let account = AccountTreeId::new(*account); - self.override_account_state_diff(account, state); - self.overridden_accounts.insert(account); + match &overrides.state 
{ + Some(OverrideState::State(state)) => { + let account = AccountTreeId::new(*account); + for (&key, &value) in state { + storage.set_value(StorageKey::new(account, key), value); } - Some(OverrideState::StateDiff(state_diff)) => { - let account = AccountTreeId::new(*account); - self.override_account_state_diff(account, state_diff); + storage.insert_erased_account(account); + } + Some(OverrideState::StateDiff(state_diff)) => { + let account = AccountTreeId::new(*account); + for (&key, &value) in state_diff { + storage.set_value(StorageKey::new(account, key), value); } - None => { /* do nothing */ } } + None => { /* do nothing */ } } } - - fn store_factory_dep(&mut self, hash: H256, code: Vec) { - self.overridden_factory_deps.insert(hash, code); - } - - fn override_account_state_diff( - &mut self, - account: AccountTreeId, - state_diff: &HashMap, - ) { - let account_slots = state_diff - .iter() - .map(|(&slot, &value)| (StorageKey::new(account, slot), value)); - self.overridden_slots.extend(account_slots); - } -} - -impl ReadStorage for StorageWithOverrides { - fn read_value(&mut self, key: &StorageKey) -> StorageValue { - if let Some(value) = self.overridden_slots.get(key) { - return *value; - } - if self.overridden_accounts.contains(key.account()) { - return H256::zero(); - } - self.storage_handle.read_value(key) - } - - fn is_write_initial(&mut self, key: &StorageKey) -> bool { - self.storage_handle.is_write_initial(key) - } - - fn load_factory_dep(&mut self, hash: H256) -> Option> { - self.overridden_factory_deps - .get(&hash) - .cloned() - .or_else(|| self.storage_handle.load_factory_dep(hash)) - } - - fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { - self.storage_handle.get_enumeration_index(key) - } + storage } #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_multivm::interface::storage::InMemoryStorage; use zksync_types::{ api::state_override::{Bytecode, OverrideAccount}, @@ -184,7 +124,7 @@ mod tests { storage.set_value(retained_key, H256::repeat_byte(0xfe)); let erased_key = StorageKey::new(AccountTreeId::new(Address::repeat_byte(5)), H256::zero()); storage.set_value(erased_key, H256::repeat_byte(1)); - let mut storage = StorageWithOverrides::new(storage, &overrides); + let mut storage = apply_state_override(storage, &overrides); let balance = storage.read_value(&storage_key_for_eth_balance(&Address::repeat_byte(1))); assert_eq!(balance, H256::from_low_u64_be(1)); diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index 9a3c88f8bf0..758547abbd6 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -5,16 +5,15 @@ use tracing::Instrument; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::{ executor::TransactionValidator, + storage::StorageWithOverrides, tracer::{ValidationError as RawValidationError, ValidationParams}, }; use zksync_types::{ - api::state_override::StateOverride, fee_model::BatchFeeInput, l2::L2Tx, Address, - TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, + fee_model::BatchFeeInput, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, }; use super::{ execute::{SandboxAction, SandboxExecutor}, - storage::StorageWithOverrides, vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS}, BlockArgs, VmPermit, }; @@ -57,7 +56,7 @@ impl SandboxExecutor { let SandboxAction::Execution { tx, .. 
} = action else { unreachable!(); // by construction }; - let storage = StorageWithOverrides::new(storage, &StateOverride::default()); + let storage = StorageWithOverrides::new(storage); let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); let validation_result = self diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 6da8e333495..3add9c2f165 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -10,7 +10,7 @@ use zksync_contracts::{ }; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_system_constants::{L2_BASE_TOKEN_ADDRESS, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE}; use zksync_types::{ api::state_override::{Bytecode, OverrideAccount, OverrideState, StateOverride}, ethabi, @@ -18,11 +18,12 @@ use zksync_types::{ fee::Fee, fee_model::FeeParams, get_code_key, get_known_code_key, + l1::L1Tx, l2::L2Tx, - transaction_request::{CallRequest, PaymasterParams}, + transaction_request::{CallRequest, Eip712Meta, PaymasterParams}, utils::storage_key_for_eth_balance, AccountTreeId, Address, K256PrivateKey, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, - StorageKey, StorageLog, H256, U256, + StorageKey, StorageLog, EIP_712_TX_TYPE, H256, U256, }; use zksync_utils::{address_to_u256, u256_to_h256}; @@ -343,6 +344,8 @@ pub(crate) trait TestAccount { fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx; + fn create_l1_counter_tx(&self, increment: U256, revert: bool) -> L1Tx; + fn query_counter_value(&self) -> CallRequest; fn create_infinite_loop_tx(&self) -> L2Tx; @@ -482,6 +485,26 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn create_l1_counter_tx(&self, increment: U256, revert: bool) -> L1Tx { + let calldata = load_contract(COUNTER_CONTRACT_PATH) + .function("incrementWithRevert") + .expect("no `incrementWithRevert` function") + .encode_input(&[Token::Uint(increment), Token::Bool(revert)]) + .expect("failed encoding `incrementWithRevert` input"); + let request = CallRequest { + data: Some(calldata.into()), + from: Some(self.address()), + to: Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), + transaction_type: Some(EIP_712_TX_TYPE.into()), + eip712_meta: Some(Eip712Meta { + gas_per_pubdata: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Eip712Meta::default() + }), + ..CallRequest::default() + }; + L1Tx::from_request(request, false).unwrap() + } + fn query_counter_value(&self) -> CallRequest { let calldata = load_contract(COUNTER_CONTRACT_PATH) .function("get") diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 38794fe7137..75cc1ad602f 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -25,6 +25,7 @@ use zksync_types::{ l2::{error::TxCheckError::TxDuplication, L2Tx}, transaction_request::CallOverrides, utils::storage_key_for_eth_balance, + vm::FastVmMode, AccountTreeId, Address, L2ChainId, Nonce, ProtocolVersionId, Transaction, H160, H256, MAX_NEW_FACTORY_DEPS, U256, }; @@ -89,6 +90,7 @@ pub async fn build_tx_sender( /// Oneshot executor options used by the API server sandbox. #[derive(Debug)] pub struct SandboxExecutorOptions { + pub(crate) fast_vm_mode: FastVmMode, /// Env parameters to be used when estimating gas. pub(crate) estimate_gas: OneshotEnvParameters, /// Env parameters to be used when performing `eth_call` requests. 
@@ -114,6 +116,7 @@ impl SandboxExecutorOptions { .context("failed loading base contracts for calls / tx execution")?; Ok(Self { + fast_vm_mode: FastVmMode::Old, estimate_gas: OneshotEnvParameters::new( Arc::new(estimate_gas_contracts), chain_id, @@ -129,6 +132,11 @@ impl SandboxExecutorOptions { }) } + /// Sets the fast VM mode used by this executor. + pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { + self.fast_vm_mode = fast_vm_mode; + } + pub(crate) async fn mock() -> Self { Self::new(L2ChainId::default(), AccountTreeId::default(), u32::MAX) .await diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs index 4528d9cda12..7db1b833931 100644 --- a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -74,6 +74,28 @@ async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractE test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; } +#[tokio::test] +async fn initial_gas_estimate_for_l1_transaction() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + let tx = alice.create_l1_counter_tx(1.into(), false); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + assert!(initial_estimate.total_gas_charged.is_none()); + + let (vm_result, _) = estimator.unadjusted_step(15_000).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + let (vm_result, _) = estimator.unadjusted_step(1_000_000).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + #[test_casing(2, [false, true])] #[tokio::test] async fn initial_estimate_for_deep_recursion(with_reads: bool) { @@ -322,9 +344,10 @@ async fn insufficient_funds_error_for_transfer() { async fn test_estimating_gas( state_override: StateOverride, - tx: L2Tx, + tx: impl Into, acceptable_overestimation: u64, ) { + let tx = tx.into(); let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; let block_args = pending_block_args(&tx_sender).await; @@ -332,7 +355,7 @@ async fn test_estimating_gas( let fee_scale_factor = 1.0; let fee = tx_sender .get_txs_fee_in_wei( - tx.clone().into(), + tx.clone(), block_args.clone(), fee_scale_factor, acceptable_overestimation, @@ -350,7 +373,7 @@ async fn test_estimating_gas( let fee = tx_sender .get_txs_fee_in_wei( - tx.into(), + tx, block_args, fee_scale_factor, acceptable_overestimation, @@ -383,6 +406,15 @@ async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { test_estimating_gas(state_override, tx, acceptable_overestimation).await; } +#[tokio::test] +async fn estimating_gas_for_l1_transaction() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + let tx = alice.create_l1_counter_tx(1.into(), false); + + test_estimating_gas(state_override, tx, 0).await; +} + #[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] #[tokio::test] async fn estimating_gas_for_load_test_tx( diff --git 
a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs index cacd616202d..ea3f77fbcd8 100644 --- a/core/node/api_server/src/tx_sender/tests/mod.rs +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -145,13 +145,14 @@ async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { drop(storage); let genesis_config = genesis_params.config(); - let executor_options = SandboxExecutorOptions::new( + let mut executor_options = SandboxExecutorOptions::new( genesis_config.l2_chain_id, AccountTreeId::new(genesis_config.fee_account), u32::MAX, ) .await .unwrap(); + executor_options.set_fast_vm_mode(FastVmMode::Shadow); let pg_caches = PostgresStorageCaches::new(1, 1); let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 45128f579cd..7dd0164198a 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -16,8 +16,8 @@ use zksync_multivm::interface::{ }; use zksync_types::{ api::ApiStorageLog, fee_model::BatchFeeInput, get_intrinsic_constants, - transaction_request::CallRequest, K256PrivateKey, L2ChainId, PackedEthSignature, - StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, + transaction_request::CallRequest, vm::FastVmMode, K256PrivateKey, L2ChainId, + PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, }; use zksync_utils::u256_to_h256; use zksync_vm_executor::oneshot::{ @@ -92,6 +92,7 @@ impl BaseSystemContractsProvider for BaseContractsWithMockE fn executor_options_with_evm_emulator() -> SandboxExecutorOptions { let base_contracts = Arc::::default(); SandboxExecutorOptions { + fast_vm_mode: FastVmMode::Old, estimate_gas: OneshotEnvParameters::new( base_contracts.clone(), L2ChainId::default(), diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 46b84c34061..cbd4918dcee 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -11,7 +11,8 @@ use zksync_vm_executor::oneshot::{ CallOrExecute, MainOneshotExecutor, MultiVMBaseSystemContracts, OneshotEnvParameters, }; use zksync_vm_interface::{ - executor::OneshotExecutor, ExecutionResult, OneshotTracingParams, TxExecutionArgs, + executor::OneshotExecutor, storage::StorageWithOverrides, ExecutionResult, + OneshotTracingParams, TxExecutionArgs, }; use crate::{abi, storage::ConnectionPool}; @@ -89,7 +90,7 @@ impl VM { let output = ctx .wait(self.executor.inspect_transaction_with_bytecode_compression( - storage, + StorageWithOverrides::new(storage), env, TxExecutionArgs::for_eth_call(tx), OneshotTracingParams::default(), diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index ba1a69e23bb..023ef1059c7 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -6,7 +6,7 @@ use zksync_node_api_server::{ tx_sender::{SandboxExecutorOptions, TxSenderBuilder, TxSenderConfig}, }; use zksync_state::{PostgresStorageCaches, PostgresStorageCachesTask}; -use zksync_types::{AccountTreeId, Address}; +use zksync_types::{vm::FastVmMode, AccountTreeId, Address}; use zksync_web3_decl::{ client::{DynClient, L2}, jsonrpsee, @@ -60,6 +60,7 @@ pub struct TxSenderLayer { postgres_storage_caches_config: PostgresStorageCachesConfig, 
max_vm_concurrency: usize, whitelisted_tokens_for_aa_cache: bool, + vm_mode: FastVmMode, } #[derive(Debug, FromContext)] @@ -95,6 +96,7 @@ impl TxSenderLayer { postgres_storage_caches_config, max_vm_concurrency, whitelisted_tokens_for_aa_cache: false, + vm_mode: FastVmMode::Old, } } @@ -106,6 +108,12 @@ impl TxSenderLayer { self.whitelisted_tokens_for_aa_cache = value; self } + + /// Sets the fast VM modes used for all supported operations. + pub fn with_vm_mode(mut self, mode: FastVmMode) -> Self { + self.vm_mode = mode; + self + } } #[async_trait::async_trait] @@ -151,12 +159,13 @@ impl WiringLayer for TxSenderLayer { // TODO (BFT-138): Allow to dynamically reload API contracts let config = self.tx_sender_config; - let executor_options = SandboxExecutorOptions::new( + let mut executor_options = SandboxExecutorOptions::new( config.chain_id, AccountTreeId::new(config.fee_account_addr), config.validation_computational_gas_limit, ) .await?; + executor_options.set_fast_vm_mode(self.vm_mode); // Build `TxSender`. let mut tx_sender = TxSenderBuilder::new(config, replica_pool, tx_sink); diff --git a/etc/env/file_based/overrides/tests/integration.yaml b/etc/env/file_based/overrides/tests/integration.yaml new file mode 100644 index 00000000000..6ad031e2945 --- /dev/null +++ b/etc/env/file_based/overrides/tests/integration.yaml @@ -0,0 +1,4 @@ +experimental_vm: + # Use the shadow VM mode everywhere to catch divergences as early as possible + state_keeper_fast_vm_mode: SHADOW + api_fast_vm_mode: SHADOW diff --git a/etc/env/file_based/overrides/tests/loadtest-new.yaml b/etc/env/file_based/overrides/tests/loadtest-new.yaml index 2167f7347e0..e66625636b1 100644 --- a/etc/env/file_based/overrides/tests/loadtest-new.yaml +++ b/etc/env/file_based/overrides/tests/loadtest-new.yaml @@ -1,7 +1,11 @@ db: merkle_tree: mode: LIGHTWEIGHT +api: + web3_json_rpc: + estimate_gas_optimize_search: true experimental_vm: state_keeper_fast_vm_mode: NEW + api_fast_vm_mode: NEW mempool: delay_interval: 50 diff --git a/etc/env/file_based/overrides/tests/loadtest-old.yaml b/etc/env/file_based/overrides/tests/loadtest-old.yaml index a2d66d1cf4a..7b1a3587018 100644 --- a/etc/env/file_based/overrides/tests/loadtest-old.yaml +++ b/etc/env/file_based/overrides/tests/loadtest-old.yaml @@ -3,5 +3,6 @@ db: mode: LIGHTWEIGHT experimental_vm: state_keeper_fast_vm_mode: OLD + api_fast_vm_mode: OLD mempool: delay_interval: 50 From 91ec341b8aab19da7cfff125f0d94490df65cd06 Mon Sep 17 00:00:00 2001 From: Alexander Melnikov Date: Fri, 25 Oct 2024 07:35:05 -0600 Subject: [PATCH 132/140] chore(configs): Adjust file based configs (#3171) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ * Add fields to mainnet.yaml so that `zkstack` can create mainnet ecosystem * Add comment about typo in `max_acceptable_priority_fee_in_gwei` (caused problems before) * Add `l1_batch_min_age_before_execute_seconds` to avoid sending Execute tx too early ## Why ❔ * To make things work ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
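For reference, below is a minimal sketch of the new execute-delay override added in this patch. The nesting under the `eth` sender section is assumed from `general.yaml`; only the values come from the diff itself (76000 s, roughly 21 h, on mainnet and 1500 s, i.e. 25 min, on testnet). As the added comment notes, `max_acceptable_priority_fee_in_gwei` keeps its historical name but is read in wei, so 100000000000 corresponds to 100 gwei (100 * 10^9 wei).

```yaml
# etc/env/file_based/overrides/mainnet.yaml (illustrative sketch; exact nesting assumed)
eth:
  sender:
    # Do not send the Execute transaction for batches younger than ~21 hours.
    l1_batch_min_age_before_execute_seconds: 76000
```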
--- etc/env/ecosystems/mainnet.yaml | 5 +++++ etc/env/file_based/general.yaml | 2 +- etc/env/file_based/overrides/mainnet.yaml | 1 + etc/env/file_based/overrides/testnet.yaml | 1 + 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/etc/env/ecosystems/mainnet.yaml b/etc/env/ecosystems/mainnet.yaml index 7d4266e8b76..f7b09150793 100644 --- a/etc/env/ecosystems/mainnet.yaml +++ b/etc/env/ecosystems/mainnet.yaml @@ -1,3 +1,5 @@ +create2_factory_addr: 0xce0042b868300000d44a59004da54a005ffdcf9f +create2_factory_salt: '0x0000000000000000000000000000000000000000000000000000000000000000' ecosystem_contracts: bridgehub_proxy_addr: 0x303a465B659cBB0ab36eE643eA362c509EEb5213 state_transition_proxy_addr: 0xc2eE6b6af7d616f6e27ce7F4A451Aedc2b0F5f5C @@ -17,3 +19,6 @@ l1: verifier_addr: 0x70F3FBf8a427155185Ec90BED8a3434203de9604 validator_timelock_addr: 0x5D8ba173Dc6C3c90C8f7C04C9288BeF5FDbAd06E base_token_addr: '0x0000000000000000000000000000000000000000' +l2: + testnet_paymaster_addr: '0x0000000000000000000000000000000000000000' + default_l2_upgrader: '0x0000000000000000000000000000000000000000' diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 587ba4614a5..8758d38186f 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -106,7 +106,7 @@ eth: max_eth_tx_data_size: 120000 aggregated_proof_sizes: [ 1 ] max_aggregated_tx_gas: 15000000 - max_acceptable_priority_fee_in_gwei: 100000000000 + max_acceptable_priority_fee_in_gwei: 100000000000 # typo: value is in wei (100 gwei) pubdata_sending_mode: BLOBS gas_adjuster: default_priority_fee_per_gas: 1000000000 diff --git a/etc/env/file_based/overrides/mainnet.yaml b/etc/env/file_based/overrides/mainnet.yaml index 7565aac869a..847f9ae98aa 100644 --- a/etc/env/file_based/overrides/mainnet.yaml +++ b/etc/env/file_based/overrides/mainnet.yaml @@ -11,6 +11,7 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + l1_batch_min_age_before_execute_seconds: 76000 # 21h wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.06 diff --git a/etc/env/file_based/overrides/testnet.yaml b/etc/env/file_based/overrides/testnet.yaml index d36cf9fc7bc..4643a963ed7 100644 --- a/etc/env/file_based/overrides/testnet.yaml +++ b/etc/env/file_based/overrides/testnet.yaml @@ -11,6 +11,7 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + l1_batch_min_age_before_execute_seconds: 1500 # 25m wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.1 From f3724a71c7466451d380981b05d68d8afd70cdca Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 25 Oct 2024 16:01:02 +0200 Subject: [PATCH 133/140] feat(proof-data-handler): add tee_proof_generation_timeout_in_secs param (#3128) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add `tee_proof_generation_timeout_in_secs` parameter to the `proof-data-handler` configuration to avoid sharing the same `proof_generation_timeout_in_secs` timeout with the ZK prover. This timeout is for retrying TEE proof generation if it fails. Retries continue indefinitely until successful. ## Why ❔ The TEE prover is much faster than the ZK prover, so some of the ZK timeouts are too long to be shared with the TEE-specific code. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). 
- [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- core/lib/config/src/configs/proof_data_handler.rs | 13 +++++++++++++ core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/proof_data_handler.rs | 2 ++ core/lib/protobuf_config/src/proof_data_handler.rs | 9 +++++++++ .../protobuf_config/src/proto/config/prover.proto | 1 + .../proof_data_handler/src/tee_request_processor.rs | 2 +- core/node/proof_data_handler/src/tests.rs | 2 ++ etc/env/base/proof_data_handler.toml | 1 + etc/env/file_based/general.yaml | 1 + 9 files changed, 31 insertions(+), 1 deletion(-) diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs index 1094b1bb180..1d8703df51a 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -9,6 +9,9 @@ pub struct TeeConfig { pub tee_support: bool, /// All batches before this one are considered to be processed. pub first_tee_processed_batch: L1BatchNumber, + /// Timeout in seconds for retrying TEE proof generation if it fails. Retries continue + /// indefinitely until successful. + pub tee_proof_generation_timeout_in_secs: u16, } impl Default for TeeConfig { @@ -16,6 +19,8 @@ impl Default for TeeConfig { TeeConfig { tee_support: Self::default_tee_support(), first_tee_processed_batch: Self::default_first_tee_processed_batch(), + tee_proof_generation_timeout_in_secs: + Self::default_tee_proof_generation_timeout_in_secs(), } } } @@ -28,6 +33,14 @@ impl TeeConfig { pub fn default_first_tee_processed_batch() -> L1BatchNumber { L1BatchNumber(0) } + + pub fn default_tee_proof_generation_timeout_in_secs() -> u16 { + 600 + } + + pub fn tee_proof_generation_timeout(&self) -> Duration { + Duration::from_secs(self.tee_proof_generation_timeout_in_secs.into()) + } } #[derive(Debug, Deserialize, Clone, PartialEq)] diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index f8e53e33042..21ff9e2351b 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -681,6 +681,7 @@ impl Distribution for EncodeDist { tee_config: configs::TeeConfig { tee_support: self.sample(rng), first_tee_processed_batch: L1BatchNumber(rng.gen()), + tee_proof_generation_timeout_in_secs: self.sample(rng), }, } } diff --git a/core/lib/env_config/src/proof_data_handler.rs b/core/lib/env_config/src/proof_data_handler.rs index b5bfda4544e..47848585e76 100644 --- a/core/lib/env_config/src/proof_data_handler.rs +++ b/core/lib/env_config/src/proof_data_handler.rs @@ -28,6 +28,7 @@ mod tests { tee_config: TeeConfig { tee_support: true, first_tee_processed_batch: L1BatchNumber(1337), + tee_proof_generation_timeout_in_secs: 600, }, } } @@ -39,6 +40,7 @@ mod tests { PROOF_DATA_HANDLER_HTTP_PORT="3320" PROOF_DATA_HANDLER_TEE_SUPPORT="true" PROOF_DATA_HANDLER_FIRST_TEE_PROCESSED_BATCH="1337" + PROOF_DATA_HANDLER_TEE_PROOF_GENERATION_TIMEOUT_IN_SECS="600" "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/protobuf_config/src/proof_data_handler.rs b/core/lib/protobuf_config/src/proof_data_handler.rs index a587c702633..c01e163bd77 100644 --- a/core/lib/protobuf_config/src/proof_data_handler.rs +++ b/core/lib/protobuf_config/src/proof_data_handler.rs @@ -23,6 +23,12 @@ impl ProtoRepr for proto::ProofDataHandler { .first_tee_processed_batch .map(|x| L1BatchNumber(x as u32)) 
.unwrap_or_else(configs::TeeConfig::default_first_tee_processed_batch), + tee_proof_generation_timeout_in_secs: self + .tee_proof_generation_timeout_in_secs + .map(|x| x as u16) + .unwrap_or_else( + configs::TeeConfig::default_tee_proof_generation_timeout_in_secs, + ), }, }) } @@ -33,6 +39,9 @@ impl ProtoRepr for proto::ProofDataHandler { proof_generation_timeout_in_secs: Some(this.proof_generation_timeout_in_secs.into()), tee_support: Some(this.tee_config.tee_support), first_tee_processed_batch: Some(this.tee_config.first_tee_processed_batch.0 as u64), + tee_proof_generation_timeout_in_secs: Some( + this.tee_config.tee_proof_generation_timeout_in_secs.into(), + ), } } } diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index 92ba770a756..392834d25f3 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -109,4 +109,5 @@ message ProofDataHandler { optional uint32 proof_generation_timeout_in_secs = 2; // required; s optional bool tee_support = 3; // optional optional uint64 first_tee_processed_batch = 4; // optional + optional uint32 tee_proof_generation_timeout_in_secs = 5; // optional } diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 8e06d0c26bc..b265b94d4d7 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -164,7 +164,7 @@ impl TeeRequestProcessor { .tee_proof_generation_dal() .lock_batch_for_proving( tee_type, - self.config.proof_generation_timeout(), + self.config.tee_config.tee_proof_generation_timeout(), min_batch_number, ) .await diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 63ea087a81c..87c6bff8a1f 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -28,6 +28,7 @@ async fn request_tee_proof_inputs() { tee_config: TeeConfig { tee_support: true, first_tee_processed_batch: L1BatchNumber(0), + tee_proof_generation_timeout_in_secs: 600, }, }, L1BatchCommitmentMode::Rollup, @@ -86,6 +87,7 @@ async fn submit_tee_proof() { tee_config: TeeConfig { tee_support: true, first_tee_processed_batch: L1BatchNumber(0), + tee_proof_generation_timeout_in_secs: 600, }, }, L1BatchCommitmentMode::Rollup, diff --git a/etc/env/base/proof_data_handler.toml b/etc/env/base/proof_data_handler.toml index 7a1999a03c3..b56ac26fb17 100644 --- a/etc/env/base/proof_data_handler.toml +++ b/etc/env/base/proof_data_handler.toml @@ -1,4 +1,5 @@ [proof_data_handler] http_port = 3320 proof_generation_timeout_in_secs = 18000 +tee_proof_generation_timeout_in_secs = 600 tee_support = true diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 8758d38186f..5abee904765 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -169,6 +169,7 @@ witness_vector_generator: data_handler: http_port: 3320 proof_generation_timeout_in_secs: 18000 + tee_proof_generation_timeout_in_secs: 600 tee_support: true prover_gateway: api_url: http://127.0.0.1:3320 From dd166f887b11a8dfb039a0030dda923c481f67af Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Fri, 25 Oct 2024 16:55:26 +0200 Subject: [PATCH 134/140] feat(prover): Add scale failure events watching and pods eviction. 
(#3175) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add scale failure events watching. Add pending pods eviction to different cluster if there are `FailedScaleUp` events. Keep watching k8s if an error occurred. ## Why ❔ To failover to different cluster faster if there is no capacity in the current one. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. ref ZKD-1855 --- prover/Cargo.lock | 141 +++++++++++------ prover/Cargo.toml | 2 +- .../prover_autoscaler/src/cluster_types.rs | 7 + .../prover_autoscaler/src/global/scaler.rs | 31 +++- .../bin/prover_autoscaler/src/k8s/watcher.rs | 144 +++++++++++------- 5 files changed, 222 insertions(+), 103 deletions(-) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index dbc3b3425e4..747d3df987e 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -326,7 +326,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "itoa", "matchit", @@ -341,7 +341,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -1605,6 +1605,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "either" version = "1.12.0" @@ -1678,6 +1690,26 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "enum_dispatch" version = "0.3.13" @@ -2610,9 +2642,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -2639,7 +2671,7 @@ dependencies = [ "futures-util", "headers", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-util", "pin-project-lite", @@ -2657,7 +2689,7 @@ checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "log", "rustls", @@ -2674,7 +2706,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "pin-project-lite", "tokio", @@ -2702,7 +2734,7 @@ checksum = 
"70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "native-tls", "tokio", @@ -2712,20 +2744,19 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.5.0", "pin-project-lite", "socket2", "tokio", - "tower", "tower-service", "tracing", ] @@ -3075,7 +3106,7 @@ dependencies = [ "async-trait", "base64 0.22.1", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-util", "jsonrpsee-core", @@ -3086,7 +3117,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tracing", "url", ] @@ -3206,9 +3237,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" +checksum = "efffeb3df0bd4ef3e5d65044573499c0e4889b988070b08c50b25b1329289a1f" dependencies = [ "k8s-openapi", "kube-client", @@ -3219,9 +3250,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" +checksum = "8bf471ece8ff8d24735ce78dac4d091e9fcb8d74811aeb6b75de4d1c3f5de0f1" dependencies = [ "base64 0.22.1", "bytes", @@ -3232,7 +3263,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-http-proxy", "hyper-rustls", "hyper-timeout", @@ -3243,23 +3274,23 @@ dependencies = [ "pem", "rustls", "rustls-pemfile 2.1.2", - "secrecy", + "secrecy 0.10.3", "serde", "serde_json", "serde_yaml", "thiserror", "tokio", "tokio-util", - "tower", + "tower 0.5.1", "tower-http", "tracing", ] [[package]] name = "kube-core" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" +checksum = "f42346d30bb34d1d7adc5c549b691bce7aa3a1e60254e68fab7e2d7b26fe3d77" dependencies = [ "chrono", "form_urlencoded", @@ -3275,9 +3306,9 @@ dependencies = [ [[package]] name = "kube-derive" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa98be978eddd70a773aa8e86346075365bfb7eb48783410852dbf7cb57f0c27" +checksum = "f9364e04cc5e0482136c6ee8b7fb7551812da25802249f35b3def7aaa31e82ad" dependencies = [ "darling 0.20.10", "proc-macro2 1.0.85", @@ -3288,16 +3319,16 @@ dependencies = [ [[package]] name = "kube-runtime" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5895cb8aa641ac922408f128b935652b34c2995f16ad7db0984f6caa50217914" +checksum = "d3fbf1f6ffa98e65f1d2a9a69338bb60605d46be7edf00237784b89e62c9bd44" dependencies = [ "ahash 0.8.11", "async-broadcast", "async-stream", "async-trait", "backoff", - "derivative", + "educe", "futures 0.3.30", "hashbrown 0.14.5", "json-patch", @@ -4876,7 +4907,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 
1.5.0", "hyper-rustls", "hyper-tls 0.6.0", "hyper-util", @@ -5321,7 +5352,15 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ - "serde", + "zeroize", +] + +[[package]] +name = "secrecy" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a" +dependencies = [ "zeroize", ] @@ -6542,7 +6581,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-timeout", "hyper-util", "percent-encoding", @@ -6551,7 +6590,7 @@ dependencies = [ "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -6577,18 +6616,34 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "bitflags 2.6.0", "bytes", "http 1.1.0", "http-body 1.0.0", - "http-body-util", "mime", "pin-project-lite", "tower-layer", @@ -6598,15 +6653,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -7745,7 +7800,7 @@ dependencies = [ "ethabi", "hex", "num_enum 0.7.2", - "secrecy", + "secrecy 0.8.0", "serde", "serde_json", "serde_with", @@ -7830,7 +7885,7 @@ version = "0.1.0" dependencies = [ "anyhow", "rand 0.8.5", - "secrecy", + "secrecy 0.8.0", "serde", "strum", "strum_macros", @@ -8283,7 +8338,7 @@ dependencies = [ "hex", "prost 0.12.6", "rand 0.8.5", - "secrecy", + "secrecy 0.8.0", "serde_json", "serde_yaml", "time", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index af022e691c1..31c663590ef 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -32,7 +32,7 @@ indicatif = "0.16" itertools = "0.10.5" jemallocator = "0.5" k8s-openapi = { version = "0.23.0", features = ["v1_30"] } -kube = { version = "0.95.0", features = ["runtime", "derive"] } +kube = { version = "0.96.0", features = ["runtime", "derive"] } local-ip-address = "0.5.0" log = "0.4.20" md5 = "0.7.0" diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs index b800b86f3c2..e3e4c9b4df0 100644 --- a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs +++ 
b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs @@ -29,11 +29,18 @@ where ordered.serialize(serializer) } +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct ScaleEvent { + pub name: String, + pub time: DateTime, +} + #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct Namespace { #[serde(serialize_with = "ordered_map")] pub deployments: HashMap, pub pods: HashMap, + pub scale_errors: Vec, } #[derive(Debug, Clone, Default, Serialize, Deserialize)] diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs index 884174562a1..eb4249d071f 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -21,7 +21,7 @@ struct GPUPool { name: String, gpu: Gpu, provers: HashMap, // TODO: consider using i64 everywhere to avoid type casts. - preemtions: u64, + scale_errors: usize, max_pool_size: u32, } @@ -140,6 +140,11 @@ impl Scaler { .and_then(|inner_map| inner_map.get(&gpu)) .copied() .unwrap_or(0), + scale_errors: namespace_value + .scale_errors + .iter() + .filter(|v| v.time < Utc::now() - chrono::Duration::hours(1)) // TODO Move the duration into config. + .count(), ..Default::default() }); @@ -147,6 +152,12 @@ impl Scaler { e.provers.insert(PodStatus::Running, 0); } + let recent_scale_errors = namespace_value + .scale_errors + .iter() + .filter(|v| v.time < Utc::now() - chrono::Duration::minutes(4)) // TODO Move the duration into config. This should be at least x2 or run interval. + .count(); + for ppg in namespace_value .pods .iter() @@ -158,10 +169,12 @@ impl Scaler { ..Default::default() }); let mut status = PodStatus::from_str(&ppg.pod.status).unwrap_or_default(); - if status == PodStatus::Pending - && ppg.pod.changed < Utc::now() - self.long_pending_duration - { - status = PodStatus::LongPending; + if status == PodStatus::Pending { + if ppg.pod.changed < Utc::now() - self.long_pending_duration { + status = PodStatus::LongPending; + } else if recent_scale_errors > 0 { + status = PodStatus::NeedToMove; + } } tracing::info!( "pod {}: status: {}, real status: {}", @@ -172,7 +185,7 @@ impl Scaler { e.provers.entry(status).and_modify(|n| *n += 1).or_insert(1); } - tracing::info!("From pods {:?}", gp_map.sorted_debug()); + tracing::debug!("From pods {:?}", gp_map.sorted_debug()); gp_map.into_values().collect() } @@ -195,7 +208,7 @@ impl Scaler { a.sum_by_pod_status(PodStatus::LongPending) .cmp(&b.sum_by_pod_status(PodStatus::LongPending)), ) // Sort by long Pending pods. - .then(a.preemtions.cmp(&b.preemtions)) // Sort by preemtions in the cluster. + .then(a.scale_errors.cmp(&b.scale_errors)) // Sort by scale_errors in the cluster. 
.then( self.cluster_priorities .get(&a.name) @@ -455,6 +468,7 @@ mod tests { }, )] .into(), + ..Default::default() }, )] .into(), @@ -521,6 +535,7 @@ mod tests { }, )] .into(), + ..Default::default() }, )] .into(), @@ -681,6 +696,7 @@ mod tests { ) ] .into(), + ..Default::default() }, )] .into(), @@ -718,6 +734,7 @@ mod tests { ) ] .into(), + ..Default::default() }, )] .into(), diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs index f94dfc3704f..5384db082bc 100644 --- a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, sync::Arc}; -use chrono::Utc; +use chrono::{DateTime, Utc}; use futures::{stream, StreamExt, TryStreamExt}; use k8s_openapi::api; use kube::{ @@ -9,7 +9,7 @@ use kube::{ }; use tokio::sync::Mutex; -use crate::cluster_types::{Cluster, Deployment, Namespace, Pod}; +use crate::cluster_types::{Cluster, Deployment, Namespace, Pod, ScaleEvent}; #[derive(Clone)] pub struct Watcher { @@ -62,6 +62,15 @@ impl Watcher { .map_ok(Watched::Pod) .boxed(), ); + + let events: Api = Api::namespaced(self.client.clone(), namespace); + watchers.push( + watcher(events, watcher::Config::default()) + .default_backoff() + .applied_objects() + .map_ok(Watched::Event) + .boxed(), + ); } // select on applied events from all watchers let mut combo_stream = stream::select_all(watchers); @@ -70,61 +79,92 @@ impl Watcher { enum Watched { Deploy(api::apps::v1::Deployment), Pod(api::core::v1::Pod), + Event(api::core::v1::Event), } - while let Some(o) = combo_stream.try_next().await? { + while let Some(o) = combo_stream.next().await { match o { - Watched::Deploy(d) => { - let namespace = match d.namespace() { - Some(n) => n.to_string(), - None => continue, - }; - let mut cluster = self.cluster.lock().await; - let v = cluster.namespaces.get_mut(&namespace).unwrap(); - let dep = v - .deployments - .entry(d.name_any()) - .or_insert(Deployment::default()); - let nums = d.status.clone().unwrap_or_default(); - dep.running = nums.available_replicas.unwrap_or_default(); - dep.desired = nums.replicas.unwrap_or_default(); + Ok(o) => match o { + Watched::Deploy(d) => { + let namespace = match d.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let dep = v + .deployments + .entry(d.name_any()) + .or_insert(Deployment::default()); + let nums = d.status.clone().unwrap_or_default(); + dep.running = nums.available_replicas.unwrap_or_default(); + dep.desired = nums.replicas.unwrap_or_default(); - tracing::info!( - "Got deployment: {}, size: {}/{} un {}", - d.name_any(), - nums.available_replicas.unwrap_or_default(), - nums.replicas.unwrap_or_default(), - nums.unavailable_replicas.unwrap_or_default(), - ) - } - Watched::Pod(p) => { - let namespace = match p.namespace() { - Some(n) => n.to_string(), - None => continue, - }; - let mut cluster = self.cluster.lock().await; - let v = cluster.namespaces.get_mut(&namespace).unwrap(); - let pod = v.pods.entry(p.name_any()).or_insert(Pod::default()); - pod.owner = p - .owner_references() - .iter() - .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone())) - .collect::>() - .join(":"); - // TODO: Collect replica sets to match deployments and pods. 
- let phase = p - .status - .clone() - .unwrap_or_default() - .phase - .unwrap_or_default(); - if phase != pod.status { - // TODO: try to get an idea how to set correct value on restart. - pod.changed = Utc::now(); + tracing::info!( + "Got deployment: {}, size: {}/{} un {}", + d.name_any(), + nums.available_replicas.unwrap_or_default(), + nums.replicas.unwrap_or_default(), + nums.unavailable_replicas.unwrap_or_default(), + ) } - pod.status = phase; + Watched::Pod(p) => { + let namespace = match p.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let pod = v.pods.entry(p.name_any()).or_insert(Pod::default()); + pod.owner = p + .owner_references() + .iter() + .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone())) + .collect::>() + .join(":"); + // TODO: Collect replica sets to match deployments and pods. + let phase = p + .status + .clone() + .unwrap_or_default() + .phase + .unwrap_or_default(); + if phase != pod.status { + // TODO: try to get an idea how to set correct value on restart. + pod.changed = Utc::now(); + } + pod.status = phase; - tracing::info!("Got pod: {}", p.name_any()) - } + tracing::info!("Got pod: {}", p.name_any()) + } + Watched::Event(e) => { + let namespace: String = match e.namespace() { + Some(n) => n, + None => "".into(), + }; + let name = e.name_any(); + let reason = e.reason.unwrap_or_default(); + if reason != "FailedScaleUp" { + // Ignore all events which are not scale issues. + continue; + } + let time: DateTime = match e.last_timestamp { + Some(t) => t.0, + None => Utc::now(), + }; + tracing::debug!( + "Got event: {}/{}, message: {:?}; action: {:?}, reason: {:?}", + namespace, + name, + e.message.unwrap_or_default(), + e.action.unwrap_or_default(), + reason + ); + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + v.scale_errors.push(ScaleEvent { name, time }) + } + }, + Err(err) => tracing::warn!("Error during watch: {err:?}"), } } From 89eadd353c4fb84bb815ae56b29f4ff3467b80f3 Mon Sep 17 00:00:00 2001 From: EmilLuta Date: Sat, 26 Oct 2024 10:06:16 +0200 Subject: [PATCH 135/140] fix: allow compilation under current toolchain (#3176) Nightly breaks regex crate. The option is to either remove the nightly feature we used that now breaks, or pin nightly to something prior to 2024-10-17. The first option was picked, which requires updates to downstream dependencies. This PR updates said dependencies. 
--- Cargo.lock | 58 +++++++++++----------- Cargo.toml | 6 +-- prover/Cargo.lock | 122 +++++++++++++++++++++++----------------------- prover/Cargo.toml | 10 ++-- 4 files changed, 98 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index de2c2d6c9b2..2e3a8a1c3e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1327,14 +1327,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" +checksum = "2501cc688ef391013019495ae7035cfd54f86987e36d10f73976ce4c5d413c5a" dependencies = [ "derivative", "serde", - "zk_evm 0.150.6", - "zkevm_circuits 0.150.6", + "zk_evm 0.150.7", + "zkevm_circuits 0.150.7", ] [[package]] @@ -1394,11 +1394,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" +checksum = "917d27db531fdd98a51e42ea465bc097f48cc849e7fad68d7856087d15125be1" dependencies = [ - "circuit_encodings 0.150.6", + "circuit_encodings 0.150.7", "derivative", "rayon", "serde", @@ -9360,9 +9360,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" +checksum = "3cc74fbe2b45fd19e95c59ea792c795feebdb616ebaa463f0ac567f495f47387" dependencies = [ "anyhow", "lazy_static", @@ -9370,7 +9370,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.6", + "zk_evm_abstractions 0.150.7", ] [[package]] @@ -9401,15 +9401,15 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" +checksum = "37f333a3b059899df09e40deb041af881bc03e496fda5eec618ffb5e854ee7df" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", ] [[package]] @@ -9458,9 +9458,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" +checksum = "d06fb35b00699d25175a2ad447f86a9088af8b0bc698bb57086fb04c13e52eab" dependencies = [ "arrayvec 0.7.6", "boojum", @@ -9472,7 +9472,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", "zksync_cs_derive", ] @@ -9520,9 +9520,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" +checksum = "b83f3b279248af4ca86dec20a54127f02110b45570f3f6c1d13df49ba75c28a5" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -9646,7 +9646,7 @@ dependencies = [ "anyhow", "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "futures 0.3.30", "itertools 0.10.5", "num_cpus", @@ -9658,7 +9658,7 @@ dependencies = [ "vise", "zk_evm 0.133.0", 
"zk_evm 0.141.0", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -10377,9 +10377,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" +checksum = "dc58af8e4e4ad1a851ffd2275e6a44ead0f15a7eaac9dc9d60a56b3b9c9b08e8" dependencies = [ "boojum", "derivative", @@ -10389,7 +10389,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.6", + "zkevm_circuits 0.150.7", ] [[package]] @@ -10516,7 +10516,7 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "ethabi", "hex", "itertools 0.10.5", @@ -10530,7 +10530,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_contracts", "zksync_eth_signer", "zksync_mini_merkle_tree", @@ -10572,7 +10572,7 @@ dependencies = [ "tower-http", "tracing", "vise", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_config", "zksync_consensus_roles", "zksync_contracts", @@ -10968,7 +10968,7 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "serde", "serde_json", "serde_with", @@ -11330,8 +11330,8 @@ source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0cc dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.6", - "zkevm_opcode_defs 0.150.6", + "zk_evm_abstractions 0.150.7", + "zkevm_opcode_defs 0.150.7", "zksync_vm2_interface", ] diff --git a/Cargo.toml b/Cargo.toml index 0f8e6ba77ae..dc6fdf1727e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -219,15 +219,15 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. 
circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.6" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.7" } crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.30.1" } -kzg = { package = "zksync_kzg", version = "=0.150.6" } +kzg = { package = "zksync_kzg", version = "=0.150.7" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } zk_evm_1_3_3 = { package = "zk_evm", version = "0.133" } zk_evm_1_4_0 = { package = "zk_evm", version = "0.140" } zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } -zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.6" } +zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.7" } # New VM; pinned to a specific commit because of instability zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "df5bec3d04d64d434f9b0ccb285ba4681008f7b3" } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 747d3df987e..d68ef368a4a 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -651,9 +651,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c681a3f867afe40bcc188e5cb5260bbf5699531823affa3cbe28f7ca9b7bc9" +checksum = "4b63a717789f92f16fd566c78655d64017c690be59e473c3e769080c975a1f9e" dependencies = [ "boojum", "cmake", @@ -694,7 +694,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", - "regex-automata 0.4.6", + "regex-automata 0.4.8", "serde", ] @@ -799,11 +799,11 @@ dependencies = [ [[package]] name = "circuit_definitions" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "492404ea63c934d8e894325f0a741723bf91cd035cb34a92fddd8617c4a00fd3" +checksum = "76be9ee6e75f1f948d175ab9820ecc7189f72154c95ca503a1974012356f5363" dependencies = [ - "circuit_encodings 0.150.6", + "circuit_encodings 0.150.7", "crossbeam", "derivative", "seq-macro", @@ -849,14 +849,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" +checksum = "2501cc688ef391013019495ae7035cfd54f86987e36d10f73976ce4c5d413c5a" dependencies = [ "derivative", "serde", - "zk_evm 0.150.6", - "zkevm_circuits 0.150.6", + "zk_evm 0.150.7", + "zkevm_circuits 0.150.7", ] [[package]] @@ -916,11 +916,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" +checksum = "917d27db531fdd98a51e42ea465bc097f48cc849e7fad68d7856087d15125be1" dependencies = [ - "circuit_encodings 0.150.6", + "circuit_encodings 0.150.7", "derivative", "rayon", "serde", @@ -1773,9 +1773,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.151.0" +version = "0.151.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e1990fee6e9d25b40524ce53ca7977a211155a17bc7277f4dd354633e4fc22" +checksum = "ad950752eeb44f8938be405b95a1630f82e903f4a7adda355d92aad135fcd382" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1784,9 +1784,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d84e8d300c28cd91ceb56340f66da8607409f44a45f5e694e23723630db8c852" +checksum = "c38607d52509b5db97cc4447c8644d6c5ca84f22ff8a9254f984669b1eb82ed4" dependencies = [ "serde_json", ] @@ -4454,7 +4454,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.3", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -4800,14 +4800,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.6", - "regex-syntax 0.8.3", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -4821,13 +4821,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.3", + "regex-syntax 0.8.5", ] [[package]] @@ -4838,9 +4838,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rend" @@ -5721,9 +5721,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92776ca824f49c255a7417939706d759e0fd3dd4217420d01da68beae04f0bd6" +checksum = "9d2ac4440b6c23005c43a81cf064b9aa123fbeb992ac91cd04c7d485abb1fbea" dependencies = [ "bincode", "blake2 0.10.6", @@ -7526,9 +7526,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" +checksum = "3cc74fbe2b45fd19e95c59ea792c795feebdb616ebaa463f0ac567f495f47387" dependencies = [ "anyhow", "lazy_static", @@ -7536,7 +7536,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.6", + "zk_evm_abstractions 0.150.7", ] [[package]] @@ -7567,22 +7567,22 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" +checksum = "37f333a3b059899df09e40deb041af881bc03e496fda5eec618ffb5e854ee7df" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", 
"static_assertions", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", ] [[package]] name = "zkevm-assembly" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc743ac7b0d618536dc3ace798fd4b8af78b057884afda5785c7970e15d62d0" +checksum = "cf011a0c83cbfb175f1e60811f0e0cd56551c9e35df596a762556662c638deb9" dependencies = [ "env_logger 0.9.3", "hex", @@ -7595,7 +7595,7 @@ dependencies = [ "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", ] [[package]] @@ -7644,9 +7644,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" +checksum = "d06fb35b00699d25175a2ad447f86a9088af8b0bc698bb57086fb04c13e52eab" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7658,7 +7658,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", "zksync_cs_derive", ] @@ -7706,9 +7706,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" +checksum = "b83f3b279248af4ca86dec20a54127f02110b45570f3f6c1d13df49ba75c28a5" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -7723,13 +7723,13 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73ad3e73d290a38a35dd245fd68cb6f498a8a8da4a52f846e88da3d3c31a34fd" +checksum = "d9c801aa17e9009699aacf654588d6adfaeeb8a490b2d9121847c201e2766803" dependencies = [ "bincode", "circuit_definitions", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "codegen", "crossbeam", "derivative", @@ -7750,9 +7750,9 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d555e24b853359c5b076c52f9ff9e0ed62a7edc8c2f82f93517c524410c21ecb" +checksum = "5688dc060456f6c1e790d589f3abd6d9e9a11eb393d7383fbeb23b55961951e0" dependencies = [ "cmake", "crossbeam", @@ -7765,9 +7765,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "615dad34e5fe678ec3b3e029af3f19313bebb1b771a8ce963c9ab9a8cc3879d3" +checksum = "5714848e6f8361820346483246dd68b4e7fb05ec41dd6610a8b53fb5c3ca7f3a" dependencies = [ "bit-vec", "cfg-if", @@ -7782,9 +7782,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80721b2da2643bd43f664ac65673ee078e6973c0a88d75b73bfaeac8e1bf5432" +checksum = "52a6a1863818d939d445c53af57e53c222f11c2c94b9a94c3612dd938a3d983c" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -8152,9 +8152,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" +checksum = "dc58af8e4e4ad1a851ffd2275e6a44ead0f15a7eaac9dc9d60a56b3b9c9b08e8" dependencies = [ 
"boojum", "derivative", @@ -8164,7 +8164,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.6", + "zkevm_circuits 0.150.7", ] [[package]] @@ -8200,7 +8200,7 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "ethabi", "hex", "itertools 0.10.5", @@ -8212,7 +8212,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_contracts", "zksync_mini_merkle_tree", "zksync_system_constants", @@ -8265,7 +8265,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "clap 4.5.4", "ctrlc", "futures 0.3.30", @@ -8492,7 +8492,7 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "serde", "serde_with", "strum", @@ -8689,8 +8689,8 @@ source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0cc dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.6", - "zkevm_opcode_defs 0.150.6", + "zk_evm_abstractions 0.150.7", + "zkevm_opcode_defs 0.150.7", "zksync_vm2_interface", ] diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 31c663590ef..32c3185f64c 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -63,13 +63,13 @@ url = "2.5.2" vise = "0.2.0" # Proving dependencies -circuit_definitions = "=0.150.6" -circuit_sequencer_api = "=0.150.6" -zkevm_test_harness = "=0.150.6" +circuit_definitions = "=0.150.7" +circuit_sequencer_api = "=0.150.7" +zkevm_test_harness = "=0.150.7" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.151.0" } -shivini = "=0.151.0" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.151.1" } +shivini = "=0.151.1" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } From b5490a04d3b73e520de9bdae0d132fa35a885665 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 28 Oct 2024 18:31:35 +0200 Subject: [PATCH 136/140] test(vm): Improve instruction-counting VM benchmark (#3105) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Replaces `iai` with an alternative; brushes up instruction counting in general. ## Why ❔ - The library currently used for the benchmark (`iai`) is unmaintained. - It doesn't work with newer valgrind versions. - It doesn't allow measuring parts of program execution, only the entire program run. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .github/workflows/vm-perf-comparison.yml | 33 ++- .github/workflows/vm-perf-to-prometheus.yml | 4 +- Cargo.lock | 57 +++-- Cargo.toml | 4 +- core/lib/multivm/src/versions/vm_fast/mod.rs | 2 +- core/lib/vm_executor/src/batch/factory.rs | 2 +- core/tests/vm-benchmark/Cargo.toml | 4 +- core/tests/vm-benchmark/benches/iai.rs | 35 --- .../vm-benchmark/benches/instructions.rs | 206 ++++++++++++++++++ core/tests/vm-benchmark/src/bin/common/mod.rs | 54 ----- .../src/bin/compare_iai_results.rs | 108 --------- .../src/bin/iai_results_to_prometheus.rs | 52 ----- .../src/bin/instruction_counts.rs | 106 ++++++++- core/tests/vm-benchmark/src/criterion.rs | 6 +- core/tests/vm-benchmark/src/lib.rs | 2 +- core/tests/vm-benchmark/src/vm.rs | 114 ++++++---- 16 files changed, 446 insertions(+), 343 deletions(-) delete mode 100644 core/tests/vm-benchmark/benches/iai.rs create mode 100644 core/tests/vm-benchmark/benches/instructions.rs delete mode 100644 core/tests/vm-benchmark/src/bin/common/mod.rs delete mode 100644 core/tests/vm-benchmark/src/bin/compare_iai_results.rs delete mode 100644 core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index 49830a30cc1..3520419f133 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -40,6 +40,8 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + # Set the minimum reported instruction count difference to reduce noise + echo "BENCHMARK_DIFF_THRESHOLD_PERCENT=2" >> .env - name: init run: | @@ -51,8 +53,8 @@ jobs: run: | ci_run zkstackup -g --local ci_run zkstack dev contracts --system-contracts - ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai - ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes + ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose || echo "Instructions benchmark is missing" + ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes - name: checkout PR run: | @@ -60,24 +62,39 @@ jobs: - name: run benchmarks on PR shell: bash + id: comparison run: | ci_run zkstackup -g --local ci_run zkstack dev contracts --system-contracts - ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai - ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes + ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose + ci_run cargo bench --package vm-benchmark --bench instructions -- --print > instructions.log 2>/dev/null + # Output all lines from the benchmark result starting from the "## ..." comparison header. + # Since the output spans multiple lines, we use a heredoc declaration. 
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) echo "speedup<<$EOF" >> $GITHUB_OUTPUT - ci_run cargo run --package vm-benchmark --release --bin compare_iai_results base-iai pr-iai base-opcodes pr-opcodes >> $GITHUB_OUTPUT + sed -n '/^## /,$p' instructions.log >> $GITHUB_OUTPUT + echo "$EOF" >> $GITHUB_OUTPUT + + ci_run cargo run --package vm-benchmark --release --bin instruction_counts -- --diff base-opcodes > opcodes.log + echo "opcodes<<$EOF" >> $GITHUB_OUTPUT + sed -n '/^## /,$p' opcodes.log >> $GITHUB_OUTPUT echo "$EOF" >> $GITHUB_OUTPUT - id: comparison - name: Comment on PR uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 + if: steps.comparison.outputs.speedup != '' || steps.comparison.outputs.opcodes != '' with: message: | - ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }} ${{ steps.comparison.outputs.speedup }} + ${{ steps.comparison.outputs.opcodes }} comment_tag: vm-performance-changes mode: recreate - create_if_not_exists: ${{ steps.comparison.outputs.speedup != '' }} + create_if_not_exists: true + - name: Remove PR comment + uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 + if: steps.comparison.outputs.speedup == '' && steps.comparison.outputs.opcodes == '' + with: + comment_tag: vm-performance-changes + message: 'No performance difference detected (anymore)' + mode: delete diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index d336a1472e4..93d33116794 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -48,5 +48,5 @@ jobs: ci_run cargo bench --package vm-benchmark --bench oneshot # Run only benches with 1,000 transactions per batch to not spend too much time ci_run cargo bench --package vm-benchmark --bench batch '/1000$' - ci_run cargo bench --package vm-benchmark --bench iai | tee iai-result - ci_run cargo run --package vm-benchmark --bin iai_results_to_prometheus --release < iai-result + ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose + ci_run cargo bench --package vm-benchmark --bench instructions -- --print diff --git a/Cargo.lock b/Cargo.lock index 2e3a8a1c3e3..597da3c1b31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -959,7 +959,7 @@ name = "block_reverter" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "serde_json", "tokio", "zksync_block_reverter", @@ -1445,9 +1445,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -1455,14 +1455,15 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", "clap_lex 0.7.2", "strsim 0.11.1", + "terminal_size", ] [[package]] @@ -2796,7 +2797,7 @@ name = "genesis_generator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "futures 0.3.30", "serde", 
"serde_json", @@ -3472,12 +3473,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "iai" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" - [[package]] name = "iana-time-zone" version = "0.1.61" @@ -4122,7 +4117,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -4392,7 +4387,7 @@ name = "merkle_tree_consistency_checker" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "tracing", "zksync_config", "zksync_env_config", @@ -5498,7 +5493,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", - "heck 0.5.0", + "heck 0.4.1", "itertools 0.12.1", "log", "multimap", @@ -6583,7 +6578,7 @@ name = "selector_generator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "ethabi", "glob", "hex", @@ -7907,6 +7902,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + [[package]] name = "test-casing" version = "0.1.3" @@ -8751,11 +8756,11 @@ version = "0.1.0" dependencies = [ "assert_matches", "criterion", - "iai", "once_cell", "rand 0.8.5", "tokio", "vise", + "yab", "zksync_contracts", "zksync_multivm", "zksync_types", @@ -9239,6 +9244,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "yab" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b06cc62d4cec617d3c259537be0fcaa8a5bcf72ddf2983823d9528605f36ed3" +dependencies = [ + "anes", + "clap 4.5.20", + "num_cpus", + "thiserror", +] + [[package]] name = "yansi" version = "1.0.1" @@ -10229,7 +10246,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "clap 4.5.18", + "clap 4.5.20", "envy", "futures 0.3.30", "rustc_version", @@ -10437,7 +10454,7 @@ version = "0.1.0" dependencies = [ "anyhow", "assert_matches", - "clap 4.5.18", + "clap 4.5.20", "insta", "leb128", "once_cell", @@ -11017,7 +11034,7 @@ name = "zksync_server" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "futures 0.3.30", "serde_json", "tikv-jemallocator", diff --git a/Cargo.toml b/Cargo.toml index dc6fdf1727e..6d51e5060aa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -122,6 +122,7 @@ derive_more = "1.0.0" envy = "0.4" ethabi = "18.0.0" flate2 = "1.0.28" +fraction = "0.15.3" futures = "0.3" glob = "0.3" google-cloud-auth = "0.16.0" @@ -131,7 +132,6 @@ hex = "0.4" http = "1.1" httpmock = "0.7.0" hyper = "1.3" -iai = "0.1" insta = "1.29.0" itertools = "0.10" jsonrpsee = { version = "0.23", default-features = false } @@ -190,7 +190,7 @@ tracing-opentelemetry = "0.25.0" time = "0.3.36" # Has to be same as used by `tracing-subscriber` url = "2" web3 = "0.19.0" -fraction = "0.15.3" +yab = "0.1.0" # Proc-macro syn = "2.0" diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs index bb5a342bff2..35789c6cdc9 100644 --- a/core/lib/multivm/src/versions/vm_fast/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -1,4 +1,4 @@ -pub 
use zksync_vm2::interface::Tracer; +pub use zksync_vm2::interface; pub use self::{circuits_tracer::CircuitsTracer, vm::Vm}; diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index f974d17f4a7..de0db5f0bf7 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -37,7 +37,7 @@ pub trait BatchTracer: fmt::Debug + 'static + Send + Sealed { const TRACE_CALLS: bool; /// Tracer for the fast VM. #[doc(hidden)] - type Fast: vm_fast::Tracer + Default + 'static; + type Fast: vm_fast::interface::Tracer + Default + 'static; } impl Sealed for () {} diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 59c1e21493b..892bcf1c105 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -21,7 +21,7 @@ tokio.workspace = true [dev-dependencies] assert_matches.workspace = true -iai.workspace = true +yab.workspace = true [[bench]] name = "oneshot" @@ -32,5 +32,5 @@ name = "batch" harness = false [[bench]] -name = "iai" +name = "instructions" harness = false diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs deleted file mode 100644 index 8cbb9f10dd8..00000000000 --- a/core/tests/vm-benchmark/benches/iai.rs +++ /dev/null @@ -1,35 +0,0 @@ -use iai::black_box; -use vm_benchmark::{BenchmarkingVm, BenchmarkingVmFactory, Bytecode, Fast, Legacy}; - -fn run_bytecode(name: &str) { - let tx = Bytecode::get(name).deploy_tx(); - black_box(BenchmarkingVm::::default().run_transaction(&tx)); -} - -macro_rules! make_functions_and_main { - ($($file:ident => $legacy_name:ident,)+) => { - $( - fn $file() { - run_bytecode::(stringify!($file)); - } - - fn $legacy_name() { - run_bytecode::(stringify!($file)); - } - )+ - - iai::main!($($file, $legacy_name,)+); - }; -} - -make_functions_and_main!( - access_memory => access_memory_legacy, - call_far => call_far_legacy, - decode_shl_sub => decode_shl_sub_legacy, - deploy_simple_contract => deploy_simple_contract_legacy, - finish_eventful_frames => finish_eventful_frames_legacy, - write_and_decode => write_and_decode_legacy, - event_spam => event_spam_legacy, - slot_hash_collision => slot_hash_collision_legacy, - heap_read_write => heap_read_write_legacy, -); diff --git a/core/tests/vm-benchmark/benches/instructions.rs b/core/tests/vm-benchmark/benches/instructions.rs new file mode 100644 index 00000000000..654dfef71b2 --- /dev/null +++ b/core/tests/vm-benchmark/benches/instructions.rs @@ -0,0 +1,206 @@ +//! Measures the number of host instructions required to run the benchmark bytecodes. + +use std::{env, sync::mpsc}; + +use vise::{Gauge, LabeledFamily, Metrics}; +use vm_benchmark::{ + criterion::PrometheusRuntime, BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, BYTECODES, +}; +use yab::{ + reporter::{BenchmarkOutput, BenchmarkReporter, Reporter}, + AccessSummary, BenchMode, Bencher, BenchmarkId, +}; + +fn benchmarks_for_vm(bencher: &mut Bencher) { + bencher.bench( + BenchmarkId::new("init", VM::LABEL.as_str()), + BenchmarkingVm::::default, + ); + + for bytecode in BYTECODES { + bencher.bench_with_capture( + BenchmarkId::new(bytecode.name, VM::LABEL.as_str()), + |capture| { + let mut vm = yab::black_box(BenchmarkingVm::::default()); + let tx = yab::black_box(bytecode.deploy_tx()); + capture.measure(|| vm.run_transaction(&tx)); + }, + ); + } +} + +/// Reporter that pushes cachegrind metrics to Prometheus. 
+#[derive(Debug)] +struct MetricsReporter { + _runtime: Option, +} + +impl Default for MetricsReporter { + fn default() -> Self { + Self { + _runtime: PrometheusRuntime::new(), + } + } +} + +impl Reporter for MetricsReporter { + fn new_benchmark(&mut self, id: &BenchmarkId) -> Box { + Box::new(MetricsBenchmarkReporter(id.clone())) + } +} + +#[derive(Debug)] +struct MetricsBenchmarkReporter(BenchmarkId); + +impl BenchmarkReporter for MetricsBenchmarkReporter { + fn ok(self: Box, output: &BenchmarkOutput) { + #[derive(Debug, Metrics)] + #[metrics(prefix = "vm_cachegrind")] + struct VmCachegrindMetrics { + #[metrics(labels = ["benchmark"])] + instructions: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + l1_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + l2_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + ram_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + cycles: LabeledFamily>, + } + + #[vise::register] + static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); + + let id = self.0.to_string(); + VM_CACHEGRIND_METRICS.instructions[&id].set(output.stats.total_instructions()); + if let Some(&full) = output.stats.as_full() { + let summary = AccessSummary::from(full); + VM_CACHEGRIND_METRICS.l1_accesses[&id].set(summary.l1_hits); + VM_CACHEGRIND_METRICS.l2_accesses[&id].set(summary.l3_hits); + VM_CACHEGRIND_METRICS.ram_accesses[&id].set(summary.ram_accesses); + VM_CACHEGRIND_METRICS.cycles[&id].set(summary.estimated_cycles()); + } + } +} + +#[derive(Debug, Clone, Copy)] +struct Comparison { + current_cycles: u64, + prev_cycles: Option, +} + +impl Comparison { + fn percent_difference(a: u64, b: u64) -> f64 { + ((b as i64) - (a as i64)) as f64 / (a as f64) * 100.0 + } + + fn new(output: &BenchmarkOutput) -> Option { + let current_cycles = AccessSummary::from(*output.stats.as_full()?).estimated_cycles(); + let prev_cycles = if let Some(prev_stats) = &output.prev_stats { + Some(AccessSummary::from(*prev_stats.as_full()?).estimated_cycles()) + } else { + None + }; + + Some(Self { + current_cycles, + prev_cycles, + }) + } + + fn cycles_diff(&self) -> Option { + self.prev_cycles + .map(|prev_cycles| Self::percent_difference(prev_cycles, self.current_cycles)) + } +} + +/// Reporter that outputs diffs in a Markdown table to stdout after all benchmarks are completed. +/// +/// Significant diff level can be changed via `BENCHMARK_DIFF_THRESHOLD_PERCENT` env var; it is set to 1% by default. +#[derive(Debug)] +struct ComparisonReporter { + comparisons_sender: mpsc::Sender<(String, Comparison)>, + comparisons_receiver: mpsc::Receiver<(String, Comparison)>, +} + +impl Default for ComparisonReporter { + fn default() -> Self { + let (comparisons_sender, comparisons_receiver) = mpsc::channel(); + Self { + comparisons_sender, + comparisons_receiver, + } + } +} + +impl Reporter for ComparisonReporter { + fn new_benchmark(&mut self, id: &BenchmarkId) -> Box { + Box::new(BenchmarkComparison { + comparisons_sender: self.comparisons_sender.clone(), + id: id.clone(), + }) + } + + fn ok(self: Box) { + const ENV_VAR: &str = "BENCHMARK_DIFF_THRESHOLD_PERCENT"; + + let diff_threshold = env::var(ENV_VAR).unwrap_or_else(|_| "1.0".into()); + let diff_threshold: f64 = diff_threshold.parse().unwrap_or_else(|err| { + panic!("incorrect `{ENV_VAR}` value: {err}"); + }); + + // Drop the sender to not hang on the iteration below. 
+ drop(self.comparisons_sender); + let mut comparisons: Vec<_> = self.comparisons_receiver.iter().collect(); + comparisons.retain(|(_, diff)| { + // Output all stats if `diff_threshold <= 0.0` since this is what the user expects + diff.cycles_diff().unwrap_or(0.0) >= diff_threshold + }); + if comparisons.is_empty() { + return; + } + + comparisons.sort_unstable_by(|(name, _), (other_name, _)| name.cmp(other_name)); + + println!("\n## Detected VM performance changes"); + println!("Benchmark name | Est. cycles | Change in est. cycles |"); + println!("|:---|---:|---:|"); + for (name, comparison) in &comparisons { + let diff = comparison + .cycles_diff() + .map_or_else(|| "N/A".to_string(), |diff| format!("{diff:+.1}%")); + println!("| {name} | {} | {diff} |", comparison.current_cycles); + } + } +} + +#[derive(Debug)] +struct BenchmarkComparison { + comparisons_sender: mpsc::Sender<(String, Comparison)>, + id: BenchmarkId, +} + +impl BenchmarkReporter for BenchmarkComparison { + fn ok(self: Box, output: &BenchmarkOutput) { + if let Some(diff) = Comparison::new(output) { + self.comparisons_sender + .send((self.id.to_string(), diff)) + .ok(); + } + } +} + +fn benchmarks(bencher: &mut Bencher) { + if bencher.mode() == BenchMode::PrintResults { + // Only customize reporting if outputting previously collected benchmark result in order to prevent + // reporters influencing cachegrind stats. + bencher + .add_reporter(MetricsReporter::default()) + .add_reporter(ComparisonReporter::default()); + } + benchmarks_for_vm::(bencher); + benchmarks_for_vm::(bencher); +} + +yab::main!(benchmarks); diff --git a/core/tests/vm-benchmark/src/bin/common/mod.rs b/core/tests/vm-benchmark/src/bin/common/mod.rs deleted file mode 100644 index a92c9d5f710..00000000000 --- a/core/tests/vm-benchmark/src/bin/common/mod.rs +++ /dev/null @@ -1,54 +0,0 @@ -use std::io::BufRead; - -#[derive(Debug)] -pub struct IaiResult { - pub name: String, - pub instructions: u64, - pub l1_accesses: u64, - pub l2_accesses: u64, - pub ram_accesses: u64, - pub cycles: u64, -} - -pub fn parse_iai(iai_output: R) -> impl Iterator { - IaiResultParser { - lines: iai_output.lines().map(|x| x.unwrap()), - } -} - -struct IaiResultParser> { - lines: I, -} - -impl> Iterator for IaiResultParser { - type Item = IaiResult; - - fn next(&mut self) -> Option { - self.lines.next().map(|name| { - let result = IaiResult { - name, - instructions: self.parse_stat(), - l1_accesses: self.parse_stat(), - l2_accesses: self.parse_stat(), - ram_accesses: self.parse_stat(), - cycles: self.parse_stat(), - }; - self.lines.next(); - result - }) - } -} - -impl> IaiResultParser { - fn parse_stat(&mut self) -> u64 { - let line = self.lines.next().unwrap(); - let number = line - .split(':') - .nth(1) - .unwrap() - .split_whitespace() - .next() - .unwrap(); - number.parse().unwrap() - } -} diff --git a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs deleted file mode 100644 index c274b039c9b..00000000000 --- a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs +++ /dev/null @@ -1,108 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - fs::File, - io::{BufRead, BufReader}, -}; - -pub use crate::common::parse_iai; - -mod common; - -fn main() { - let [iai_before, iai_after, opcodes_before, opcodes_after] = std::env::args() - .skip(1) - .take(4) - .collect::>() - .try_into() - .expect("expected four arguments"); - - let iai_before = get_name_to_cycles(&iai_before); - let iai_after = 
get_name_to_cycles(&iai_after); - let opcodes_before = get_name_to_opcodes(&opcodes_before); - let opcodes_after = get_name_to_opcodes(&opcodes_after); - - let perf_changes = iai_before - .keys() - .collect::>() - .intersection(&iai_after.keys().collect()) - .map(|&name| (name, percent_difference(iai_before[name], iai_after[name]))) - .collect::>(); - - let duration_changes = opcodes_before - .keys() - .collect::>() - .intersection(&opcodes_after.keys().collect()) - .map(|&name| { - let opcodes_abs_diff = (opcodes_after[name] as i64) - (opcodes_before[name] as i64); - (name, opcodes_abs_diff) - }) - .collect::>(); - - let mut nonzero_diff = false; - - for name in perf_changes - .iter() - .filter_map(|(key, value)| (value.abs() > 2.).then_some(key)) - .collect::>() - .union( - &duration_changes - .iter() - .filter_map(|(key, value)| (*value != 0).then_some(key)) - .collect(), - ) - { - // write the header before writing the first line of diff - if !nonzero_diff { - println!("Benchmark name | change in estimated runtime | change in number of opcodes executed \n--- | --- | ---"); - nonzero_diff = true; - } - - let n_a = "N/A".to_string(); - println!( - "{} | {} | {}", - name, - perf_changes - .get(**name) - .map(|percent| format!("{:+.1}%", percent)) - .unwrap_or(n_a.clone()), - duration_changes - .get(**name) - .map(|abs_diff| format!( - "{:+} ({:+.1}%)", - abs_diff, - percent_difference(opcodes_before[**name], opcodes_after[**name]) - )) - .unwrap_or(n_a), - ); - } - - if nonzero_diff { - println!("\n Changes in number of opcodes executed indicate that the gas price of the benchmark has changed, which causes it run out of gas at a different time. Or that it is behaving completely differently."); - } -} - -fn percent_difference(a: u64, b: u64) -> f64 { - ((b as f64) - (a as f64)) / (a as f64) * 100.0 -} - -fn get_name_to_cycles(filename: &str) -> HashMap { - parse_iai(BufReader::new( - File::open(filename).expect("failed to open file"), - )) - .map(|x| (x.name, x.cycles)) - .collect() -} - -fn get_name_to_opcodes(filename: &str) -> HashMap { - BufReader::new(File::open(filename).expect("failed to open file")) - .lines() - .map(|line| { - let line = line.unwrap(); - let mut it = line.split_whitespace(); - ( - it.next().unwrap().to_string(), - it.next().unwrap().parse().unwrap(), - ) - }) - .collect() -} diff --git a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs deleted file mode 100644 index 3b3aa05bf69..00000000000 --- a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::{env, io::BufReader, time::Duration}; - -use tokio::sync::watch; -use vise::{Gauge, LabeledFamily, Metrics}; -use zksync_vlog::prometheus::PrometheusExporterConfig; - -use crate::common::{parse_iai, IaiResult}; - -mod common; - -#[derive(Debug, Metrics)] -#[metrics(prefix = "vm_cachegrind")] -pub(crate) struct VmCachegrindMetrics { - #[metrics(labels = ["benchmark"])] - pub instructions: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l1_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l2_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub ram_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub cycles: LabeledFamily>, -} - -#[vise::register] -pub(crate) static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); - -#[tokio::main] -async fn main() { - let results: Vec = parse_iai(BufReader::new(std::io::stdin())).collect(); - - 
let endpoint = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL") - .expect("`BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL` env var is not set"); - let (stop_sender, stop_receiver) = watch::channel(false); - let prometheus_config = - PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); - tokio::spawn(prometheus_config.run(stop_receiver)); - - for result in results { - let name = result.name; - VM_CACHEGRIND_METRICS.instructions[&name.clone()].set(result.instructions); - VM_CACHEGRIND_METRICS.l1_accesses[&name.clone()].set(result.l1_accesses); - VM_CACHEGRIND_METRICS.l2_accesses[&name.clone()].set(result.l2_accesses); - VM_CACHEGRIND_METRICS.ram_accesses[&name.clone()].set(result.ram_accesses); - VM_CACHEGRIND_METRICS.cycles[&name].set(result.cycles); - } - - println!("Waiting for push to happen..."); - tokio::time::sleep(Duration::from_secs(1)).await; - stop_sender.send_replace(true); -} diff --git a/core/tests/vm-benchmark/src/bin/instruction_counts.rs b/core/tests/vm-benchmark/src/bin/instruction_counts.rs index 96208007fd9..ece30a66cee 100644 --- a/core/tests/vm-benchmark/src/bin/instruction_counts.rs +++ b/core/tests/vm-benchmark/src/bin/instruction_counts.rs @@ -1,16 +1,100 @@ //! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. -use vm_benchmark::{BenchmarkingVmFactory, Fast, Legacy, BYTECODES}; +use std::{collections::BTreeMap, env, fs, io, path::PathBuf}; -fn main() { - for bytecode in BYTECODES { - let tx = bytecode.deploy_tx(); - let name = bytecode.name; - println!("{name} {}", Fast::<()>::count_instructions(&tx)); - println!( - "{} {}", - name.to_string() + "_legacy", - Legacy::count_instructions(&tx) - ); +use vm_benchmark::{CountInstructions, Fast, Legacy, BYTECODES}; + +#[derive(Debug)] +enum Command { + Print, + Diff { old: PathBuf }, +} + +impl Command { + fn from_env() -> Self { + let mut args = env::args().skip(1); + let Some(first) = args.next() else { + return Self::Print; + }; + assert_eq!(first, "--diff", "Unsupported command-line arg"); + let old = args.next().expect("`--diff` requires a path to old file"); + Self::Diff { old: old.into() } } + + fn print_instructions(counts: &BTreeMap<&str, usize>) { + for (bytecode_name, count) in counts { + println!("{bytecode_name} {count}"); + } + } + + fn parse_counts(reader: impl io::BufRead) -> BTreeMap { + let mut counts = BTreeMap::new(); + for line in reader.lines() { + let line = line.unwrap(); + if line.is_empty() { + continue; + } + let (name, count) = line.split_once(' ').expect("invalid output format"); + let count = count.parse().unwrap_or_else(|err| { + panic!("invalid count for `{name}`: {err}"); + }); + counts.insert(name.to_owned(), count); + } + counts + } + + fn run(self) { + let counts: BTreeMap<_, _> = BYTECODES + .iter() + .map(|bytecode| { + let tx = bytecode.deploy_tx(); + // We have a unit test comparing stats, but do it here as well just in case. 
+ let fast_count = Fast::count_instructions(&tx); + let legacy_count = Legacy::count_instructions(&tx); + assert_eq!( + fast_count, legacy_count, + "mismatch on number of instructions on bytecode `{}`", + bytecode.name + ); + + (bytecode.name, fast_count) + }) + .collect(); + + match self { + Self::Print => Self::print_instructions(&counts), + Self::Diff { old } => { + let file = fs::File::open(&old).unwrap_or_else(|err| { + panic!("failed opening `{}`: {err}", old.display()); + }); + let reader = io::BufReader::new(file); + let old_counts = Self::parse_counts(reader); + + let differing_counts: Vec<_> = counts + .iter() + .filter_map(|(&name, &new_count)| { + let old_count = *old_counts.get(name)?; + (old_count != new_count).then_some((name, old_count, new_count)) + }) + .collect(); + + if !differing_counts.is_empty() { + println!("## ⚠ Detected differing instruction counts"); + println!("| Benchmark | Old count | New count |"); + println!("|-----------|----------:|----------:|"); + for (name, old_count, new_count) in differing_counts { + println!("| {name} | {old_count} | {new_count} |"); + } + println!( + "\nChanges in number of opcodes executed indicate that the gas price of the benchmark has changed, \ + which causes it to run out of gas at a different time." + ); + } + } + } + } +} + +fn main() { + Command::from_env().run(); } diff --git a/core/tests/vm-benchmark/src/criterion.rs b/core/tests/vm-benchmark/src/criterion.rs index 9515ac4ef98..024ccf14139 100644 --- a/core/tests/vm-benchmark/src/criterion.rs +++ b/core/tests/vm-benchmark/src/criterion.rs @@ -57,7 +57,7 @@ struct VmBenchmarkMetrics { static METRICS: vise::Global = vise::Global::new(); #[derive(Debug)] -struct PrometheusRuntime { +pub struct PrometheusRuntime { stop_sender: watch::Sender, _runtime: tokio::runtime::Runtime, } @@ -72,7 +72,7 @@ impl Drop for PrometheusRuntime { } impl PrometheusRuntime { - fn new() -> Option { + pub fn new() -> Option { const PUSH_INTERVAL: Duration = Duration::from_millis(100); let gateway_url = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL").ok()?; @@ -164,7 +164,7 @@ thread_local! { static BIN_NAME: SyncOnceCell<&'static str> = SyncOnceCell::new(); -/// Measurement for criterion that exports . +/// Measurement for criterion that exports timing-related metrics. #[derive(Debug)] pub struct MeteredTime { _prometheus: Option, diff --git a/core/tests/vm-benchmark/src/lib.rs b/core/tests/vm-benchmark/src/lib.rs index 4bd008d3319..9c4f547c1de 100644 --- a/core/tests/vm-benchmark/src/lib.rs +++ b/core/tests/vm-benchmark/src/lib.rs @@ -6,7 +6,7 @@ pub use crate::{ get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, }, - vm::{BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, VmLabel}, + vm::{BenchmarkingVm, BenchmarkingVmFactory, CountInstructions, Fast, Legacy, VmLabel}, }; pub mod criterion; diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index e198be9ea6b..bf969e0de5c 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -72,19 +72,21 @@ pub trait BenchmarkingVmFactory { system_env: SystemEnv, storage: &'static InMemoryStorage, ) -> Self::Instance; +} +pub trait CountInstructions { /// Counts instructions executed by the VM while processing the transaction. fn count_instructions(tx: &Transaction) -> usize; } /// Factory for the new / fast VM. 
#[derive(Debug)] -pub struct Fast(Tr); +pub struct Fast; -impl BenchmarkingVmFactory for Fast { +impl BenchmarkingVmFactory for Fast { const LABEL: VmLabel = VmLabel::Fast; - type Instance = vm_fast::Vm<&'static InMemoryStorage, Tr>; + type Instance = vm_fast::Vm<&'static InMemoryStorage>; fn create( batch_env: L1BatchEnv, @@ -93,27 +95,30 @@ impl BenchmarkingVmFactory for Fast ) -> Self::Instance { vm_fast::Vm::custom(batch_env, system_env, storage) } +} +impl CountInstructions for Fast { fn count_instructions(tx: &Transaction) -> usize { - let mut vm = BenchmarkingVm::>::default(); - vm.0.push_transaction(tx.clone()); + use vm_fast::interface as vm2; #[derive(Default)] struct InstructionCount(usize); - impl vm_fast::Tracer for InstructionCount { - fn before_instruction< - OP: zksync_vm2::interface::OpcodeType, - S: zksync_vm2::interface::GlobalStateInterface, - >( + + impl vm2::Tracer for InstructionCount { + fn before_instruction( &mut self, _: &mut S, ) { self.0 += 1; } } - let mut tracer = InstructionCount(0); - vm.0.inspect(&mut tracer, InspectExecutionMode::OneTx); + let (system_env, l1_batch_env) = test_env(); + let mut vm = + vm_fast::Vm::<_, InstructionCount>::custom(l1_batch_env, system_env, &*STORAGE); + vm.push_transaction(tx.clone()); + let mut tracer = InstructionCount(0); + vm.inspect(&mut tracer, InspectExecutionMode::OneTx); tracer.0 } } @@ -135,7 +140,9 @@ impl BenchmarkingVmFactory for Legacy { let storage = StorageView::new(storage).to_rc_ptr(); vm_latest::Vm::new(batch_env, system_env, storage) } +} +impl CountInstructions for Legacy { fn count_instructions(tx: &Transaction) -> usize { let mut vm = BenchmarkingVm::::default(); vm.0.push_transaction(tx.clone()); @@ -150,41 +157,44 @@ impl BenchmarkingVmFactory for Legacy { } } +fn test_env() -> (SystemEnv, L1BatchEnv) { + let timestamp = unix_timestamp_ms(); + let system_env = SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: SYSTEM_CONTRACTS.clone(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + }; + let l1_batch_env = L1BatchEnv { + previous_batch_hash: None, + number: L1BatchNumber(1), + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + }; + (system_env, l1_batch_env) +} + #[derive(Debug)] pub struct BenchmarkingVm(VM::Instance); impl Default for BenchmarkingVm { fn default() -> Self { - let timestamp = unix_timestamp_ms(); - Self(VM::create( - L1BatchEnv { - previous_batch_hash: None, - number: L1BatchNumber(1), - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - }, - SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: SYSTEM_CONTRACTS.clone(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - 
default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - &STORAGE, - )) + let (system_env, l1_batch_env) = test_env(); + Self(VM::create(l1_batch_env, system_env, &STORAGE)) } } @@ -231,7 +241,7 @@ mod tests { use super::*; use crate::{ get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, - get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, + get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, BYTECODES, }; #[test] @@ -282,4 +292,22 @@ mod tests { let res = vm.run_transaction(&get_heavy_load_test_tx(1)); assert_matches!(res.result, ExecutionResult::Success { .. }); } + + #[test] + fn instruction_count_matches_on_both_vms_for_transfer() { + let tx = get_transfer_tx(0); + let legacy_count = Legacy::count_instructions(&tx); + let fast_count = Fast::count_instructions(&tx); + assert_eq!(legacy_count, fast_count); + } + + #[test] + fn instruction_count_matches_on_both_vms_for_benchmark_bytecodes() { + for bytecode in BYTECODES { + let tx = bytecode.deploy_tx(); + let legacy_count = Legacy::count_instructions(&tx); + let fast_count = Fast::count_instructions(&tx); + assert_eq!(legacy_count, fast_count, "bytecode: {}", bytecode.name); + } + } } From 6ee9f1f431f95514d58db87a4562e09df9d09f86 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 28 Oct 2024 17:58:15 +0100 Subject: [PATCH 137/140] fix(consensus): made attestation controller non-critical (#3180) The attestation logic in consensus component is experimental, while p2p synchronization is critical. I've made the attestation controller non-critical, i.e. if attestation controller fails, an error is logged, but the consensus component keeps working (on both main node and external node). This should prevent situations like in https://www.notion.so/matterlabs/mainnet2-p2p-synchronization-downtime-12aa48363f2380e6b8e0c8e1c3728201?pvs=4 --- core/node/consensus/src/en.rs | 25 +++-- core/node/consensus/src/mn.rs | 201 ++++++++++++++++++---------------- 2 files changed, 123 insertions(+), 103 deletions(-) diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 5e9aadc8f37..6e3619f57e2 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -127,7 +127,7 @@ impl EN { ) .await .wrap("Store::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("Store::runner()")?) }); // Run the temporary fetcher until the certificates are backfilled. // Temporary fetcher should be removed once json RPC syncing is fully deprecated. @@ -146,14 +146,25 @@ impl EN { let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("BlockStore::run()")?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(self.run_attestation_controller( - ctx, - global_config.clone(), - attestation.clone(), - )); + s.spawn_bg({ + let global_config = global_config.clone(); + let attestation = attestation.clone(); + async { + let res = self + .run_attestation_controller(ctx, global_config, attestation) + .await + .wrap("run_attestation_controller()"); + // Attestation currently is not critical for the node to function. + // If it fails, we just log the error and continue. 
+ if let Err(err) = res { + tracing::error!("attestation controller failed: {err:#}"); + } + Ok(()) + } + }); let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, build_version)?, diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 2a280b2f161..a392acfbe5f 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -30,7 +30,7 @@ pub async fn run_main_node( tracing::debug!(is_attester = attester.is_some(), "main node attester mode"); - scope::run!(&ctx, |ctx, s| async { + let res: ctx::Result<()> = scope::run!(&ctx, |ctx, s| async { if let Some(spec) = &cfg.genesis_spec { let spec = config::GenesisSpec::parse(spec).context("GenesisSpec::parse()")?; @@ -46,7 +46,7 @@ pub async fn run_main_node( let (store, runner) = Store::new(ctx, pool.clone(), None, None) .await .wrap("Store::new()")?; - s.spawn_bg(runner.run(ctx)); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("Store::runner()")?) }); let global_config = pool .connection(ctx) @@ -56,25 +56,36 @@ pub async fn run_main_node( .await .wrap("global_config()")? .context("global_config() disappeared")?; - anyhow::ensure!( - global_config.genesis.leader_selection - == validator::LeaderSelectionMode::Sticky(validator_key.public()), - "unsupported leader selection mode - main node has to be the leader" - ); + if global_config.genesis.leader_selection + != validator::LeaderSelectionMode::Sticky(validator_key.public()) + { + return Err(anyhow::format_err!( + "unsupported leader selection mode - main node has to be the leader" + ) + .into()); + } let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; - s.spawn_bg(runner.run(ctx)); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("BlockStore::run()")?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(run_attestation_controller( - ctx, - &pool, - global_config.clone(), - attestation.clone(), - )); - + s.spawn_bg({ + let global_config = global_config.clone(); + let attestation = attestation.clone(); + async { + let res = run_attestation_controller(ctx, &pool, global_config, attestation) + .await + .wrap("run_attestation_controller()"); + // Attestation currently is not critical for the node to function. + // If it fails, we just log the error and continue. 
+ if let Err(err) = res { + tracing::error!("attestation controller failed: {err:#}"); + } + Ok(()) + } + }); let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, @@ -87,9 +98,14 @@ pub async fn run_main_node( }; tracing::info!("running the main node executor"); - executor.run(ctx).await + executor.run(ctx).await.context("executor")?; + Ok(()) }) - .await + .await; + match res { + Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), + Err(ctx::Error::Internal(err)) => Err(err), + } } /// Manages attestation state by configuring the @@ -100,91 +116,84 @@ async fn run_attestation_controller( pool: &ConnectionPool, cfg: consensus_dal::GlobalConfig, attestation: Arc, -) -> anyhow::Result<()> { +) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); let registry = registry::Registry::new(cfg.genesis, pool.clone()).await; let registry_addr = cfg.registry_address.map(registry::Address::new); let mut next = attester::BatchNumber(0); - let res = async { - loop { - // After regenesis it might happen that the batch number for the first block - // is not immediately known (the first block was not produced yet), - // therefore we need to wait for it. - let status = loop { - match pool - .connection(ctx) - .await - .wrap("connection()")? - .attestation_status(ctx) - .await - .wrap("attestation_status()")? - { - Some(status) if status.next_batch_to_attest >= next => break status, - _ => {} - } - ctx.sleep(POLL_INTERVAL).await?; - }; - next = status.next_batch_to_attest.next(); - tracing::info!( - "waiting for hash of batch {:?}", - status.next_batch_to_attest - ); - let info = pool - .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) - .await?; - let hash = consensus_dal::batch_hash(&info); - let Some(committee) = registry - .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) - .await - .wrap("attester_committee_for()")? - else { - tracing::info!("attestation not required"); - continue; - }; - let committee = Arc::new(committee); - // Persist the derived committee. - pool.connection(ctx) - .await - .wrap("connection")? - .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) - .await - .wrap("upsert_attester_committee()")?; - tracing::info!( - "attesting batch {:?} with hash {hash:?}", - status.next_batch_to_attest - ); - attestation - .start_attestation(Arc::new(attestation::Info { - batch_to_attest: attester::Batch { - hash, - number: status.next_batch_to_attest, - genesis: status.genesis, - }, - committee, - })) - .await - .context("start_attestation()")?; - // Main node is the only node which can update the global AttestationStatus, - // therefore we can synchronously wait for the certificate. - let qc = attestation - .wait_for_cert(ctx, status.next_batch_to_attest) - .await? - .context("attestation config has changed unexpectedly")?; - tracing::info!( - "collected certificate for batch {:?}", - status.next_batch_to_attest - ); - pool.connection(ctx) + loop { + // After regenesis it might happen that the batch number for the first block + // is not immediately known (the first block was not produced yet), + // therefore we need to wait for it. + let status = loop { + match pool + .connection(ctx) .await .wrap("connection()")? 
- .insert_batch_certificate(ctx, &qc) + .attestation_status(ctx) .await - .wrap("insert_batch_certificate()")?; - } - } - .await; - match res { - Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), - Err(ctx::Error::Internal(err)) => Err(err), + .wrap("attestation_status()")? + { + Some(status) if status.next_batch_to_attest >= next => break status, + _ => {} + } + ctx.sleep(POLL_INTERVAL).await?; + }; + next = status.next_batch_to_attest.next(); + tracing::info!( + "waiting for hash of batch {:?}", + status.next_batch_to_attest + ); + let info = pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) + .await?; + let hash = consensus_dal::batch_hash(&info); + let Some(committee) = registry + .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + pool.connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; + tracing::info!( + "attesting batch {:?} with hash {hash:?}", + status.next_batch_to_attest + ); + attestation + .start_attestation(Arc::new(attestation::Info { + batch_to_attest: attester::Batch { + hash, + number: status.next_batch_to_attest, + genesis: status.genesis, + }, + committee, + })) + .await + .context("start_attestation()")?; + // Main node is the only node which can update the global AttestationStatus, + // therefore we can synchronously wait for the certificate. + let qc = attestation + .wait_for_cert(ctx, status.next_batch_to_attest) + .await? + .context("attestation config has changed unexpectedly")?; + tracing::info!( + "collected certificate for batch {:?}", + status.next_batch_to_attest + ); + pool.connection(ctx) + .await + .wrap("connection()")? + .insert_batch_certificate(ctx, &qc) + .await + .wrap("insert_batch_certificate()")?; } } From a5df3c2b3353971220eadafac345ca6e206b9b45 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Mon, 28 Oct 2024 21:49:58 +0200 Subject: [PATCH 138/140] feat: merge main to sync-layer-stable (#3182) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .githooks/pre-push | 25 +- .github/pull_request_template.md | 2 +- .github/release-please/config.json | 4 +- .github/release-please/manifest.json | 4 +- .../build-contract-verifier-template.yml | 9 +- .github/workflows/build-core-template.yml | 14 +- .github/workflows/build-local-node-docker.yml | 12 +- ...fri-gpu-gar-and-circuit-prover-gpu-gar.yml | 5 +- .github/workflows/build-prover-template.yml | 5 +- .../build-witness-generator-template.yml | 1 - .github/workflows/ci-common-reusable.yml | 10 +- .github/workflows/ci-core-lint-reusable.yml | 25 +- .github/workflows/ci-core-reusable.yml | 440 +- .github/workflows/ci-docs-reusable.yml | 9 +- .github/workflows/ci-prover-e2e.yml | 126 + .github/workflows/ci-prover-reusable.yml | 24 +- .github/workflows/ci.yml | 15 +- .../new-build-contract-verifier-template.yml | 291 +- .github/workflows/new-build-core-template.yml | 39 +- .../workflows/new-build-prover-template.yml | 15 +- .github/workflows/release-test-stage.yml | 15 +- .github/workflows/vm-perf-comparison.yml | 125 +- .github/workflows/vm-perf-to-prometheus.yml | 11 +- .github/workflows/zk-environment-publish.yml | 28 +- .gitignore | 5 +- Cargo.lock | 217 +- Cargo.toml | 195 +- bin/ci_localnet_up | 1 - .../batch_availability_checker | 40 + bin/prover_checkers/batch_l1_status_checker | 54 + bin/prover_checkers/kill_prover | 12 + .../prover_jobs_status_checker | 42 + bin/run_on_all_chains.sh | 41 + bin/zk | 1 + bin/zkt | 17 - core/CHANGELOG.md | 98 + core/bin/external_node/Cargo.toml | 2 +- core/bin/external_node/src/config/mod.rs | 10 +- core/bin/external_node/src/node_builder.rs | 61 +- core/bin/external_node/src/tests/mod.rs | 53 +- core/bin/genesis_generator/src/main.rs | 1 + .../system-constants-generator/src/utils.rs | 27 +- core/bin/zksync_server/src/main.rs | 2 +- core/bin/zksync_server/src/node_builder.rs | 52 +- core/bin/zksync_tee_prover/Cargo.toml | 2 +- core/bin/zksync_tee_prover/src/api_client.rs | 36 +- core/bin/zksync_tee_prover/src/main.rs | 11 +- core/bin/zksync_tee_prover/src/metrics.rs | 4 +- core/bin/zksync_tee_prover/src/tee_prover.rs | 11 +- core/lib/basic_types/src/api_key.rs | 20 + core/lib/basic_types/src/commitment.rs | 4 +- core/lib/basic_types/src/lib.rs | 18 + core/lib/basic_types/src/protocol_version.rs | 20 +- .../{types => basic_types}/src/pubdata_da.rs | 23 +- core/lib/basic_types/src/web3/mod.rs | 29 + core/lib/basic_types/src/web3/tests.rs | 10 + core/lib/config/Cargo.toml | 4 + core/lib/config/src/configs/api.rs | 34 +- core/lib/config/src/configs/chain.rs | 3 + core/lib/config/src/configs/consensus.rs | 1 + core/lib/config/src/configs/contracts.rs | 10 +- .../lib/config/src/configs/da_client/avail.rs | 28 +- core/lib/config/src/configs/en_config.rs | 3 +- core/lib/config/src/configs/eth_sender.rs | 33 +- core/lib/config/src/configs/genesis.rs | 2 + core/lib/config/src/configs/mod.rs | 3 +- .../config/src/configs/proof_data_handler.rs | 33 +- .../config/src/configs/prover_autoscaler.rs | 128 + core/lib/config/src/testonly.rs | 40 +- core/lib/constants/src/contracts.rs | 9 + core/lib/constants/src/lib.rs | 1 + core/lib/constants/src/message_root.rs | 5 + core/lib/constants/src/system_logs.rs | 9 +- core/lib/contracts/src/lib.rs | 220 +- ...16fe37110ebc3fb3981b2626a0bf2edd00e69.json | 40 + ...ad2574cd1310dff1d1bf06825d5634ba25f04.json | 30 - ...98869d490ea0a96aa9d5b9a22b34ab0f8f47.json} | 4 +- ...81f4625ebd593aa4cd2bae79bcc0637387d78.json | 22 + ...01396dacefc0cea8cbcf5807185eb00fc0f7.json} | 40 +- ...ed820f8869acb6f59aa6dd704c0f5b4e45ec.json} | 4 +- 
...7791290f3bfff4de742f2a918a3fd4e5608c.json} | 4 +- ...f7cd9c8486d1613319e1f6bc038ddff539f8.json} | 4 +- ...9027b18d108a05f5855115ba36045e3b1850.json} | 12 +- ...0cc9e176729744c779fee97ca9392ae8a8c8.json} | 56 +- ...94ec52b3eb68c346492a8fed98f20f2a0381d.json | 36 - ...62af196d586cc08ea0f23d2c568527e94b41d.json | 12 - ...e494ce1d8e3b6cfb0b897745fb596f283be79.json | 52 - ...1717e73c5e6b063be3553d82bfecb98334980.json | 24 - ...892118f5732374e62f35e27800422afb5746.json} | 40 +- ...2f38816f163a3e3fba4fdbb81076b969e970.json} | 40 +- ...fbde6eb6634d7a63005081ffc1eb6c28e9ec.json} | 22 +- ...3010bea7cee09a9538a4e275ea89f67704966.json | 23 + ...6f1a607a0bcc6864490c5961dd4e2ee12ed78.json | 22 - ...5e94ad6bdd84c31e4b2e0c629e51857533974.json | 23 - ...f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json | 23 + ...3ba84478b6e56c95dfae6d8cc84e938e80c6.json} | 4 +- ...911add046315e5f8877bc57a34e3dadf9e37.json} | 44 +- ...5aea6710351dea1f1e57d73447476c3fcd199.json | 28 - ...e0294e53eb37c1a2dbcc3044b8311200d549a.json | 33 + ...7bd02627ebaf2df7c5ad517cb60a243182d2.json} | 20 +- ...3369701d7cd5f75ca031bf77ca27d0437cb9.json} | 42 +- ...233f6503bc79cc9f809d35c558e275ba117ba.json | 33 + ...4ba52c9a0e64d1badc39cc2fef29b1468621a.json | 56 + ...70e7a5fe02b60d5d23e4d153190138112c5b.json} | 12 +- ...d3b4c514a18d8a33ec978d3e8007af8d0c20.json} | 4 +- ...526c586708c812dc00b10bf3cd8aa871d9c2.json} | 4 +- ...c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json | 28 - ...019baa12323fd3ef381fdacf290a3db3ec77.json} | 4 +- ...63f500b8358757e5a453cf0a87d5cd9620d7e.json | 33 - ...806fcc54d73216a7dc54be6ba210ef02d789.json} | 40 +- ...ac6f0b00c05229a0bd40902d5fcb1c1bf026.json} | 4 +- ...5e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json} | 10 +- ...3c39e774c405508e73e77cdd0c01f924c97c0.json | 40 - ...e82c5aa84c85b9486e81261d17901a786917.json} | 5 +- ...0a5b8081edf28fa1b67f71101d2e3621be798.json | 20 + ...99bf19b587a16ad70a671b0de48fd608bf31c.json | 23 - ...0e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json} | 40 +- ...65162bce330edd9b16587e8f9fdab17a8456.json} | 12 +- ...673e4b5bba059ebe07bbbb64578881db030b.json} | 40 +- ...35bfda52cc5bb5a4bfb11270a2a784491c967.json | 30 + ...c058f9ad703461a1f55c534bf3d9f48eb61b.json} | 4 +- ...81b01395cfd2a3e95fb4593229bd878163320.json | 26 + ...926df634ebf0d8286181fa04884fb747cee8.json} | 12 +- ...ebfcd3b9c287fecde4376afa84c0566a55ef.json} | 4 +- ...1cd8aa22592f0808f3c2f0555ca321459815e.json | 22 + ...ddb30ca0d9ea0190786b8e8472c622e98b9c.json} | 4 +- ...7999eabb611338925abe9dc9e64c837183d9.json} | 56 +- ...729b9149fee37c5ef7d69e259ee33cb8ca860.json | 65 - ...3cab780f7ed1d91199b4d34011cdc9376c005.json | 22 - ...15f75badf7801f687af19163f5f533e20fbc7.json | 32 - ...b243bb067514b67daaf084353e5ada15b23a.json} | 10 +- ...9542f0dba42101b32e026751362e169381662.json | 22 + ...223f4599d4128db588d8645f3d106de5f50b.json} | 8 +- core/lib/dal/Cargo.toml | 3 + ...0910112120_unsealed_batches_in_db.down.sql | 5 + ...240910112120_unsealed_batches_in_db.up.sql | 5 + .../20240911161714_evm-simulator.down.sql | 3 + .../20240911161714_evm-simulator.up.sql | 4 + ...e_tee_verifier_input_producer_job.down.sql | 20 + ...ove_tee_verifier_input_producer_job.up.sql | 3 + core/lib/dal/src/blocks_dal.rs | 405 +- core/lib/dal/src/blocks_web3_dal.rs | 38 +- core/lib/dal/src/consensus/conv.rs | 589 ++ core/lib/dal/src/consensus/mod.rs | 563 +- core/lib/dal/src/consensus/proto/mod.proto | 14 +- core/lib/dal/src/consensus/testonly.rs | 31 +- core/lib/dal/src/consensus/tests.rs | 42 +- .../mod.rs} | 341 +- core/lib/dal/src/consensus_dal/tests.rs | 189 + 
core/lib/dal/src/factory_deps_dal.rs | 18 + core/lib/dal/src/lib.rs | 10 +- core/lib/dal/src/models/mod.rs | 15 +- core/lib/dal/src/models/storage_block.rs | 71 +- .../src/models/storage_protocol_version.rs | 10 + core/lib/dal/src/models/storage_sync.rs | 29 +- .../lib/dal/src/models/storage_transaction.rs | 26 +- core/lib/dal/src/protocol_versions_dal.rs | 55 +- .../lib/dal/src/protocol_versions_web3_dal.rs | 1 + core/lib/dal/src/storage_web3_dal.rs | 22 +- core/lib/dal/src/sync_dal.rs | 3 + core/lib/dal/src/tee_proof_generation_dal.rs | 183 +- .../src/tee_verifier_input_producer_dal.rs | 234 - core/lib/dal/src/tests/mod.rs | 4 +- core/lib/dal/src/transactions_dal.rs | 23 +- core/lib/dal/src/transactions_web3_dal.rs | 60 +- core/lib/dal/src/vm_runner_dal.rs | 4 + core/lib/env_config/src/api.rs | 2 + core/lib/env_config/src/chain.rs | 1 + core/lib/env_config/src/contracts.rs | 2 +- core/lib/env_config/src/da_client.rs | 80 +- core/lib/env_config/src/eth_sender.rs | 5 +- core/lib/env_config/src/genesis.rs | 1 + core/lib/env_config/src/proof_data_handler.rs | 14 +- core/lib/eth_client/src/clients/http/query.rs | 17 +- core/lib/eth_client/src/types.rs | 12 +- core/lib/eth_signer/Cargo.toml | 9 +- core/lib/eth_signer/src/error.rs | 1 - core/lib/eth_signer/src/lib.rs | 3 +- core/lib/eth_signer/src/pk_signer.rs | 65 +- core/lib/eth_signer/src/raw_ethereum_tx.rs | 6 +- core/lib/external_price_api/Cargo.toml | 4 + core/lib/external_price_api/src/cmc_api.rs | 357 + core/lib/external_price_api/src/lib.rs | 1 + core/lib/external_price_api/src/tests.rs | 8 +- core/lib/l1_contract_interface/Cargo.toml | 2 + .../src/i_executor/methods/commit_batches.rs | 4 +- .../structures/commit_batch_info.rs | 30 +- .../src/i_executor/structures/mod.rs | 3 + .../structures/stored_batch_info.rs | 63 +- .../src/i_executor/structures/tests.rs | 32 + .../src/multicall3/mod.rs | 2 + core/lib/mempool/src/mempool_store.rs | 55 +- core/lib/mempool/src/tests.rs | 50 +- core/lib/merkle_tree/src/domain.rs | 27 +- core/lib/merkle_tree/src/errors.rs | 2 + core/lib/merkle_tree/src/lib.rs | 2 +- core/lib/merkle_tree/src/storage/rocksdb.rs | 27 +- .../merkle_tree/src/storage/serialization.rs | 59 +- core/lib/merkle_tree/src/types/internal.rs | 88 +- core/lib/merkle_tree/src/types/mod.rs | 2 +- .../merkle_tree/tests/integration/domain.rs | 25 + core/lib/multivm/Cargo.toml | 7 +- core/lib/multivm/README.md | 14 + .../src/glue/types/vm/vm_block_result.rs | 6 + .../types/vm/vm_partial_execution_result.rs | 3 + .../glue/types/vm/vm_tx_execution_result.rs | 5 + core/lib/multivm/src/lib.rs | 3 +- core/lib/multivm/src/pubdata_builders/mod.rs | 24 + .../multivm/src/pubdata_builders/rollup.rs | 128 + .../lib/multivm/src/pubdata_builders/tests.rs | 123 + .../lib/multivm/src/pubdata_builders/utils.rs | 70 + .../multivm/src/pubdata_builders/validium.rs | 93 + core/lib/multivm/src/tracers/validator/mod.rs | 22 +- core/lib/multivm/src/utils/events.rs | 56 +- core/lib/multivm/src/utils/mod.rs | 12 +- core/lib/multivm/src/versions/README.md | 17 - core/lib/multivm/src/versions/mod.rs | 4 +- .../src/versions/{tests.rs => shadow/mod.rs} | 27 +- core/lib/multivm/src/versions/shadow/tests.rs | 427 + core/lib/multivm/src/versions/testonly.rs | 96 - .../src/versions/testonly/block_tip.rs | 390 + .../tests => testonly}/bootloader.rs | 39 +- .../versions/testonly/bytecode_publishing.rs | 44 + .../{vm_1_4_2/tests => testonly}/circuits.rs | 36 +- .../src/versions/testonly/code_oracle.rs | 242 + .../tests => testonly}/default_aa.rs | 65 +- 
.../src/versions/testonly/gas_limit.rs | 34 + .../versions/testonly/get_used_contracts.rs | 219 + .../tests => testonly}/is_write_initial.rs | 25 +- .../tests => testonly}/l1_tx_execution.rs | 82 +- .../{vm_1_4_1/tests => testonly}/l2_blocks.rs | 223 +- core/lib/multivm/src/versions/testonly/mod.rs | 243 + .../src/versions/testonly/nonce_holder.rs | 200 + .../src/versions/testonly/precompiles.rs | 110 + .../tests => testonly}/refunds.rs | 148 +- .../tests => testonly}/require_eip712.rs | 92 +- .../tests => testonly}/rollbacks.rs | 134 +- .../sekp256r1.rs => testonly/secp256r1.rs} | 15 +- .../tests => testonly}/simple_execution.rs | 41 +- .../multivm/src/versions/testonly/storage.rs | 125 + .../src/versions/testonly/tester/mod.rs | 231 + .../tester/transaction_test_info.rs | 52 +- .../tracing_execution_error.rs | 42 +- .../multivm/src/versions/testonly/transfer.rs | 208 + .../{vm_1_4_2/tests => testonly}/upgrade.rs | 178 +- .../multivm/src/versions/vm_1_3_2/utils.rs | 9 +- core/lib/multivm/src/versions/vm_1_3_2/vm.rs | 68 +- .../versions/vm_1_3_2/vm_with_bootloader.rs | 19 +- .../vm_1_4_1/bootloader_state/utils.rs | 4 +- .../vm_1_4_1/implementation/execution.rs | 1 + .../vm_1_4_1/implementation/statistics.rs | 2 +- .../src/versions/vm_1_4_1/tests/block_tip.rs | 284 - .../src/versions/vm_1_4_1/tests/bootloader.rs | 56 - .../vm_1_4_1/tests/bytecode_publishing.rs | 43 - .../versions/vm_1_4_1/tests/call_tracer.rs | 92 - .../src/versions/vm_1_4_1/tests/circuits.rs | 69 - .../src/versions/vm_1_4_1/tests/default_aa.rs | 78 - .../src/versions/vm_1_4_1/tests/gas_limit.rs | 45 - .../vm_1_4_1/tests/get_used_contracts.rs | 109 - .../vm_1_4_1/tests/invalid_bytecode.rs | 120 - .../vm_1_4_1/tests/is_write_initial.rs | 48 - .../vm_1_4_1/tests/l1_tx_execution.rs | 189 - .../src/versions/vm_1_4_1/tests/mod.rs | 23 - .../versions/vm_1_4_1/tests/nonce_holder.rs | 188 - .../versions/vm_1_4_1/tests/precompiles.rs | 136 - .../src/versions/vm_1_4_1/tests/refunds.rs | 166 - .../versions/vm_1_4_1/tests/require_eip712.rs | 165 - .../src/versions/vm_1_4_1/tests/rollbacks.rs | 263 - .../vm_1_4_1/tests/simple_execution.rs | 81 - .../vm_1_4_1/tests/tester/inner_state.rs | 131 - .../src/versions/vm_1_4_1/tests/tester/mod.rs | 9 - .../tests/tester/transaction_test_info.rs | 217 - .../vm_1_4_1/tests/tester/vm_tester.rs | 298 - .../vm_1_4_1/tests/tracing_execution_error.rs | 54 - .../src/versions/vm_1_4_1/tests/upgrade.rs | 355 - .../src/versions/vm_1_4_1/tests/utils.rs | 121 - .../vm_1_4_1/tracers/pubdata_tracer.rs | 3 +- .../vm_1_4_1/types/internals/pubdata.rs | 2 +- core/lib/multivm/src/versions/vm_1_4_1/vm.rs | 31 +- .../vm_1_4_2/bootloader_state/utils.rs | 4 +- .../vm_1_4_2/implementation/execution.rs | 1 + .../vm_1_4_2/implementation/statistics.rs | 2 +- .../src/versions/vm_1_4_2/tests/block_tip.rs | 399 - .../vm_1_4_2/tests/bytecode_publishing.rs | 40 - .../versions/vm_1_4_2/tests/call_tracer.rs | 92 - .../src/versions/vm_1_4_2/tests/gas_limit.rs | 44 - .../vm_1_4_2/tests/get_used_contracts.rs | 109 - .../vm_1_4_2/tests/invalid_bytecode.rs | 120 - .../src/versions/vm_1_4_2/tests/l2_blocks.rs | 437 - .../src/versions/vm_1_4_2/tests/mod.rs | 23 - .../versions/vm_1_4_2/tests/nonce_holder.rs | 187 - .../versions/vm_1_4_2/tests/precompiles.rs | 135 - .../vm_1_4_2/tests/prestate_tracer.rs | 143 - .../src/versions/vm_1_4_2/tests/refunds.rs | 169 - .../src/versions/vm_1_4_2/tests/rollbacks.rs | 263 - .../vm_1_4_2/tests/tester/inner_state.rs | 131 - .../src/versions/vm_1_4_2/tests/tester/mod.rs | 9 - 
.../tests/tester/transaction_test_info.rs | 217 - .../vm_1_4_2/tests/tester/vm_tester.rs | 298 - .../src/versions/vm_1_4_2/tests/utils.rs | 121 - .../vm_1_4_2/tracers/pubdata_tracer.rs | 3 +- .../vm_1_4_2/types/internals/pubdata.rs | 2 +- core/lib/multivm/src/versions/vm_1_4_2/vm.rs | 27 +- .../bootloader_state/utils.rs | 4 +- .../implementation/execution.rs | 1 + .../implementation/statistics.rs | 2 +- .../vm_boojum_integration/tests/bootloader.rs | 56 - .../tests/bytecode_publishing.rs | 43 - .../tests/call_tracer.rs | 92 - .../vm_boojum_integration/tests/circuits.rs | 66 - .../vm_boojum_integration/tests/default_aa.rs | 76 - .../vm_boojum_integration/tests/gas_limit.rs | 45 - .../tests/get_used_contracts.rs | 109 - .../tests/invalid_bytecode.rs | 120 - .../tests/is_write_initial.rs | 48 - .../tests/l1_tx_execution.rs | 139 - .../vm_boojum_integration/tests/l2_blocks.rs | 437 - .../vm_boojum_integration/tests/mod.rs | 22 - .../tests/nonce_holder.rs | 188 - .../tests/precompiles.rs | 136 - .../vm_boojum_integration/tests/refunds.rs | 167 - .../tests/require_eip712.rs | 165 - .../vm_boojum_integration/tests/rollbacks.rs | 263 - .../tests/simple_execution.rs | 81 - .../tests/tester/inner_state.rs | 130 - .../vm_boojum_integration/tests/tester/mod.rs | 7 - .../tests/tester/transaction_test_info.rs | 217 - .../tests/tester/vm_tester.rs | 295 - .../tests/tracing_execution_error.rs | 54 - .../vm_boojum_integration/tests/upgrade.rs | 362 - .../vm_boojum_integration/tests/utils.rs | 111 - .../tracers/pubdata_tracer.rs | 3 +- .../types/internals/pubdata.rs | 2 +- .../src/versions/vm_boojum_integration/vm.rs | 27 +- .../vm_fast/bootloader_state/utils.rs | 4 +- .../src/versions/vm_fast/circuits_tracer.rs | 4 +- core/lib/multivm/src/versions/vm_fast/mod.rs | 4 +- .../multivm/src/versions/vm_fast/pubdata.rs | 2 +- .../src/versions/vm_fast/tests/block_tip.rs | 392 +- .../src/versions/vm_fast/tests/bootloader.rs | 50 +- .../vm_fast/tests/bytecode_publishing.rs | 38 +- .../src/versions/vm_fast/tests/call_tracer.rs | 92 - .../src/versions/vm_fast/tests/circuits.rs | 74 +- .../src/versions/vm_fast/tests/code_oracle.rs | 247 +- .../src/versions/vm_fast/tests/default_aa.rs | 81 +- .../src/versions/vm_fast/tests/gas_limit.rs | 39 +- .../vm_fast/tests/get_used_contracts.rs | 229 +- .../vm_fast/tests/invalid_bytecode.rs | 120 - .../vm_fast/tests/is_write_initial.rs | 46 +- .../versions/vm_fast/tests/l1_tx_execution.rs | 196 +- .../src/versions/vm_fast/tests/l2_blocks.rs | 421 +- .../multivm/src/versions/vm_fast/tests/mod.rs | 190 +- .../versions/vm_fast/tests/nonce_holder.rs | 180 +- .../src/versions/vm_fast/tests/precompiles.rs | 113 +- .../versions/vm_fast/tests/prestate_tracer.rs | 143 - .../src/versions/vm_fast/tests/refunds.rs | 217 +- .../versions/vm_fast/tests/require_eip712.rs | 178 +- .../src/versions/vm_fast/tests/rollbacks.rs | 174 +- .../src/versions/vm_fast/tests/secp256r1.rs | 6 + .../vm_fast/tests/simple_execution.rs | 74 +- .../src/versions/vm_fast/tests/storage.rs | 131 +- .../src/versions/vm_fast/tests/tester/mod.rs | 6 - .../tests/tester/transaction_test_info.rs | 242 - .../vm_fast/tests/tester/vm_tester.rs | 231 - .../vm_fast/tests/tracing_execution_error.rs | 53 +- .../src/versions/vm_fast/tests/transfer.rs | 213 +- .../src/versions/vm_fast/tests/upgrade.rs | 340 +- .../src/versions/vm_fast/tests/utils.rs | 137 - core/lib/multivm/src/versions/vm_fast/vm.rs | 277 +- .../vm_latest/bootloader_state/state.rs | 63 +- .../vm_latest/bootloader_state/utils.rs | 123 +- 
.../vm_latest/implementation/execution.rs | 10 +- .../vm_latest/implementation/statistics.rs | 2 +- .../versions/vm_latest/implementation/tx.rs | 7 +- .../vm_latest/old_vm/oracles/decommitter.rs | 76 +- .../src/versions/vm_latest/tests/block_tip.rs | 429 +- .../versions/vm_latest/tests/bootloader.rs | 55 +- .../vm_latest/tests/bytecode_publishing.rs | 40 +- .../versions/vm_latest/tests/call_tracer.rs | 37 +- .../src/versions/vm_latest/tests/circuits.rs | 75 +- .../versions/vm_latest/tests/code_oracle.rs | 277 +- .../versions/vm_latest/tests/default_aa.rs | 85 +- .../versions/vm_latest/tests/evm_emulator.rs | 507 ++ .../src/versions/vm_latest/tests/gas_limit.rs | 45 +- .../vm_latest/tests/get_used_contracts.rs | 241 +- .../vm_latest/tests/is_write_initial.rs | 48 +- .../versions/vm_latest/tests/l1_messenger.rs | 314 +- .../vm_latest/tests/l1_tx_execution.rs | 193 +- .../src/versions/vm_latest/tests/l2_blocks.rs | 430 +- .../src/versions/vm_latest/tests/migration.rs | 51 - .../src/versions/vm_latest/tests/mod.rs | 281 +- .../versions/vm_latest/tests/nonce_holder.rs | 190 +- .../versions/vm_latest/tests/precompiles.rs | 139 +- .../vm_latest/tests/prestate_tracer.rs | 51 +- .../src/versions/vm_latest/tests/refunds.rs | 230 +- .../vm_latest/tests/require_eip712.rs | 168 +- .../src/versions/vm_latest/tests/rollbacks.rs | 200 +- .../src/versions/vm_latest/tests/secp256r1.rs | 9 + .../src/versions/vm_latest/tests/sekp256r1.rs | 74 - .../vm_latest/tests/simple_execution.rs | 77 +- .../src/versions/vm_latest/tests/storage.rs | 186 +- .../vm_latest/tests/tester/inner_state.rs | 131 - .../versions/vm_latest/tests/tester/mod.rs | 9 - .../vm_latest/tests/tester/vm_tester.rs | 335 - .../tests/tracing_execution_error.rs | 53 +- .../src/versions/vm_latest/tests/transfer.rs | 218 +- .../src/versions/vm_latest/tests/upgrade.rs | 351 +- .../src/versions/vm_latest/tests/utils.rs | 150 - .../vm_latest/tracers/default_tracers.rs | 18 +- .../vm_latest/tracers/evm_deploy_tracer.rs | 103 + .../src/versions/vm_latest/tracers/mod.rs | 2 + .../vm_latest/tracers/pubdata_tracer.rs | 40 +- .../versions/vm_latest/types/internals/mod.rs | 2 - .../vm_latest/types/internals/pubdata.rs | 339 - .../types/internals/transaction_data.rs | 19 +- .../vm_latest/types/internals/vm_state.rs | 25 +- .../src/versions/vm_latest/utils/mod.rs | 33 +- .../vm_latest/utils/transaction_encoding.rs | 4 +- core/lib/multivm/src/versions/vm_latest/vm.rs | 69 +- core/lib/multivm/src/versions/vm_m5/utils.rs | 9 +- core/lib/multivm/src/versions/vm_m5/vm.rs | 40 +- core/lib/multivm/src/versions/vm_m6/utils.rs | 9 +- core/lib/multivm/src/versions/vm_m6/vm.rs | 66 +- .../src/versions/vm_m6/vm_with_bootloader.rs | 28 +- .../bootloader_state/utils.rs | 4 +- .../implementation/execution.rs | 1 + .../implementation/statistics.rs | 2 +- .../tests/bootloader.rs | 54 - .../tests/bytecode_publishing.rs | 37 - .../tests/call_tracer.rs | 87 - .../tests/default_aa.rs | 70 - .../vm_refunds_enhancement/tests/gas_limit.rs | 45 - .../tests/get_used_contracts.rs | 104 - .../tests/invalid_bytecode.rs | 120 - .../tests/is_write_initial.rs | 42 - .../tests/l1_tx_execution.rs | 125 - .../vm_refunds_enhancement/tests/l2_blocks.rs | 498 -- .../vm_refunds_enhancement/tests/mod.rs | 20 - .../tests/nonce_holder.rs | 181 - .../tests/require_eip712.rs | 163 - .../vm_refunds_enhancement/tests/rollbacks.rs | 259 - .../tests/simple_execution.rs | 77 - .../tests/tester/inner_state.rs | 127 - .../tests/tester/mod.rs | 7 - .../tests/tester/transaction_test_info.rs | 217 - 
.../tests/tester/vm_tester.rs | 300 - .../tests/tracing_execution_error.rs | 53 - .../vm_refunds_enhancement/tests/upgrade.rs | 342 - .../vm_refunds_enhancement/tests/utils.rs | 106 - .../src/versions/vm_refunds_enhancement/vm.rs | 31 +- .../bootloader_state/utils.rs | 4 +- .../implementation/execution.rs | 1 + .../implementation/statistics.rs | 2 +- .../vm_virtual_blocks/tests/bootloader.rs | 53 - .../tests/bytecode_publishing.rs | 37 - .../vm_virtual_blocks/tests/call_tracer.rs | 88 - .../vm_virtual_blocks/tests/default_aa.rs | 70 - .../vm_virtual_blocks/tests/gas_limit.rs | 45 - .../tests/get_used_contracts.rs | 106 - .../tests/invalid_bytecode.rs | 120 - .../tests/is_write_initial.rs | 42 - .../tests/l1_tx_execution.rs | 125 - .../vm_virtual_blocks/tests/l2_blocks.rs | 502 -- .../versions/vm_virtual_blocks/tests/mod.rs | 20 - .../vm_virtual_blocks/tests/nonce_holder.rs | 182 - .../vm_virtual_blocks/tests/refunds.rs | 152 - .../vm_virtual_blocks/tests/require_eip712.rs | 161 - .../tests/simple_execution.rs | 77 - .../tests/tester/inner_state.rs | 119 - .../vm_virtual_blocks/tests/tester/mod.rs | 7 - .../tests/tester/transaction_test_info.rs | 216 - .../tests/tester/vm_tester.rs | 291 - .../tests/tracing_execution_error.rs | 52 - .../vm_virtual_blocks/tests/upgrade.rs | 344 - .../versions/vm_virtual_blocks/tests/utils.rs | 106 - .../src/versions/vm_virtual_blocks/vm.rs | 29 +- core/lib/multivm/src/vm_instance.rs | 51 +- core/lib/object_store/src/file.rs | 1 - core/lib/object_store/src/raw.rs | 2 - core/lib/protobuf_config/Cargo.toml | 1 + core/lib/protobuf_config/src/api.rs | 8 +- core/lib/protobuf_config/src/chain.rs | 1 + core/lib/protobuf_config/src/consensus.rs | 2 + core/lib/protobuf_config/src/contracts.rs | 12 +- core/lib/protobuf_config/src/da_client.rs | 53 +- core/lib/protobuf_config/src/en.rs | 11 +- core/lib/protobuf_config/src/eth.rs | 32 +- core/lib/protobuf_config/src/genesis.rs | 7 + core/lib/protobuf_config/src/lib.rs | 23 +- .../protobuf_config/src/proof_data_handler.rs | 16 +- .../src/proto/config/api.proto | 3 +- .../src/proto/config/da_client.proto | 20 +- .../protobuf_config/src/proto/config/en.proto | 1 + .../src/proto/config/eth_sender.proto | 3 +- .../src/proto/config/genesis.proto | 3 +- .../src/proto/config/prover.proto | 3 +- .../src/proto/config/prover_autoscaler.proto | 59 + .../src/proto/config/secrets.proto | 1 + .../src/proto/core/consensus.proto | 3 + .../protobuf_config/src/prover_autoscaler.rs | 240 + core/lib/protobuf_config/src/secrets.rs | 54 +- core/lib/protobuf_config/src/tests.rs | 17 +- core/lib/prover_interface/src/api.rs | 2 +- core/lib/prover_interface/src/inputs.rs | 113 +- core/lib/snapshots_applier/src/tests/utils.rs | 2 +- core/lib/state/src/cache/lru_cache.rs | 7 + core/lib/state/src/postgres/mod.rs | 185 +- core/lib/state/src/postgres/tests.rs | 4 +- core/lib/state/src/test_utils.rs | 2 +- core/lib/tee_verifier/Cargo.toml | 7 +- core/lib/tee_verifier/src/lib.rs | 162 +- core/lib/types/Cargo.toml | 3 +- core/lib/types/src/api/en.rs | 6 + core/lib/types/src/api/mod.rs | 25 +- core/lib/types/src/api/state_override.rs | 11 + core/lib/types/src/block.rs | 25 +- core/lib/types/src/commitment/mod.rs | 121 +- core/lib/types/src/commitment/tests/mod.rs | 5 + .../tests/post_boojum_1_4_1_test.json | 36 +- .../tests/post_boojum_1_4_2_test.json | 36 +- .../tests/post_boojum_1_5_0_test.json | 204 +- .../post_boojum_1_5_0_test_with_evm.json | 440 + .../commitment/tests/post_gateway_test.json | 210 +- core/lib/types/src/debug_flat_call.rs | 17 + 
core/lib/types/src/fee_model.rs | 463 +- core/lib/types/src/l2/mod.rs | 4 +- core/lib/types/src/l2_to_l1_log.rs | 15 +- core/lib/types/src/lib.rs | 38 +- core/lib/types/src/protocol_upgrade.rs | 38 +- core/lib/types/src/storage/mod.rs | 11 +- .../types/src/storage/witness_block_state.rs | 2 +- core/lib/types/src/system_contracts.rs | 50 +- core/lib/types/src/transaction_request.rs | 56 +- core/lib/types/src/tx/execute.rs | 26 +- core/lib/utils/src/bytecode.rs | 61 + core/lib/utils/src/env.rs | 40 +- core/lib/vm_executor/Cargo.toml | 3 + core/lib/vm_executor/src/batch/factory.rs | 82 +- core/lib/vm_executor/src/lib.rs | 2 + core/lib/vm_executor/src/oneshot/block.rs | 55 +- core/lib/vm_executor/src/oneshot/contracts.rs | 100 +- core/lib/vm_executor/src/oneshot/env.rs | 80 +- core/lib/vm_executor/src/oneshot/mock.rs | 1 + core/lib/vm_executor/src/oneshot/mod.rs | 13 +- core/lib/vm_executor/src/storage.rs | 27 +- core/lib/vm_executor/src/testonly.rs | 45 + core/lib/vm_interface/src/executor.rs | 3 +- core/lib/vm_interface/src/lib.rs | 13 +- core/lib/vm_interface/src/pubdata/mod.rs | 90 + .../lib/vm_interface/src/storage/in_memory.rs | 2 +- core/lib/vm_interface/src/storage/view.rs | 10 + .../src/types/inputs/execution_mode.rs | 19 + .../src/types/inputs/l1_batch_env.rs | 20 +- core/lib/vm_interface/src/types/inputs/mod.rs | 2 +- .../src/types/inputs/system_env.rs | 4 +- .../src/types/outputs/execution_result.rs | 6 + .../src/types/outputs/finished_l1batch.rs | 1 + .../lib/vm_interface/src/types/outputs/mod.rs | 13 + .../src/types/outputs/statistic.rs | 3 +- core/lib/vm_interface/src/utils/dump.rs | 37 +- core/lib/vm_interface/src/utils/mod.rs | 4 +- core/lib/vm_interface/src/utils/shadow.rs | 377 +- core/lib/vm_interface/src/vm.rs | 25 +- core/lib/web3_decl/src/namespaces/en.rs | 6 + core/lib/web3_decl/src/namespaces/eth.rs | 8 +- core/lib/web3_decl/src/types.rs | 4 +- core/lib/zksync_core_leftovers/src/lib.rs | 6 - core/node/api_server/Cargo.toml | 1 + .../src/execution_sandbox/execute.rs | 17 +- .../api_server/src/execution_sandbox/mod.rs | 19 +- .../api_server/src/execution_sandbox/tests.rs | 92 +- core/node/api_server/src/testonly.rs | 394 +- .../src/tx_sender/gas_estimation.rs | 55 +- core/node/api_server/src/tx_sender/mod.rs | 37 +- core/node/api_server/src/tx_sender/result.rs | 30 +- core/node/api_server/src/tx_sender/tests.rs | 805 -- .../api_server/src/tx_sender/tests/call.rs | 253 + .../src/tx_sender/tests/gas_estimation.rs | 483 ++ .../api_server/src/tx_sender/tests/mod.rs | 166 + .../api_server/src/tx_sender/tests/send_tx.rs | 300 + core/node/api_server/src/utils.rs | 36 + .../web3/backend_jsonrpsee/namespaces/en.rs | 9 + .../web3/backend_jsonrpsee/namespaces/eth.rs | 10 +- .../web3/backend_jsonrpsee/namespaces/zks.rs | 2 +- core/node/api_server/src/web3/mod.rs | 80 +- .../api_server/src/web3/namespaces/debug.rs | 88 +- .../node/api_server/src/web3/namespaces/en.rs | 35 + .../api_server/src/web3/namespaces/eth.rs | 56 +- .../api_server/src/web3/namespaces/zks.rs | 42 +- core/node/api_server/src/web3/state.rs | 80 +- core/node/api_server/src/web3/testonly.rs | 136 +- core/node/api_server/src/web3/tests/debug.rs | 29 +- .../node/api_server/src/web3/tests/filters.rs | 4 +- core/node/api_server/src/web3/tests/mod.rs | 316 +- .../api_server/src/web3/tests/unstable.rs | 7 +- core/node/api_server/src/web3/tests/vm.rs | 499 +- core/node/api_server/src/web3/tests/ws.rs | 6 +- .../src/base_token_l1_behaviour.rs | 8 +- .../src/base_token_ratio_persister.rs | 3 + 
.../src/base_token_ratio_provider.rs | 2 +- core/node/base_token_adjuster/src/metrics.rs | 1 + core/node/block_reverter/src/tests.rs | 3 +- core/node/commitment_generator/Cargo.toml | 1 + core/node/commitment_generator/src/lib.rs | 58 +- core/node/commitment_generator/src/utils.rs | 17 +- core/node/consensus/src/batch.rs | 275 - core/node/consensus/src/config.rs | 2 - core/node/consensus/src/en.rs | 128 +- core/node/consensus/src/era.rs | 14 +- core/node/consensus/src/lib.rs | 4 - core/node/consensus/src/mn.rs | 22 +- core/node/consensus/src/registry/abi.rs | 3 +- core/node/consensus/src/registry/tests.rs | 12 +- core/node/consensus/src/storage/connection.rs | 238 +- core/node/consensus/src/storage/store.rs | 229 +- core/node/consensus/src/storage/testonly.rs | 121 +- core/node/consensus/src/testonly.rs | 110 +- core/node/consensus/src/tests/attestation.rs | 28 +- core/node/consensus/src/tests/batch.rs | 124 - core/node/consensus/src/tests/mod.rs | 331 +- core/node/consensus/src/vm.rs | 17 +- core/node/consistency_checker/src/lib.rs | 16 +- .../node/consistency_checker/src/tests/mod.rs | 4 +- core/node/da_clients/Cargo.toml | 3 + core/node/da_clients/src/avail/client.rs | 226 +- core/node/da_clients/src/avail/sdk.rs | 100 +- core/node/db_pruner/src/tests.rs | 2 +- .../eth_sender/src/aggregated_operations.rs | 8 +- core/node/eth_sender/src/aggregator.rs | 8 +- core/node/eth_sender/src/eth_fees_oracle.rs | 20 +- core/node/eth_sender/src/eth_tx_aggregator.rs | 69 +- core/node/eth_sender/src/eth_tx_manager.rs | 14 +- core/node/eth_sender/src/publish_criterion.rs | 4 +- core/node/eth_sender/src/tester.rs | 18 +- core/node/eth_sender/src/tests.rs | 154 +- core/node/eth_sender/src/zksync_functions.rs | 12 + core/node/eth_watch/src/client.rs | 4 +- core/node/eth_watch/src/tests.rs | 18 +- core/node/fee_model/Cargo.toml | 2 - .../src/l1_gas_price/gas_adjuster/mod.rs | 13 +- .../src/l1_gas_price/gas_adjuster/tests.rs | 6 +- core/node/fee_model/src/l1_gas_price/mod.rs | 2 +- core/node/fee_model/src/lib.rs | 459 +- core/node/genesis/src/lib.rs | 23 +- core/node/genesis/src/utils.rs | 3 +- .../src/api_server/metrics.rs | 2 + .../metadata_calculator/src/api_server/mod.rs | 161 +- .../src/api_server/tests.rs | 58 + core/node/metadata_calculator/src/helpers.rs | 56 +- core/node/metadata_calculator/src/lib.rs | 53 + core/node/metadata_calculator/src/updater.rs | 4 - core/node/node_framework/Cargo.toml | 1 - .../layers/base_token/coingecko_client.rs | 55 - .../layers/base_token/forced_price_client.rs | 52 - .../implementations/layers/base_token/mod.rs | 93 +- .../no_op_external_price_api_client.rs | 45 - .../implementations/layers/gas_adjuster.rs | 3 +- .../src/implementations/layers/l1_gas.rs | 28 +- .../layers/metadata_calculator.rs | 67 +- .../src/implementations/layers/mod.rs | 1 - .../layers/proof_data_handler.rs | 8 +- .../layers/state_keeper/external_io.rs | 6 + .../layers/state_keeper/mempool_io.rs | 27 +- .../layers/state_keeper/output_handler.rs | 41 +- .../layers/sync_state_updater.rs | 8 + .../layers/tee_verifier_input_producer.rs | 69 - .../web3_api/server/bridge_addresses.rs | 48 + .../web3_api/{server.rs => server/mod.rs} | 71 +- .../layers/web3_api/server/sealed_l2_block.rs | 50 + .../layers/web3_api/tx_sender.rs | 12 +- core/node/node_framework/src/service/error.rs | 27 +- core/node/node_framework/src/service/mod.rs | 2 +- core/node/node_sync/Cargo.toml | 1 + core/node/node_sync/src/external_io.rs | 253 +- core/node/node_sync/src/fetcher.rs | 11 +- core/node/node_sync/src/genesis.rs | 23 +- 
core/node/node_sync/src/sync_action.rs | 12 + core/node/node_sync/src/sync_state.rs | 1 + core/node/node_sync/src/tests.rs | 115 +- core/node/proof_data_handler/Cargo.toml | 3 + core/node/proof_data_handler/src/errors.rs | 25 +- core/node/proof_data_handler/src/lib.rs | 32 +- .../src/request_processor.rs | 12 +- .../src/tee_request_processor.rs | 122 +- core/node/proof_data_handler/src/tests.rs | 164 +- core/node/shared_metrics/src/lib.rs | 2 - core/node/state_keeper/src/executor/mod.rs | 2 +- .../state_keeper/src/executor/tests/tester.rs | 37 +- core/node/state_keeper/src/io/common/mod.rs | 4 +- core/node/state_keeper/src/io/common/tests.rs | 8 +- core/node/state_keeper/src/io/mempool.rs | 131 +- core/node/state_keeper/src/io/mod.rs | 16 +- core/node/state_keeper/src/io/persistence.rs | 78 +- .../io/seal_logic/l2_block_seal_subtasks.rs | 7 +- .../state_keeper/src/io/seal_logic/mod.rs | 5 +- core/node/state_keeper/src/io/tests/mod.rs | 106 +- core/node/state_keeper/src/io/tests/tester.rs | 25 +- core/node/state_keeper/src/keeper.rs | 37 +- core/node/state_keeper/src/mempool_actor.rs | 31 +- .../state_keeper/src/seal_criteria/mod.rs | 3 + core/node/state_keeper/src/testonly/mod.rs | 9 +- .../src/testonly/test_batch_executor.rs | 8 +- core/node/state_keeper/src/tests/mod.rs | 6 +- .../src/updates/l1_batch_updates.rs | 3 + .../src/updates/l2_block_updates.rs | 38 +- core/node/state_keeper/src/updates/mod.rs | 24 +- .../tee_verifier_input_producer/Cargo.toml | 27 - .../tee_verifier_input_producer/README.md | 3 - .../tee_verifier_input_producer/src/lib.rs | 261 - .../src/metrics.rs | 18 - core/node/test_utils/src/lib.rs | 13 +- core/node/vm_runner/src/impls/bwip.rs | 18 + core/node/vm_runner/src/process.rs | 1 + core/node/vm_runner/src/storage.rs | 9 +- core/node/vm_runner/src/tests/mod.rs | 1 + .../vm_runner/src/tests/output_handler.rs | 2 +- core/tests/recovery-test/src/index.ts | 10 +- core/tests/recovery-test/src/utils.ts | 8 +- .../tests/snapshot-recovery.test.ts | 6 +- core/tests/revert-test/tests/utils.ts | 20 +- core/tests/test_account/src/lib.rs | 19 +- core/tests/ts-integration/src/utils.ts | 10 +- .../ts-integration/tests/api/web3.test.ts | 6 +- .../ts-integration/tests/base-token.test.ts | 2 +- .../ts-integration/tests/contracts.test.ts | 2 +- core/tests/ts-integration/tests/fees.test.ts | 2 +- core/tests/upgrade-test/tests/upgrade.test.ts | 39 +- core/tests/upgrade-test/tests/utils.ts | 16 +- core/tests/vm-benchmark/Cargo.toml | 1 + core/tests/vm-benchmark/benches/iai.rs | 1 + .../src/bin/compare_iai_results.rs | 31 +- .../src/bin/instruction_counts.rs | 9 +- .../vm-benchmark/src/instruction_counter.rs | 1 - core/tests/vm-benchmark/src/vm.rs | 185 +- docker-compose-gpu-runner-cuda-12-0.yml | 21 +- docker-compose-gpu-runner.yml | 7 +- docker-compose-runner-nightly.yml | 5 +- docker-compose-unit-tests.yml | 1 - docker-compose.yml | 13 +- docker/Makefile | 18 +- docker/contract-verifier/Dockerfile | 11 +- docker/contract-verifier/install-all-solc.sh | 2 +- docker/external-node/Dockerfile | 7 +- docker/prover-autoscaler/Dockerfile | 25 + docker/server-v2/Dockerfile | 7 +- ...rfile => 22.04_amd64_cuda_11_8.Dockerfile} | 25 +- ...rfile => 22.04_amd64_cuda_12_0.Dockerfile} | 23 +- docker/zk-environment/Dockerfile | 2 +- docs/guides/build-docker.md | 2 + docs/guides/external-node/00_quick_start.md | 7 +- .../configs/mainnet_consensus_config.yaml | 2 +- .../configs/testnet_consensus_config.yaml | 2 +- .../provisioning/dashboards/Consensus.json | 12 +- 
.../provisioning/dashboards/General.json | 213 +- .../provisioning/dashboards/default.yml | 1 + docs/specs/README.md | 2 +- docs/specs/zk_chains/README.md | 2 +- docs/specs/zk_chains/gateway.md | 1 + docs/specs/zk_chains/hyperbridges.md | 41 - docs/specs/zk_chains/interop.md | 49 + docs/specs/zk_chains/shared_bridge.md | 143 +- .../contracts/mock-evm/mock-evm.sol | 243 + etc/env/base/contracts.toml | 4 +- etc/env/base/eth_sender.toml | 4 +- etc/env/base/rust.toml | 3 +- etc/env/consensus_config.yaml | 1 + etc/env/en_consensus_config.yaml | 1 + etc/env/file_based/general.yaml | 9 +- etc/env/file_based/genesis.yaml | 7 +- etc/env/file_based/overrides/mainnet.yaml | 3 +- etc/env/file_based/overrides/testnet.yaml | 3 +- etc/lint-config/ignore.yaml | 3 +- etc/multivm_bootloaders/vm_gateway/commit | 1 + .../fee_estimate.yul/fee_estimate.yul.zbin | Bin 75168 -> 75168 bytes .../gas_test.yul/gas_test.yul.zbin | Bin 71264 -> 71264 bytes .../playground_batch.yul.zbin | Bin 75360 -> 75360 bytes .../proved_batch.yul/proved_batch.yul.zbin | Bin 71776 -> 71776 bytes etc/nix/tee_prover.nix | 17 +- etc/nix/zksync.nix | 8 +- .../1728066632-protocol-defense/common.json | 5 + .../stage/crypto.json | 6 + .../stage/facetCuts.json | 198 + .../stage/facets.json | 18 + .../stage/l2Upgrade.json | 394 + .../stage/transactions.json | 253 + etc/utils/src/index.ts | 1 + flake.nix | 11 +- infrastructure/protocol-upgrade/README.md | 18 +- .../protocol-upgrade/src/transaction.ts | 462 +- infrastructure/zk/src/docker.ts | 6 +- infrastructure/zk/src/fmt.ts | 2 +- infrastructure/zk/src/lint.ts | 12 +- prover/Cargo.lock | 1355 ++- prover/Cargo.toml | 29 +- .../crates/bin/prover_autoscaler/Cargo.toml | 46 + .../crates/bin/prover_autoscaler/src/agent.rs | 130 + .../prover_autoscaler/src/cluster_types.rs | 60 + .../bin/prover_autoscaler/src/global/mod.rs | 3 + .../prover_autoscaler/src/global/queuer.rs | 49 + .../prover_autoscaler/src/global/scaler.rs | 751 ++ .../prover_autoscaler/src/global/watcher.rs | 203 + .../bin/prover_autoscaler/src/k8s/mod.rs | 5 + .../bin/prover_autoscaler/src/k8s/scaler.rs | 42 + .../bin/prover_autoscaler/src/k8s/watcher.rs | 133 + .../crates/bin/prover_autoscaler/src/lib.rs | 6 + .../crates/bin/prover_autoscaler/src/main.rs | 146 + .../bin/prover_autoscaler/src/metrics.rs | 20 + .../bin/prover_autoscaler/src/task_wiring.rs | 72 + prover/crates/bin/witness_generator/README.md | 4 - .../src/rounds/basic_circuits/utils.rs | 154 +- .../crates/bin/witness_generator/src/utils.rs | 51 +- prover/crates/lib/keystore/src/keystore.rs | 1 + prover/crates/lib/prover_fri_types/src/lib.rs | 4 +- prover/docs/03_launch.md | 2 +- prover/docs/05_proving_batch.md | 48 +- yarn.lock | 1706 ++-- zk_toolbox/crates/common/src/term/spinner.rs | 51 - zk_toolbox/crates/common/src/wallets.rs | 64 - zk_toolbox/crates/zk_inception/build.rs | 11 - .../src/commands/chain/genesis.rs | 176 - zk_toolbox/crates/zk_supervisor/Cargo.toml | 32 - zk_toolbox/crates/zk_supervisor/README.md | 386 - zk_toolbox/crates/zk_supervisor/src/main.rs | 151 - zk_toolbox/rust-toolchain | 1 - zk_toolbox/zkup/README.md | 76 - zk_toolbox/zkup/install | 55 - zk_toolbox/zkup/zkup | 254 - {zk_toolbox => zkstack_cli}/CHANGELOG.md | 0 {zk_toolbox => zkstack_cli}/Cargo.lock | 333 +- {zk_toolbox => zkstack_cli}/Cargo.toml | 19 +- {zk_toolbox => zkstack_cli}/README.md | 104 +- .../crates/common/Cargo.toml | 0 .../crates/common/src/cmd.rs | 0 .../crates/common/src/config.rs | 0 zkstack_cli/crates/common/src/contracts.rs | 52 + .../crates/common/src/db.rs | 0 
.../crates/common/src/docker.rs | 6 +- .../crates/common/src/ethereum.rs | 5 +- .../crates/common/src/external_node.rs | 0 .../crates/common/src/files.rs | 0 .../crates/common/src/forge.rs | 14 +- .../crates/common/src/git.rs | 0 .../crates/common/src/hardhat.rs | 0 .../crates/common/src/lib.rs | 3 +- .../crates/common/src/prerequisites.rs | 89 +- .../crates/common/src/prompt/confirm.rs | 0 .../crates/common/src/prompt/input.rs | 0 .../crates/common/src/prompt/mod.rs | 0 .../crates/common/src/prompt/select.rs | 0 .../crates/common/src/server.rs | 0 .../crates/common/src/term/error.rs | 0 .../crates/common/src/term/logger.rs | 2 +- .../crates/common/src/term/mod.rs | 0 zkstack_cli/crates/common/src/term/spinner.rs | 84 + .../crates/common/src/version.rs | 0 zkstack_cli/crates/common/src/wallets.rs | 102 + .../crates/common/src/withdraw.rs | 0 .../crates/common/src/yaml.rs | 0 .../crates/config/Cargo.toml | 0 .../crates/config/src/apps.rs | 4 +- .../crates/config/src/chain.rs | 12 +- .../crates/config/src/consensus_config.rs | 0 .../crates/config/src/consensus_secrets.rs | 4 +- .../crates/config/src/consts.rs | 2 - .../crates/config/src/contracts.rs | 15 +- .../crates/config/src/docker_compose.rs | 4 +- .../crates/config/src/ecosystem.rs | 27 +- .../crates/config/src/explorer.rs | 4 +- .../crates/config/src/explorer_compose.rs | 4 +- .../crates/config/src/external_node.rs | 4 +- .../crates/config/src/file_config.rs | 0 .../forge_interface/accept_ownership/mod.rs | 4 +- .../forge_interface/deploy_ecosystem/input.rs | 12 +- .../forge_interface/deploy_ecosystem/mod.rs | 0 .../deploy_ecosystem/output.rs | 6 +- .../deploy_gateway_ctm/input.rs | 4 +- .../forge_interface/deploy_gateway_ctm/mod.rs | 0 .../deploy_gateway_ctm/output.rs | 4 +- .../deploy_l2_contracts/input.rs | 4 +- .../deploy_l2_contracts/mod.rs | 0 .../deploy_l2_contracts/output.rs | 10 +- .../gateway_preparation/input.rs | 5 +- .../gateway_preparation/mod.rs | 0 .../gateway_preparation/output.rs | 4 +- .../crates/config/src/forge_interface/mod.rs | 0 .../src/forge_interface/paymaster/mod.rs | 6 +- .../forge_interface/register_chain/input.rs | 4 +- .../src/forge_interface/register_chain/mod.rs | 0 .../forge_interface/register_chain/output.rs | 4 +- .../src/forge_interface/script_params.rs | 0 .../setup_legacy_bridge/mod.rs | 4 +- .../crates/config/src/gateway.rs | 6 +- .../crates/config/src/general.rs | 4 +- .../crates/config/src/genesis.rs | 19 +- .../crates/config/src/lib.rs | 2 +- .../crates/config/src/manipulations.rs | 0 .../crates/config/src/portal.rs | 4 +- .../crates/config/src/secrets.rs | 12 +- .../crates/config/src/traits.rs | 8 +- .../crates/config/src/wallet_creation.rs | 0 .../crates/config/src/wallets.rs | 18 +- .../crates/git_version_macro/Cargo.toml | 0 .../crates/git_version_macro/src/lib.rs | 0 .../crates/types/Cargo.toml | 0 .../crates/types/src/base_token.rs | 0 .../crates/types/src/l1_network.rs | 0 .../crates/types/src/lib.rs | 2 +- .../crates/types/src/prover_mode.rs | 0 .../crates/types/src/token_info.rs | 0 .../crates/types/src/wallet_creation.rs | 0 .../crates/zkstack}/Cargo.toml | 45 +- .../crates/zkstack}/README.md | 70 +- .../zkstack}/abi/ConsensusRegistry.json | 0 zkstack_cli/crates/zkstack/build.rs | 149 + .../crates/zkstack/completion/_zkstack.zsh | 5151 ++++++++++++ .../crates/zkstack/completion/zkstack.fish | 767 ++ .../crates/zkstack/completion/zkstack.sh | 7255 +++++++++++++++++ .../crates/zkstack}/src/accept_ownership.rs | 17 +- .../zkstack/src/commands/args/autocomplete.rs | 13 + 
.../zkstack}/src/commands/args/containers.rs | 0 .../crates/zkstack}/src/commands/args/mod.rs | 2 + .../zkstack}/src/commands/args/run_server.rs | 0 .../zkstack}/src/commands/args/update.rs | 0 .../zkstack/src/commands/autocomplete.rs | 52 + .../commands/chain/accept_chain_ownership.rs | 42 + .../commands/chain/args/build_transactions.rs | 0 .../src/commands/chain/args/create.rs | 44 +- .../src/commands/chain/args/genesis.rs | 65 +- .../src/commands/chain/args/init/configs.rs | 64 +- .../src/commands/chain/args/init/mod.rs | 110 + .../zkstack}/src/commands/chain/args/mod.rs | 0 .../src/commands/chain/build_transactions.rs | 7 +- .../zkstack}/src/commands/chain/common.rs | 58 +- .../src/commands/chain/convert_to_gateway.rs | 17 +- .../zkstack}/src/commands/chain/create.rs | 3 + .../src/commands/chain/deploy_l2_contracts.rs | 35 +- .../src/commands/chain/deploy_paymaster.rs | 5 +- .../src/commands/chain/genesis/database.rs | 118 + .../zkstack/src/commands/chain/genesis/mod.rs | 92 + .../src/commands/chain/genesis/server.rs | 47 + .../src/commands/chain/init/configs.rs | 108 + .../zkstack/src/commands/chain/init/mod.rs | 130 +- .../commands/chain/migrate_from_gateway.rs | 14 +- .../src/commands/chain/migrate_to_gateway.rs | 27 +- .../crates/zkstack}/src/commands/chain/mod.rs | 48 +- .../src/commands/chain/register_chain.rs | 96 + .../chain/set_token_multiplier_setter.rs | 11 +- .../src/commands/chain/setup_legacy_bridge.rs | 5 +- .../zkstack/src/commands/consensus/conv.rs | 47 + .../zkstack/src/commands/consensus/mod.rs | 96 +- .../src/commands/consensus/proto/mod.proto | 9 + .../src/commands/consensus/proto/mod.rs | 6 + .../zkstack/src/commands/consensus/tests.rs | 19 + .../zkstack}/src/commands/containers.rs | 12 - .../commands/contract_verifier/args/init.rs | 0 .../commands/contract_verifier/args/mod.rs | 0 .../contract_verifier/args/releases.rs | 37 +- .../src/commands/contract_verifier/init.rs | 3 +- .../src/commands/contract_verifier/mod.rs | 0 .../src/commands/contract_verifier/run.rs | 0 .../src/commands/dev}/commands/clean/mod.rs | 8 +- .../commands/dev}/commands/config_writer.rs | 2 +- .../src/commands/dev}/commands/contracts.rs | 78 +- .../dev}/commands/database/args/mod.rs | 2 +- .../commands/database/args/new_migration.rs | 2 +- .../dev}/commands/database/check_sqlx_data.rs | 2 +- .../commands/dev}/commands/database/drop.rs | 2 +- .../dev}/commands/database/migrate.rs | 2 +- .../commands/dev}/commands/database/mod.rs | 2 +- .../dev}/commands/database/new_migration.rs | 2 +- .../dev}/commands/database/prepare.rs | 2 +- .../commands/dev}/commands/database/reset.rs | 2 +- .../commands/dev}/commands/database/setup.rs | 2 +- .../zkstack/src/commands/dev}/commands/fmt.rs | 15 +- .../src/commands/dev/commands/genesis.rs | 26 + .../src/commands/dev}/commands/lint.rs | 71 +- .../src/commands/dev}/commands/lint_utils.rs | 1 + .../zkstack/src/commands/dev}/commands/mod.rs | 2 + .../dev}/commands/prover/args/insert_batch.rs | 0 .../commands/prover/args/insert_version.rs | 0 .../commands/dev}/commands/prover/args/mod.rs | 0 .../src/commands/dev}/commands/prover/info.rs | 2 +- .../dev}/commands/prover/insert_batch.rs | 2 +- .../dev}/commands/prover/insert_version.rs | 2 +- .../src/commands/dev}/commands/prover/mod.rs | 0 .../commands/send_transactions/args/mod.rs | 2 +- .../dev}/commands/send_transactions/mod.rs | 2 +- .../src/commands/dev}/commands/snapshot.rs | 2 +- .../src/commands/dev}/commands/sql_fmt.rs | 4 +- .../src/commands/dev/commands/status/args.rs | 45 + 
.../src/commands/dev/commands/status/draw.rs | 88 + .../src/commands/dev/commands/status/mod.rs | 135 + .../src/commands/dev/commands/status/utils.rs | 26 + .../commands/dev}/commands/test/args/fees.rs | 4 +- .../dev}/commands/test/args/integration.rs | 6 +- .../commands/dev}/commands/test/args/mod.rs | 0 .../dev}/commands/test/args/recovery.rs | 6 +- .../dev}/commands/test/args/revert.rs | 4 +- .../commands/dev}/commands/test/args/rust.rs | 2 +- .../dev}/commands/test/args/upgrade.rs | 2 +- .../src/commands/dev}/commands/test/build.rs | 2 +- .../src/commands/dev}/commands/test/db.rs | 4 +- .../src/commands/dev}/commands/test/fees.rs | 2 +- .../dev}/commands/test/integration.rs | 4 +- .../dev}/commands/test/l1_contracts.rs | 2 +- .../commands/dev}/commands/test/loadtest.rs | 2 +- .../src/commands/dev}/commands/test/mod.rs | 6 +- .../src/commands/dev}/commands/test/prover.rs | 2 +- .../commands/dev}/commands/test/recovery.rs | 2 +- .../src/commands/dev}/commands/test/revert.rs | 2 +- .../src/commands/dev}/commands/test/rust.rs | 6 +- .../commands/dev}/commands/test/upgrade.rs | 2 +- .../src/commands/dev}/commands/test/utils.rs | 11 +- .../src/commands/dev}/commands/test/wallet.rs | 2 +- .../zkstack/src/commands/dev}/consts.rs | 0 .../crates/zkstack/src/commands/dev}/dals.rs | 6 +- .../zkstack/src/commands/dev}/defaults.rs | 0 .../zkstack/src/commands/dev}/messages.rs | 39 +- .../crates/zkstack/src/commands/dev/mod.rs | 70 + .../ecosystem/args/build_transactions.rs | 0 .../commands/ecosystem/args/change_default.rs | 0 .../src/commands/ecosystem/args/create.rs | 12 +- .../src/commands/ecosystem/args/init.rs | 55 +- .../src/commands/ecosystem/args/mod.rs | 0 .../commands/ecosystem/build_transactions.rs | 0 .../src/commands/ecosystem/change_default.rs | 0 .../zkstack}/src/commands/ecosystem/common.rs | 6 +- .../zkstack}/src/commands/ecosystem/create.rs | 0 .../src/commands/ecosystem/create_configs.rs | 0 .../zkstack}/src/commands/ecosystem/init.rs | 129 +- .../zkstack}/src/commands/ecosystem/mod.rs | 0 .../commands/ecosystem/setup_observability.rs | 0 .../zkstack}/src/commands/ecosystem/utils.rs | 0 .../zkstack}/src/commands/explorer/backend.rs | 0 .../zkstack}/src/commands/explorer/init.rs | 0 .../zkstack}/src/commands/explorer/mod.rs | 0 .../zkstack}/src/commands/explorer/run.rs | 0 .../src/commands/external_node/args/mod.rs | 0 .../external_node/args/prepare_configs.rs | 0 .../src/commands/external_node/args/run.rs | 0 .../src/commands/external_node/init.rs | 0 .../src/commands/external_node/mod.rs | 0 .../commands/external_node/prepare_configs.rs | 39 +- .../src/commands/external_node/run.rs | 0 .../crates/zkstack}/src/commands/mod.rs | 2 + .../crates/zkstack}/src/commands/portal.rs | 2 +- .../commands/prover/args/compressor_keys.rs | 0 .../zkstack}/src/commands/prover/args/init.rs | 66 +- .../commands/prover/args/init_bellman_cuda.rs | 33 +- .../zkstack}/src/commands/prover/args/mod.rs | 0 .../zkstack}/src/commands/prover/args/run.rs | 74 +- .../src/commands/prover/args/setup_keys.rs | 0 .../src/commands/prover/compressor_keys.rs | 16 +- .../zkstack}/src/commands/prover/gcs.rs | 0 .../zkstack}/src/commands/prover/init.rs | 0 .../src/commands/prover/init_bellman_cuda.rs | 0 .../zkstack}/src/commands/prover/mod.rs | 0 .../zkstack}/src/commands/prover/run.rs | 15 +- .../src/commands/prover/setup_keys.rs | 0 .../crates/zkstack}/src/commands/server.rs | 0 .../crates/zkstack}/src/commands/update.rs | 7 +- .../crates/zkstack}/src/consts.rs | 38 +- .../crates/zkstack}/src/defaults.rs | 0 
.../crates/zkstack}/src/external_node.rs | 0 .../crates/zkstack}/src/main.rs | 103 +- .../crates/zkstack}/src/messages.rs | 33 +- .../crates/zkstack}/src/utils/consensus.rs | 42 +- .../crates/zkstack}/src/utils/forge.rs | 14 +- .../crates/zkstack}/src/utils/mod.rs | 0 .../crates/zkstack}/src/utils/ports.rs | 127 +- .../crates/zkstack}/src/utils/rocks_db.rs | 0 zkstack_cli/rust-toolchain | 1 + zkstack_cli/zkstackup/README.md | 70 + zkstack_cli/zkstackup/install | 120 + zkstack_cli/zkstackup/zkstackup | 272 + 1075 files changed, 42686 insertions(+), 38587 deletions(-) create mode 100644 .github/workflows/ci-prover-e2e.yml create mode 100644 bin/prover_checkers/batch_availability_checker create mode 100755 bin/prover_checkers/batch_l1_status_checker create mode 100644 bin/prover_checkers/kill_prover create mode 100755 bin/prover_checkers/prover_jobs_status_checker create mode 100755 bin/run_on_all_chains.sh delete mode 100755 bin/zkt create mode 100644 core/lib/basic_types/src/api_key.rs rename core/lib/{types => basic_types}/src/pubdata_da.rs (54%) create mode 100644 core/lib/config/src/configs/prover_autoscaler.rs create mode 100644 core/lib/constants/src/message_root.rs create mode 100644 core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json delete mode 100644 core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json rename core/lib/dal/.sqlx/{query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json => query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json} (53%) create mode 100644 core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json rename core/lib/dal/.sqlx/{query-ce6d5796dcc7c105fe3b3081b70327982ab744c7566645e9b0c69364f7021c5a.json => query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json} (75%) rename core/lib/dal/.sqlx/{query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json => query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json} (73%) rename core/lib/dal/.sqlx/{query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json => query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json} (71%) rename core/lib/dal/.sqlx/{query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json => query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json} (62%) rename core/lib/dal/.sqlx/{query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json => query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json} (50%) rename core/lib/dal/.sqlx/{query-269e5901aaa362ed011a2e968d2bc8cc8877e5d1d9c2d9b04953fa7d89155b40.json => query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json} (70%) delete mode 100644 core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json delete mode 100644 core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json delete mode 100644 core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json delete mode 100644 core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json rename core/lib/dal/.sqlx/{query-4d1d409b2405a4105feb140720abb480be336b68c127e442ee1bfd177597bd8b.json => query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json} (68%) rename 
core/lib/dal/.sqlx/{query-4e1db16b582aa347dc33fccd8d8afa60b5ca8ce096bfb79172b1b55264f6c987.json => query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json} (75%) rename core/lib/dal/.sqlx/{query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json => query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json} (76%) create mode 100644 core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json delete mode 100644 core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json delete mode 100644 core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json create mode 100644 core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json rename core/lib/dal/.sqlx/{query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json => query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json} (57%) rename core/lib/dal/.sqlx/{query-670f7d170122b6165ea521c482f2ec32d637a8c11af6472b9b390c6ca2b68495.json => query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json} (76%) delete mode 100644 core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json create mode 100644 core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json rename core/lib/dal/.sqlx/{query-9ea417e4ffef9e5d158089723692ba43fe8560be0c4aa7baa49e71b2a28187e7.json => query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json} (60%) rename core/lib/dal/.sqlx/{query-66c2d8f27715ee11b0a7c4b9fd7e2e6718eea8ba12757ec77889233542b15b40.json => query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json} (78%) create mode 100644 core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json create mode 100644 core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json rename core/lib/dal/.sqlx/{query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json => query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json} (62%) rename core/lib/dal/.sqlx/{query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json => query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json} (72%) rename core/lib/dal/.sqlx/{query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json => query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json} (73%) delete mode 100644 core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json rename core/lib/dal/.sqlx/{query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json => query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json} (69%) delete mode 100644 core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json rename core/lib/dal/.sqlx/{query-dc9a3821560030a8daf8dbdafe5f52aed204a20c67a6b959b16c2a60c745321e.json => query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json} (71%) rename core/lib/dal/.sqlx/{query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json => query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json} (73%) rename core/lib/dal/.sqlx/{query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json => 
query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json} (73%) delete mode 100644 core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json rename core/lib/dal/.sqlx/{query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json => query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json} (54%) create mode 100644 core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json delete mode 100644 core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json rename core/lib/dal/.sqlx/{query-31308e6469a98e9662ff284a89ce264ca7b68c54d894fad9d760324455321080.json => query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json} (71%) rename core/lib/dal/.sqlx/{query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json => query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json} (52%) rename core/lib/dal/.sqlx/{query-db3593883d5e1e636d65e25cc744637fc33467fbd64da5a431ecab194409371c.json => query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json} (73%) create mode 100644 core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json rename core/lib/dal/.sqlx/{query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json => query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json} (74%) create mode 100644 core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json rename core/lib/dal/.sqlx/{query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json => query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json} (55%) rename core/lib/dal/.sqlx/{query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json => query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json} (57%) create mode 100644 core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json rename core/lib/dal/.sqlx/{query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json => query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json} (57%) rename core/lib/dal/.sqlx/{query-7af141a4533b332903b7ba5591b1c90ac9deb75cd2a542fe649d7830496a0756.json => query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json} (72%) delete mode 100644 core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json delete mode 100644 core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json delete mode 100644 core/lib/dal/.sqlx/query-f461f21fcc8b8e88d7cb8cfc38a15f75badf7801f687af19163f5f533e20fbc7.json rename core/lib/dal/.sqlx/{query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json => query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json} (71%) create mode 100644 core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json rename core/lib/dal/.sqlx/{query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json => query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json} (58%) create mode 100644 core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql create mode 100644 core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql create mode 100644 
core/lib/dal/migrations/20240911161714_evm-simulator.down.sql create mode 100644 core/lib/dal/migrations/20240911161714_evm-simulator.up.sql create mode 100644 core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql create mode 100644 core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql create mode 100644 core/lib/dal/src/consensus/conv.rs rename core/lib/dal/src/{consensus_dal.rs => consensus_dal/mod.rs} (74%) create mode 100644 core/lib/dal/src/consensus_dal/tests.rs delete mode 100644 core/lib/dal/src/tee_verifier_input_producer_dal.rs delete mode 100644 core/lib/eth_signer/src/error.rs create mode 100644 core/lib/external_price_api/src/cmc_api.rs create mode 100644 core/lib/l1_contract_interface/src/i_executor/structures/tests.rs create mode 100644 core/lib/multivm/src/pubdata_builders/mod.rs create mode 100644 core/lib/multivm/src/pubdata_builders/rollup.rs create mode 100644 core/lib/multivm/src/pubdata_builders/tests.rs create mode 100644 core/lib/multivm/src/pubdata_builders/utils.rs create mode 100644 core/lib/multivm/src/pubdata_builders/validium.rs delete mode 100644 core/lib/multivm/src/versions/README.md rename core/lib/multivm/src/versions/{tests.rs => shadow/mod.rs} (95%) create mode 100644 core/lib/multivm/src/versions/shadow/tests.rs delete mode 100644 core/lib/multivm/src/versions/testonly.rs create mode 100644 core/lib/multivm/src/versions/testonly/block_tip.rs rename core/lib/multivm/src/versions/{vm_1_4_2/tests => testonly}/bootloader.rs (53%) create mode 100644 core/lib/multivm/src/versions/testonly/bytecode_publishing.rs rename core/lib/multivm/src/versions/{vm_1_4_2/tests => testonly}/circuits.rs (61%) create mode 100644 core/lib/multivm/src/versions/testonly/code_oracle.rs rename core/lib/multivm/src/versions/{vm_1_4_2/tests => testonly}/default_aa.rs (50%) create mode 100644 core/lib/multivm/src/versions/testonly/gas_limit.rs create mode 100644 core/lib/multivm/src/versions/testonly/get_used_contracts.rs rename core/lib/multivm/src/versions/{vm_1_4_2/tests => testonly}/is_write_initial.rs (65%) rename core/lib/multivm/src/versions/{vm_1_4_2/tests => testonly}/l1_tx_execution.rs (71%) rename core/lib/multivm/src/versions/{vm_1_4_1/tests => testonly}/l2_blocks.rs (62%) create mode 100644 core/lib/multivm/src/versions/testonly/mod.rs create mode 100644 core/lib/multivm/src/versions/testonly/nonce_holder.rs create mode 100644 core/lib/multivm/src/versions/testonly/precompiles.rs rename core/lib/multivm/src/versions/{vm_refunds_enhancement/tests => testonly}/refunds.rs (51%) rename core/lib/multivm/src/versions/{vm_1_4_2/tests => testonly}/require_eip712.rs (61%) rename core/lib/multivm/src/versions/{vm_virtual_blocks/tests => testonly}/rollbacks.rs (50%) rename core/lib/multivm/src/versions/{vm_fast/tests/sekp256r1.rs => testonly/secp256r1.rs} (89%) rename core/lib/multivm/src/versions/{vm_1_4_2/tests => testonly}/simple_execution.rs (63%) create mode 100644 core/lib/multivm/src/versions/testonly/storage.rs create mode 100644 core/lib/multivm/src/versions/testonly/tester/mod.rs rename core/lib/multivm/src/versions/{vm_latest/tests => testonly}/tester/transaction_test_info.rs (87%) rename core/lib/multivm/src/versions/{vm_1_4_2/tests => testonly}/tracing_execution_error.rs (55%) create mode 100644 core/lib/multivm/src/versions/testonly/transfer.rs rename core/lib/multivm/src/versions/{vm_1_4_2/tests => testonly}/upgrade.rs (72%) delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs 
delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs delete mode 100644 
core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs delete mode 100644 core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs create mode 100644 core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_fast/tests/utils.rs create mode 100644 core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/migration.rs create mode 100644 core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/tests/utils.rs create mode 100644 
core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs delete mode 100644 core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs delete mode 100644 
core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs delete mode 100644 core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs create mode 100644 core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto create mode 100644 core/lib/protobuf_config/src/prover_autoscaler.rs create mode 100644 core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json create mode 100644 core/lib/vm_executor/src/testonly.rs create mode 100644 core/lib/vm_interface/src/pubdata/mod.rs delete mode 100644 core/node/api_server/src/tx_sender/tests.rs create mode 100644 core/node/api_server/src/tx_sender/tests/call.rs create mode 100644 core/node/api_server/src/tx_sender/tests/gas_estimation.rs create mode 100644 core/node/api_server/src/tx_sender/tests/mod.rs create mode 100644 core/node/api_server/src/tx_sender/tests/send_tx.rs delete mode 100644 core/node/consensus/src/batch.rs delete mode 100644 core/node/consensus/src/tests/batch.rs delete mode 100644 core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs delete mode 100644 core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs delete mode 100644 core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs delete mode 100644 core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs create mode 100644 core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs rename core/node/node_framework/src/implementations/layers/web3_api/{server.rs => server/mod.rs} (81%) create mode 100644 core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs delete mode 100644 core/node/tee_verifier_input_producer/Cargo.toml delete mode 100644 core/node/tee_verifier_input_producer/README.md delete mode 100644 core/node/tee_verifier_input_producer/src/lib.rs delete mode 100644 core/node/tee_verifier_input_producer/src/metrics.rs create mode 100644 docker/prover-autoscaler/Dockerfile rename docker/zk-environment/{20.04_amd64_cuda_11_8.Dockerfile => 22.04_amd64_cuda_11_8.Dockerfile} (94%) rename docker/zk-environment/{20.04_amd64_cuda_12_0.Dockerfile => 22.04_amd64_cuda_12_0.Dockerfile} (95%) create mode 100644 docs/specs/zk_chains/gateway.md delete mode 100644 docs/specs/zk_chains/hyperbridges.md create mode 100644 docs/specs/zk_chains/interop.md create mode 100644 etc/contracts-test-data/contracts/mock-evm/mock-evm.sol create mode 100644 etc/multivm_bootloaders/vm_gateway/commit create mode 100644 etc/upgrades/1728066632-protocol-defense/common.json create mode 100644 etc/upgrades/1728066632-protocol-defense/stage/crypto.json create mode 100644 etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json create mode 100644 etc/upgrades/1728066632-protocol-defense/stage/facets.json create mode 100644 etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json create mode 100644 
etc/upgrades/1728066632-protocol-defense/stage/transactions.json create mode 100644 prover/crates/bin/prover_autoscaler/Cargo.toml create mode 100644 prover/crates/bin/prover_autoscaler/src/agent.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/cluster_types.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/global/mod.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/global/queuer.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/global/scaler.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/global/watcher.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/k8s/mod.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/lib.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/main.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/metrics.rs create mode 100644 prover/crates/bin/prover_autoscaler/src/task_wiring.rs delete mode 100644 zk_toolbox/crates/common/src/term/spinner.rs delete mode 100644 zk_toolbox/crates/common/src/wallets.rs delete mode 100644 zk_toolbox/crates/zk_inception/build.rs delete mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs delete mode 100644 zk_toolbox/crates/zk_supervisor/Cargo.toml delete mode 100644 zk_toolbox/crates/zk_supervisor/README.md delete mode 100644 zk_toolbox/crates/zk_supervisor/src/main.rs delete mode 100644 zk_toolbox/rust-toolchain delete mode 100644 zk_toolbox/zkup/README.md delete mode 100755 zk_toolbox/zkup/install delete mode 100755 zk_toolbox/zkup/zkup rename {zk_toolbox => zkstack_cli}/CHANGELOG.md (100%) rename {zk_toolbox => zkstack_cli}/Cargo.lock (96%) rename {zk_toolbox => zkstack_cli}/Cargo.toml (81%) rename {zk_toolbox => zkstack_cli}/README.md (84%) rename {zk_toolbox => zkstack_cli}/crates/common/Cargo.toml (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/cmd.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/config.rs (100%) create mode 100644 zkstack_cli/crates/common/src/contracts.rs rename {zk_toolbox => zkstack_cli}/crates/common/src/db.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/docker.rs (89%) rename {zk_toolbox => zkstack_cli}/crates/common/src/ethereum.rs (96%) rename {zk_toolbox => zkstack_cli}/crates/common/src/external_node.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/files.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/forge.rs (97%) rename {zk_toolbox => zkstack_cli}/crates/common/src/git.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/hardhat.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/lib.rs (91%) rename {zk_toolbox => zkstack_cli}/crates/common/src/prerequisites.rs (52%) rename {zk_toolbox => zkstack_cli}/crates/common/src/prompt/confirm.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/prompt/input.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/prompt/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/prompt/select.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/server.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/term/error.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/term/logger.rs (97%) rename {zk_toolbox => zkstack_cli}/crates/common/src/term/mod.rs (100%) create mode 100644 zkstack_cli/crates/common/src/term/spinner.rs rename {zk_toolbox => 
zkstack_cli}/crates/common/src/version.rs (100%) create mode 100644 zkstack_cli/crates/common/src/wallets.rs rename {zk_toolbox => zkstack_cli}/crates/common/src/withdraw.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/common/src/yaml.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/Cargo.toml (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/apps.rs (96%) rename {zk_toolbox => zkstack_cli}/crates/config/src/chain.rs (94%) rename {zk_toolbox => zkstack_cli}/crates/config/src/consensus_config.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/consensus_secrets.rs (67%) rename {zk_toolbox => zkstack_cli}/crates/config/src/consts.rs (97%) rename {zk_toolbox => zkstack_cli}/crates/config/src/contracts.rs (95%) rename {zk_toolbox => zkstack_cli}/crates/config/src/docker_compose.rs (94%) rename {zk_toolbox => zkstack_cli}/crates/config/src/ecosystem.rs (93%) rename {zk_toolbox => zkstack_cli}/crates/config/src/explorer.rs (98%) rename {zk_toolbox => zkstack_cli}/crates/config/src/explorer_compose.rs (98%) rename {zk_toolbox => zkstack_cli}/crates/config/src/external_node.rs (82%) rename {zk_toolbox => zkstack_cli}/crates/config/src/file_config.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/accept_ownership/mod.rs (71%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_ecosystem/input.rs (96%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_ecosystem/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_ecosystem/output.rs (95%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs (98%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs (92%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_l2_contracts/input.rs (92%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/deploy_l2_contracts/output.rs (72%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/gateway_preparation/input.rs (95%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/gateway_preparation/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/gateway_preparation/output.rs (78%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/paymaster/mod.rs (83%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/register_chain/input.rs (97%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/register_chain/mod.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/register_chain/output.rs (82%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/script_params.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs (86%) rename {zk_toolbox => zkstack_cli}/crates/config/src/gateway.rs (92%) rename {zk_toolbox => zkstack_cli}/crates/config/src/general.rs (96%) rename {zk_toolbox => zkstack_cli}/crates/config/src/genesis.rs (64%) rename {zk_toolbox => zkstack_cli}/crates/config/src/lib.rs (90%) rename {zk_toolbox => zkstack_cli}/crates/config/src/manipulations.rs 
(100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/portal.rs (98%) rename {zk_toolbox => zkstack_cli}/crates/config/src/secrets.rs (80%) rename {zk_toolbox => zkstack_cli}/crates/config/src/traits.rs (95%) rename {zk_toolbox => zkstack_cli}/crates/config/src/wallet_creation.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/config/src/wallets.rs (75%) rename {zk_toolbox => zkstack_cli}/crates/git_version_macro/Cargo.toml (100%) rename {zk_toolbox => zkstack_cli}/crates/git_version_macro/src/lib.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/types/Cargo.toml (100%) rename {zk_toolbox => zkstack_cli}/crates/types/src/base_token.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/types/src/l1_network.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/types/src/lib.rs (71%) rename {zk_toolbox => zkstack_cli}/crates/types/src/prover_mode.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/types/src/token_info.rs (100%) rename {zk_toolbox => zkstack_cli}/crates/types/src/wallet_creation.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/Cargo.toml (69%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/README.md (89%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/abi/ConsensusRegistry.json (100%) create mode 100644 zkstack_cli/crates/zkstack/build.rs create mode 100644 zkstack_cli/crates/zkstack/completion/_zkstack.zsh create mode 100644 zkstack_cli/crates/zkstack/completion/zkstack.fish create mode 100644 zkstack_cli/crates/zkstack/completion/zkstack.sh rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/accept_ownership.rs (94%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/args/containers.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/args/mod.rs (71%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/args/run_server.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/args/update.rs (100%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/autocomplete.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/args/build_transactions.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/args/create.rs (87%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/args/genesis.rs (55%) rename zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs => zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs (50%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/build_transactions.rs (96%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/common.rs (58%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/convert_to_gateway.rs (95%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/create.rs (97%) rename {zk_toolbox/crates/zk_inception => 
zkstack_cli/crates/zkstack}/src/commands/chain/deploy_l2_contracts.rs (89%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/deploy_paymaster.rs (94%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs rename zk_toolbox/crates/zk_inception/src/commands/chain/init.rs => zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs (58%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/migrate_from_gateway.rs (96%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/migrate_to_gateway.rs (94%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/mod.rs (65%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/set_token_multiplier_setter.rs (94%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/chain/setup_legacy_bridge.rs (94%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs rename zk_toolbox/crates/zk_inception/src/commands/consensus.rs => zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs (79%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto create mode 100644 zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/containers.rs (90%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/args/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/args/releases.rs (81%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/init.rs (96%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/contract_verifier/run.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/clean/mod.rs (92%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/config_writer.rs (96%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/contracts.rs (66%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/args/mod.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/args/new_migration.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/check_sqlx_data.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/drop.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => 
zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/migrate.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/mod.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/new_migration.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/prepare.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/reset.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/database/setup.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/fmt.rs (92%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/lint.rs (62%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/lint_utils.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/mod.rs (87%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/args/insert_batch.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/args/insert_version.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/args/mod.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/info.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/insert_batch.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/insert_version.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/prover/mod.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/send_transactions/args/mod.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/send_transactions/mod.rs (99%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/snapshot.rs (91%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/sql_fmt.rs (97%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/fees.rs (65%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/integration.rs (63%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/mod.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/recovery.rs (66%) rename 
{zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/revert.rs (85%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/rust.rs (70%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/args/upgrade.rs (72%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/build.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/db.rs (83%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/fees.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/integration.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/l1_contracts.rs (86%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/loadtest.rs (95%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/mod.rs (92%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/prover.rs (97%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/recovery.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/revert.rs (98%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/rust.rs (94%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/upgrade.rs (91%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/utils.rs (93%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/commands/test/wallet.rs (96%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/consts.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/dals.rs (95%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/defaults.rs (100%) rename {zk_toolbox/crates/zk_supervisor/src => zkstack_cli/crates/zkstack/src/commands/dev}/messages.rs (90%) create mode 100644 zkstack_cli/crates/zkstack/src/commands/dev/mod.rs rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/args/build_transactions.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/args/change_default.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/args/create.rs (94%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/args/init.rs (79%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/build_transactions.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/change_default.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/common.rs (94%) rename 
{zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/create.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/create_configs.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/init.rs (79%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/setup_observability.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/ecosystem/utils.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/explorer/backend.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/explorer/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/explorer/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/explorer/run.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/args/prepare_configs.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/args/run.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/prepare_configs.rs (82%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/external_node/run.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/mod.rs (86%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/portal.rs (98%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/compressor_keys.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/init.rs (92%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/init_bellman_cuda.rs (58%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/run.rs (77%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/args/setup_keys.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/compressor_keys.rs (80%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/gcs.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/init.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/init_bellman_cuda.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/run.rs (88%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/prover/setup_keys.rs (100%) 
rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/server.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/commands/update.rs (95%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/consts.rs (61%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/defaults.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/external_node.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/main.rs (56%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/messages.rs (94%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/utils/consensus.rs (70%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/utils/forge.rs (74%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/utils/mod.rs (100%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/utils/ports.rs (82%) rename {zk_toolbox/crates/zk_inception => zkstack_cli/crates/zkstack}/src/utils/rocks_db.rs (100%) create mode 100644 zkstack_cli/rust-toolchain create mode 100644 zkstack_cli/zkstackup/README.md create mode 100755 zkstack_cli/zkstackup/install create mode 100755 zkstack_cli/zkstackup/zkstackup diff --git a/.githooks/pre-push b/.githooks/pre-push index 73168e08ec4..ef5e77cbc79 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -6,14 +6,29 @@ RED='\033[0;31m' NC='\033[0m' # No Color +# Common prompts +INSTALL_PROPT="Please install ZK Stack CLI using zkstackup from https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli/zkstackup" +FORMAT_PROMPT="Please format the code via 'zkstack dev fmt', cannot push unformatted code" + # Check that prettier formatting rules are not violated. -if which zk_supervisor >/dev/null; then - if ! zk_supervisor fmt --check; then +if which zkstack >/dev/null; then + if ! zkstack dev fmt --check; then echo -e "${RED}Push error!${NC}" - echo "Please format the code via 'zks fmt', cannot push unformatted code" + echo -e "${FORMAT_PROMPT}" exit 1 fi else - echo "Please install zk_toolbox using zkup from https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup, and then run ./bin/zkt from the zksync-era repository." - exit 1 + if which zk_supervisor >/dev/null; then + echo -e "${RED}WARNING: zkup, zk_inception/zki, and zk_supervisor/zks are DEPRECATED.${NC}" + echo -e "${RED}${INSTALL_PROPT}${NC}" + + if ! zk_supervisor fmt --check; then + echo -e "${RED}Push error!${NC}" + echo -e "${FORMAT_PROMPT}" + exit 1 + fi + else + echo -e "${INSTALL_PROPT}" + exit 1 + fi fi diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index a712db9f75b..d68b45e9d43 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,4 +17,4 @@ - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. -- [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. +- [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
diff --git a/.github/release-please/config.json b/.github/release-please/config.json index 86839e804ca..358e249a18b 100644 --- a/.github/release-please/config.json +++ b/.github/release-please/config.json @@ -20,9 +20,9 @@ "release-type": "simple", "component": "prover" }, - "zk_toolbox": { + "zkstack_cli": { "release-type": "simple", - "component": "zk_toolbox", + "component": "zkstack_cli", "plugins": [ "cargo-workspace" ] diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 44e10fb13fd..a0d1d73bdda 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.27.0", + "core": "25.0.0", "prover": "16.5.0", - "zk_toolbox": "0.1.2" + "zkstack_cli": "0.1.2" } diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index bb385b2797b..1481e542de5 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -101,7 +101,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run pre_download_compilers.sh @@ -113,15 +112,19 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run ./bin/zkt || true ci_run ./bin/zk || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + - name: install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + ci_run zkstack dev contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index 9d00f98b181..15d4432191d 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -71,11 +71,15 @@ jobs: if [ $(jq length <<<"$tags") -eq 0 ]; then echo "No tag found on all pages." echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + # TODO Remove it when we migrate to foundry inside contracts repository + mkdir -p contracts/l1-contracts/artifacts/ exit 0 fi filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") if [[ ! 
-z "$filtered_tag" ]]; then echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + # TODO Remove it when we migrate to foundry inside contracts repository + mkdir -p contracts/l1-contracts/out break fi ((page++)) @@ -110,7 +114,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run pre_download_compilers.sh @@ -123,14 +126,19 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run ./bin/zk || true - ci_run ./bin/zkt || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + ci_run zkstack dev contracts --system-contracts --l1-contracts --l2-contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index f664bfaaa00..cbb4239b572 100644 --- a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -50,9 +50,13 @@ jobs: - name: start-services run: | - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g - name: init run: | @@ -61,9 +65,11 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run zk - ci_run zkt ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + + - name: build contracts + run: | + ci_run zkstack dev contracts - name: update-image run: | diff --git a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml index b92fb8e8111..30990889caf 100644 --- a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml @@ -28,7 +28,6 @@ jobs: - name: Download Setup data run: | gsutil -m rsync -r gs://matterlabs-setup-data-us/${{ inputs.setup_keys_id }} docker/prover-gpu-fri-gar - cp -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ - name: Login to us-central1 GAR run: | @@ -70,6 +69,10 @@ jobs: --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + - name: Move Setup data from prover-gpu-fri-gar to circuit-prover-gpu-gar + run: | + mv -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ + - name: Build and push circuit-prover-gpu-gar uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index d6ec61114c7..91de5dd51ec 
100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -45,7 +45,7 @@ jobs: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" ERA_BELLMAN_CUDA_RELEASE: ${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: ${{ inputs.CUDA_ARCH }} - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] strategy: matrix: component: @@ -56,6 +56,7 @@ jobs: - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor + - prover-autoscaler outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} steps: @@ -74,7 +75,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run sccache --start-server @@ -91,7 +91,6 @@ jobs: run: | ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^24.key - - name: login to Docker registries if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) run: | diff --git a/.github/workflows/build-witness-generator-template.yml b/.github/workflows/build-witness-generator-template.yml index 33d78b3cf2f..d9493f97cae 100644 --- a/.github/workflows/build-witness-generator-template.yml +++ b/.github/workflows/build-witness-generator-template.yml @@ -75,7 +75,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run sccache --start-server diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index 2f51229aeaf..ea91fc4a7cd 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -27,15 +27,15 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - - - name: Init + + - name: Install zkstack run: | - ci_run zkt + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local # This does both linting and "building". 
We're using `zk lint prover` as it's common practice within our repo # `zk lint prover` = cargo clippy, which does cargo check behind the scenes, which is a lightweight version of cargo build - name: Lints - run: ci_run zk_supervisor lint -t rs --check + run: ci_run zkstack dev lint -t rs --check diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 6d0785fe46f..0babbd1c9db 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -26,24 +26,31 @@ jobs: - name: Start services run: | ci_localnet_up + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local - name: Build run: | - ci_run ./bin/zkt ci_run yarn install ci_run git config --global --add safe.directory /usr/src/zksync - ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + - name: Database setup + run: | + ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Lints run: | - ci_run zk_supervisor fmt --check - ci_run zk_supervisor lint -t md --check - ci_run zk_supervisor lint -t sol --check - ci_run zk_supervisor lint -t js --check - ci_run zk_supervisor lint -t ts --check - ci_run zk_supervisor lint -t rs --check + ci_run zkstack dev fmt --check + ci_run zkstack dev lint -t md --check + ci_run zkstack dev lint -t sol --check + ci_run zkstack dev lint -t js --check + ci_run zkstack dev lint -t ts --check + ci_run zkstack dev lint -t rs --check + ci_run zkstack dev lint -t autocompletion --check - name: Check Database run: | - ci_run zk_supervisor database check-sqlx-data --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + ci_run zkstack dev database check-sqlx-data --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index fed26bbbb3b..0e1c69ae4db 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -8,11 +8,14 @@ on: required: false default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' +env: + RUST_BACKTRACE: 1 + PASSED_ENV_VARS: RUST_BACKTRACE + jobs: lint: name: lint uses: ./.github/workflows/ci-core-lint-reusable.yml - unit-tests: runs-on: [ matterlabs-ci-runner-highmem-long ] @@ -57,102 +60,113 @@ jobs: - name: Init run: | ci_run run_retried rustup show - ci_run ./bin/zkt - ci_run zk_supervisor contracts - # FIXME: enable contract tests once tehy are stable - #- name: Contracts unit tests - # run: ci_run yarn l1-contracts test + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + + - name: Build contracts + run: | + ci_run zkstack dev contracts + +# - name: Contracts unit tests +# run: ci_run yarn l1-contracts test - name: Rust unit tests run: | - ci_run zk_supervisor test rust + ci_run zkstack dev test rust # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible # with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually. - ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch - - # FIXME: support loadtest together with sync layer. 
- # loadtest: - # runs-on: [ matterlabs-ci-runner-high-performance ] - # strategy: - # fail-fast: false - # matrix: - # # FIXME: support new VM mode - # vm_mode: ["OLD"] - - # steps: - # - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - # with: - # submodules: "recursive" - # fetch-depth: 0 - - # - name: Setup environment - # run: | - # echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV - # echo $(pwd)/bin >> $GITHUB_PATH - # echo IN_DOCKER=1 >> .env - # echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env - # echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env - # echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env - # echo "RUSTC_WRAPPER=sccache" >> .env - - # - name: Loadtest configuration - # run: | - # echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 21000 || 16000 }} >> .env - # echo ACCOUNTS_AMOUNT="100" >> .env - # echo MAX_INFLIGHT_TXS="10" >> .env - # echo SYNC_API_REQUESTS_LIMIT="15" >> .env - # echo FAIL_FAST=true >> .env - # echo IN_DOCKER=1 >> .env - - # - name: Start services - # run: | - # ci_localnet_up - # ci_run sccache --start-server - - # - name: Init - # run: | - # ci_run git config --global --add safe.directory /usr/src/zksync - # ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen - # ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts - # ci_run git config --global --add safe.directory /usr/src/zksync/contracts - - # ci_run ./bin/zkt - # ci_run zk_inception chain create \ - # --chain-name legacy \ - # --chain-id sequential \ - # --prover-mode no-proofs \ - # --wallet-creation localhost \ - # --l1-batch-commit-data-generator-mode rollup \ - # --base-token-address 0x0000000000000000000000000000000000000001 \ - # --base-token-price-nominator 1 \ - # --base-token-price-denominator 1 \ - # --set-as-default false \ - # --ignore-prerequisites \ - # --legacy-bridge - - # ci_run zk_inception ecosystem init --dev --verbose - # ci_run zk_supervisor contracts --test-contracts - - # # `sleep 60` because we need to wait until server added all the tokens - # - name: Run server - # run: | - # ci_run zk_supervisor config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy - # ci_run zk_inception server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & - # ci_run sleep 60 - - # - name: Perform loadtest - # run: ci_run zk_supervisor t loadtest -v --chain=legacy - - # - name: Show server.log logs - # if: always() - # run: ci_run cat server.log || true - - # - name: Show sccache logs - # if: always() - # run: | - # ci_run sccache --show-stats || true - # ci_run cat /tmp/sccache_log.txt || true +# ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch + +# loadtest: +# runs-on: [ matterlabs-ci-runner-high-performance ] +# strategy: +# fail-fast: false +# matrix: +# vm_mode: [ "OLD", "NEW" ] +# +# steps: +# - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 +# with: +# submodules: "recursive" +# fetch-depth: 0 +# +# - name: Setup environment +# run: | +# echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV +# echo $(pwd)/bin >> $GITHUB_PATH +# echo IN_DOCKER=1 >> .env +# echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env +# echo 
"SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env +# echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env +# echo "RUSTC_WRAPPER=sccache" >> .env +# +# - name: Loadtest configuration +# run: | +# echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 21000 || 16000 }} >> .env +# echo ACCOUNTS_AMOUNT="100" >> .env +# echo MAX_INFLIGHT_TXS="10" >> .env +# echo SYNC_API_REQUESTS_LIMIT="15" >> .env +# echo FAIL_FAST=true >> .env +# echo IN_DOCKER=1 >> .env +# +# - name: Start services +# run: | +# ci_localnet_up +# ci_run sccache --start-server +# +# - name: Init +# run: | +# ci_run git config --global --add safe.directory /usr/src/zksync +# ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen +# ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts +# ci_run git config --global --add safe.directory /usr/src/zksync/contracts +# +# - name: Install zkstack +# run: | +# ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true +# ci_run zkstackup -g --local +# +# +# - name: Create and initialize legacy chain +# run: | +# ci_run zkstack chain create \ +# --chain-name legacy \ +# --chain-id sequential \ +# --prover-mode no-proofs \ +# --wallet-creation localhost \ +# --l1-batch-commit-data-generator-mode rollup \ +# --base-token-address 0x0000000000000000000000000000000000000001 \ +# --base-token-price-nominator 1 \ +# --base-token-price-denominator 1 \ +# --set-as-default false \ +# --ignore-prerequisites \ +# --legacy-bridge +# +# ci_run zkstack ecosystem init --dev --verbose +# ci_run zkstack dev contracts --test-contracts +# +# # `sleep 60` because we need to wait until server added all the tokens +# - name: Run server +# run: | +# ci_run zkstack dev config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy +# ci_run zkstack server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & +# ci_run sleep 60 +# +# - name: Perform loadtest +# run: ci_run zkstack dev t loadtest -v --chain=legacy +# +# - name: Show server.log logs +# if: always() +# run: ci_run cat server.log || true +# +# - name: Show sccache logs +# if: always() +# run: | +# ci_run sccache --show-stats || true +# ci_run cat /tmp/sccache_log.txt || true integration-tests: runs-on: [ matterlabs-ci-runner-ultra-performance ] @@ -172,14 +186,17 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + echo "GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}" >> .env echo RUN_CONTRACT_VERIFICATION_TEST=true >> $GITHUB_ENV - name: Start services run: | ci_localnet_up - - name: Build zk_toolbox - run: ci_run bash -c "./bin/zkt" + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local - name: Create log directories run: | @@ -209,21 +226,19 @@ jobs: echo "EXTERNAL_NODE_LOGS_DIR=$EXTERNAL_NODE_LOGS_DIR" >> $GITHUB_ENV echo "FEES_LOGS_DIR=$FEES_LOGS_DIR" >> $GITHUB_ENV echo "REVERT_LOGS_DIR=$REVERT_LOGS_DIR" >> $GITHUB_ENV -# FIXME: restore tests for all the various types of chains + - name: Initialize ecosystem run: | ci_run git config --global --add safe.directory /usr/src/zksync ci_run 
git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ - --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_era \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_era \ - --ignore-prerequisites --verbose \ - --observability=false + ci_run zkstack ecosystem init --deploy-paymaster --deploy-erc20 \ + --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_era \ + --ignore-prerequisites --verbose \ + --observability=false - name: Read Custom Token address and set as environment variable run: | @@ -233,7 +248,7 @@ jobs: - name: Create and initialize Validium chain run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name validium \ --chain-id sequential \ --prover-mode no-proofs \ @@ -245,18 +260,16 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain init \ + ci_run zkstack chain init \ --deploy-paymaster \ --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_validium \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_validium \ --chain validium - name: Create and initialize chain with Custom Token run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name custom_token \ --chain-id sequential \ --prover-mode no-proofs \ @@ -268,86 +281,201 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain init \ + ci_run zkstack chain init \ --deploy-paymaster \ --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_custom_token \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_custom_token \ --chain custom_token +# - name: Create and register chain with transactions signed "offline" +# run: | +# ci_run zkstack chain create \ +# --chain-name offline_chain \ +# --chain-id sequential \ +# --prover-mode no-proofs \ +# --wallet-creation localhost \ +# --l1-batch-commit-data-generator-mode rollup \ +# --base-token-address 0x0000000000000000000000000000000000000001 \ +# --base-token-price-nominator 1 \ +# --base-token-price-denominator 1 \ +# --set-as-default false \ +# --ignore-prerequisites +# +# ci_run zkstack chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545 +# +# governor_pk=$(awk '/governor:/ {flag=1} flag && /private_key:/ {print $2; exit}' ./configs/wallets.yaml) +# +# ci_run zkstack dev send-transactions \ +# --file ./transactions/chain/offline_chain/register-hyperchain-txns.json \ +# --l1-rpc-url http://127.0.0.1:8545 \ +# --private-key $governor_pk +# +# bridge_hub=$(awk '/bridgehub_proxy_addr/ {print $2}' ./configs/contracts.yaml) +# chain_id=$(awk '/chain_id:/ {print $2}' ./chains/offline_chain/ZkStack.yaml) +# +# hyperchain_output=$(ci_run cast call $bridge_hub "getHyperchain(uint256)" $chain_id) +# +# if [[ $hyperchain_output == 0x* && 
${#hyperchain_output} -eq 66 ]]; then +# echo "Chain successfully registered: $hyperchain_output" +# else +# echo "Failed to register chain: $hyperchain_output" +# exit 1 +# fi + + - name: Create and initialize Consensus chain + run: | + ci_run zkstack chain create \ + --chain-name consensus \ + --chain-id sequential \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode validium \ + --base-token-address ${{ env.CUSTOM_TOKEN_ADDRESS }} \ + --base-token-price-nominator 3 \ + --base-token-price-denominator 2 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zkstack chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_consensus \ + --chain consensus + + - name: Export chain list to environment variable + run: | + CHAINS="era,validium,custom_token,consensus" + echo "CHAINS=$CHAINS" >> $GITHUB_ENV + - name: Initialize gateway chain run: | - ci_run zk_inception chain create \ - --chain-name gateway \ - --chain-id 505 \ - --prover-mode no-proofs \ - --wallet-creation localhost \ - --l1-batch-commit-data-generator-mode rollup \ - --base-token-address 0x0000000000000000000000000000000000000001 \ - --base-token-price-nominator 1 \ - --base-token-price-denominator 1 \ - --set-as-default false \ - --ignore-prerequisites - - ci_run zk_inception chain init \ - --deploy-paymaster \ - --l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_gateway \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_gateway \ - --chain gateway - - ci_run zk_inception chain convert-to-gateway --chain gateway --ignore-prerequisites + ci_run zkstack chain create \ + --chain-name gateway \ + --chain-id 505 \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode rollup \ + --base-token-address 0x0000000000000000000000000000000000000001 \ + --base-token-price-nominator 1 \ + --base-token-price-denominator 1 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zkstack chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_gateway \ + --chain gateway + + ci_run zkstack chain convert-to-gateway --chain gateway --ignore-prerequisites - name: Run gateway run: | - ci_run zk_inception server --ignore-prerequisites --chain gateway &> ${{ env.SERVER_LOGS_DIR }}/gateway.log & - + ci_run zkstack server --ignore-prerequisites --chain gateway &> ${{ env.SERVER_LOGS_DIR }}/gateway.log & ci_run sleep 5 - name: Migrate chains to gateway run: | - ci_run zk_inception chain migrate-to-gateway --chain era --gateway-chain-name gateway - ci_run zk_inception chain migrate-to-gateway --chain validium --gateway-chain-name gateway - ci_run zk_inception chain migrate-to-gateway --chain custom_token --gateway-chain-name gateway - + ci_run zkstack chain migrate-to-gateway --chain era --gateway-chain-name gateway + ci_run zkstack chain migrate-to-gateway --chain validium --gateway-chain-name gateway + ci_run zkstack chain migrate-to-gateway --chain custom_token --gateway-chain-name gateway + ci_run zkstack chain migrate-to-gateway --chain consensus --gateway-chain-name gateway + - name: Migrate back era run: | - 
ci_run zk_inception chain migrate-from-gateway --chain era --gateway-chain-name gateway + ci_run zkstack chain migrate-from-gateway --chain era --gateway-chain-name gateway - name: Migrate to gateway again run: | - ci_run zk_inception chain migrate-to-gateway --chain era --gateway-chain-name gateway + ci_run zkstack chain migrate-to-gateway --chain era --gateway-chain-name gateway - name: Build test dependencies run: | - ci_run zk_supervisor test build + ci_run zkstack dev test build - - name: Run chains + - name: Initialize Contract verifier run: | - ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & - ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & - ci_run zk_inception server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & + ci_run zkstack contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.26-1.0.1 --only --chain era + ci_run zkstack contract-verifier run --chain era &> ${{ env.SERVER_LOGS_DIR }}/contract-verifier-rollup.log & + + - name: Run servers + run: | + ci_run zkstack server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & + ci_run zkstack server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & + ci_run zkstack server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & + ci_run zkstack server --ignore-prerequisites --chain consensus \ + --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ + &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & ci_run sleep 5 - - name: Run integration tests + - name: Setup attester committee for the consensus chain run: | - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log & - PID2=$! + ci_run zkstack consensus set-attester-committee --chain consensus --from-genesis &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & - PID3=$! 
+ - name: Run integration tests + run: | + ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} + +# - name: Init external nodes +# run: | +# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ +# --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era +# ci_run zkstack external-node init --ignore-prerequisites --chain era +# +# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ +# --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium +# ci_run zkstack external-node init --ignore-prerequisites --chain validium +# +# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ +# --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token +# ci_run zkstack external-node init --ignore-prerequisites --chain custom_token +# +# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ +# --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus +# ci_run zkstack external-node init --ignore-prerequisites --chain consensus +# +# - name: Run recovery tests (from snapshot) +# run: | +# ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --snapshot --no-deps --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} +# +# - name: Run recovery tests (from genesis) +# run: | +# ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --no-deps --no-kill --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} +# +# - name: Run external node server +# run: | +# ci_run zkstack external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & +# ci_run zkstack external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & +# ci_run zkstack external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & +# ci_run zkstack external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & +# +# - name: Run integration tests en +# run: | +# ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites --external-node" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} +# +# - name: Fee projection tests +# run: | +# ci_run killall -INT zksync_server || true +# ci_run ./bin/run_on_all_chains.sh "zkstack dev test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.FEES_LOGS_DIR }} +# +# - name: Run revert tests +# run: | +# ci_run killall -INT zksync_server || true +# ci_run killall -INT zksync_external_node || true +# +# ci_run ./bin/run_on_all_chains.sh "zkstack dev test revert --no-deps --external-node --no-kill --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} +# +# # Upgrade tests should run last, because as soon as they +# # finish the bootloader will be different +# # TODO make upgrade tests safe to run multiple times +# - name: Run upgrade test +# run: | +# ci_run zkstack dev test upgrade --no-deps --chain era - wait $PID1 - wait $PID2 - wait $PID3 - name: Upload logs uses: 
actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 diff --git a/.github/workflows/ci-docs-reusable.yml b/.github/workflows/ci-docs-reusable.yml index 5b1d5a9bcdf..e1a9cf78df7 100644 --- a/.github/workflows/ci-docs-reusable.yml +++ b/.github/workflows/ci-docs-reusable.yml @@ -27,12 +27,17 @@ jobs: run_retried docker compose pull zk docker compose up -d zk + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + - name: Build run: | - ci_run ./bin/zkt ci_run yarn install ci_run git config --global --add safe.directory /usr/src/zksync - name: Lints run: | - ci_run zk_supervisor lint -t md --check + ci_run zkstack dev fmt --check + ci_run zkstack dev lint -t md --check diff --git a/.github/workflows/ci-prover-e2e.yml b/.github/workflows/ci-prover-e2e.yml new file mode 100644 index 00000000000..e69945eaaf2 --- /dev/null +++ b/.github/workflows/ci-prover-e2e.yml @@ -0,0 +1,126 @@ +name: Workflow for testing prover component end-to-end +on: + workflow_call: + +jobs: + e2e-test: + runs-on: [ matterlabs-ci-gpu-l4-runner-prover-tests ] + env: + RUNNER_COMPOSE_FILE: "docker-compose-gpu-runner-cuda-12-0.yml" + + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + with: + submodules: "recursive" + fetch-depth: 0 + + - name: Setup environment + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env + + mkdir -p prover_logs + +# - name: Start services +# run: | +# run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull +# docker-compose -f ${RUNNER_COMPOSE_FILE} --profile runner up -d --wait +# ci_run sccache --start-server +# +# - name: Init +# run: | +# ci_run git config --global --add safe.directory "*" +# ci_run chmod -R +x ./bin +# +# ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true +# ci_run zkstackup -g --local +# +# ci_run zkstack chain create \ +# --chain-name proving_chain \ +# --chain-id sequential \ +# --prover-mode gpu \ +# --wallet-creation localhost \ +# --l1-batch-commit-data-generator-mode rollup \ +# --base-token-address 0x0000000000000000000000000000000000000001 \ +# --base-token-price-nominator 1 \ +# --base-token-price-denominator 1 \ +# --set-as-default true \ +# --ignore-prerequisites +# +# ci_run zkstack ecosystem init --dev --verbose +# ci_run zkstack prover init --dev --verbose +# +# echo "URL=$(grep "http_url" ./chains/proving_chain/configs/general.yaml | awk '{ print $2 }')" >> $GITHUB_ENV +# - name: Build prover binaries +# run: | +# ci_run cargo build --release --workspace --manifest-path=prover/Cargo.toml +# - name: Prepare prover subsystem +# run: | +# ci_run zkstack prover init-bellman-cuda --clone --verbose +# ci_run zkstack prover setup-keys --mode=download --region=us --verbose +# - name: Run server +# run: | +# ci_run zkstack server --uring --chain=proving_chain --components=api,tree,eth,state_keeper,commitment_generator,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip &>prover_logs/server.log & +# - name: Run Gateway +# run: | +# ci_run zkstack prover run --component=gateway --docker=false &>prover_logs/gateway.log & +# - name: Run Prover Job Monitor +# run: | 
+# ci_run zkstack prover run --component=prover-job-monitor --docker=false &>prover_logs/prover-job-monitor.log & +# - name: Wait for batch to be passed through gateway +# env: +# DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain +# BATCH_NUMBER: 1 +# INTERVAL: 30 +# TIMEOUT: 300 +# run: | +# PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \ +# ci_run ./bin/prover_checkers/batch_availability_checker +# - name: Run Witness Generator +# run: | +# ci_run zkstack prover run --component=witness-generator --round=all-rounds --docker=false &>prover_logs/witness-generator.log & +# - name: Run Circuit Prover +# run: | +# ci_run zkstack prover run --component=circuit-prover --witness-vector-generator-count=10 --docker=false &>prover_logs/circuit_prover.log & +# - name: Wait for prover jobs to finish +# env: +# DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain +# BATCH_NUMBER: 1 +# INTERVAL: 30 +# TIMEOUT: 1200 +# run: | +# PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \ +# ci_run ./bin/prover_checkers/prover_jobs_status_checker +# +# - name: Kill prover & start compressor +# run: | +# sudo ./bin/prover_checkers/kill_prover +# +# ci_run zkstack prover run --component=compressor --docker=false &>prover_logs/compressor.log & +# - name: Wait for batch to be executed on L1 +# env: +# DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain +# BATCH_NUMBER: 1 +# INTERVAL: 30 +# TIMEOUT: 600 +# run: | +# PASSED_ENV_VARS="BATCH_NUMBER,DATABASE_URL,URL,INTERVAL,TIMEOUT" \ +# ci_run ./bin/prover_checkers/batch_l1_status_checker +# +# - name: Upload logs +# uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 +# if: always() +# with: +# name: prover_logs +# path: prover_logs +# +# - name: Show sccache logs +# if: always() +# run: | +# ci_run sccache --show-stats || true +# ci_run cat /tmp/sccache_log.txt || true diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 3f842b23488..7f719b2240d 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -27,16 +27,21 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - - name: Init + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + + - name: Database setup run: | - ci_run zkt - ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Formatting - run: ci_run bash -c "cd prover && cargo fmt --check" + run: | + ci_run git config --global --add safe.directory /usr/src/zksync + ci_run zkstack dev fmt --check rustfmt unit-tests: runs-on: [ matterlabs-ci-runner-highmem-long ] @@ -62,15 +67,18 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: Init run: | - ci_run zkt ci_run run_retried 
rustup show - name: Prover unit tests run: | # Not all tests are enabled, since prover and setup_key_generator_and_server requires bellman-cuda to be present - ci_run zk_supervisor test prover + ci_run zkstack dev test prover diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9c8817cf5cc..e4bf1596d48 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,7 +21,7 @@ jobs: outputs: core: ${{ steps.changed-files.outputs.core_any_changed }} prover: ${{ steps.changed-files.outputs.prover_any_changed }} - zk_toolbox: ${{ steps.changed-files.outputs.zk_toolbox_any_changed }} + zkstack_cli: ${{ steps.changed-files.outputs.zkstack_cli_any_changed }} docs: ${{ steps.changed-files.outputs.docs_any_changed }} all: ${{ steps.changed-files.outputs.all_any_changed }} steps: @@ -43,6 +43,9 @@ jobs: - '!prover/extract-setup-data-keys.sh' - 'docker/prover*/**' - '.github/workflows/build-prover-template.yml' + - '.github/workflows/new-build-prover-template.yml' + - '.github/workflows/build-witness-generator-template.yml' + - '.github/workflows/new-build-witness-generator-template.yml' - '.github/workflows/ci-prover-reusable.yml' - 'docker-compose-runner-nightly.yml' - '!**/*.md' @@ -54,12 +57,14 @@ jobs: - 'docker/external-node/**' - 'docker/server/**' - '.github/workflows/build-core-template.yml' + - '.github/workflows/new-build-core-template.yml' - '.github/workflows/build-contract-verifier-template.yml' + - '.github/workflows/new-build-contract-verifier-template.yml' - '.github/workflows/ci-core-reusable.yml' - '.github/workflows/ci-core-lint-reusable.yml' - 'Cargo.toml' - 'Cargo.lock' - - 'zk_toolbox/**' + - 'zkstack_cli/**' - '!**/*.md' - '!**/*.MD' - 'docker-compose.yml' @@ -95,6 +100,12 @@ jobs: name: CI for Prover Components uses: ./.github/workflows/ci-prover-reusable.yml + e2e-for-prover: + name: E2E Test for Prover Components + needs: changed_files + if: ${{(needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} + uses: ./.github/workflows/ci-prover-e2e.yml + ci-for-docs: needs: changed_files if: needs.changed_files.outputs.docs == 'true' diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml index 42791eab666..0d6e2049ad0 100644 --- a/.github/workflows/new-build-contract-verifier-template.yml +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -38,6 +38,7 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo CI=1 >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo $HOME/.local/bin >> $GITHUB_PATH echo CI=1 >> .env echo IN_DOCKER=1 >> .env @@ -73,39 +74,39 @@ jobs: tar -C ./contracts -zxf system-contracts.tar.gz - name: Install Apt dependencies + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config - name: Install Node + if: env.BUILD_CONTRACTS == 'true' uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 20 cache: 'npm' - name: Install Yarn + if: env.BUILD_CONTRACTS == 'true' run: npm install -g yarn - name: Setup rust + if: env.BUILD_CONTRACTS == 'true' uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 with: toolchain: nightly-2024-08-01 - - name: Install cargo-nextest from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: cargo-nextest - - - name: 
Install sqlx-cli from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: sqlx-cli - tag: 0.8.1 - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + - name: Install foundry-zksync + if: env.BUILD_CONTRACTS == 'true' + run: | + mkdir ./foundry-zksync + curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz + tar zxf foundry_nightly_linux_amd64.tar.gz -C ./foundry-zksync + chmod +x ./foundry-zksync/forge ./foundry-zksync/cast + echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | # Download needed versions of vyper compiler @@ -123,18 +124,18 @@ jobs: chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" done - - name: init - shell: bash + - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | - mkdir -p ./volumes/postgres - docker compose up -d postgres - zkt || true + ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup + zkstackup --local || true - name: build contracts + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | cp etc/tokens/{test,localhost}.json - zk_supervisor contracts + zkstack dev contracts - name: Upload contracts uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 @@ -143,129 +144,129 @@ jobs: path: | ./contracts - build-images: - name: Build and Push Docker Images - needs: prepare-contracts - runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }} - strategy: - matrix: - components: - - contract-verifier - - verified-sources-fetcher - platforms: - - linux/amd64 - - steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - with: - submodules: "recursive" - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - - - name: Setup env - shell: bash - run: | - echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV - echo CI=1 >> $GITHUB_ENV - echo $(pwd)/bin >> $GITHUB_PATH - echo CI=1 >> .env - echo IN_DOCKER=1 >> .env - - - name: Download setup key - shell: bash - run: | - run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key - - - name: Set env vars - shell: bash - run: | - echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV - echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV - # Support for custom tag suffix - if [ -n "${{ inputs.image_tag_suffix }}" ]; then - echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV - else - echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV - fi - - - name: Download contracts - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - with: - name: contacts-verifier - path: | - ./contracts - - - name: login to Docker registries - if: ${{ inputs.action == 'push' }} - shell: bash - run: | - docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} - gcloud auth configure-docker us-docker.pkg.dev -q - - - name: Build and push - uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 - with: - context: . 
- push: ${{ inputs.action == 'push' }} - file: docker/${{ matrix.components }}/Dockerfile - build-args: | - SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage - SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com - SCCACHE_GCS_RW_MODE=READ_WRITE - RUSTC_WRAPPER=sccache - tags: | - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest - matterlabs/${{ matrix.components }}:latest - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 - matterlabs/${{ matrix.components }}:latest2.0 - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} - matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} - - create_manifest: - name: Create release manifest - runs-on: matterlabs-ci-runner - needs: build-images - if: ${{ inputs.action == 'push' }} - strategy: - matrix: - component: - - name: contract-verifier - platform: linux/amd64 - - name: verified-sources-fetcher - platform: linux/amd64 - env: - IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} - steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - - - name: login to Docker registries - run: | - docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} - gcloud auth configure-docker us-docker.pkg.dev -q - - - name: Create Docker manifest - run: | - docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") - platforms=${{ matrix.component.platform }} - for repo in "${docker_repositories[@]}"; do - platform_tags="" - for platform in ${platforms//,/ }; do - platform=$(echo $platform | tr '/' '-') - platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" - done - for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do - docker manifest create ${manifest} ${platform_tags} - docker manifest push ${manifest} - done - done +# build-images: +# name: Build and Push Docker Images +# needs: prepare-contracts +# runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }} +# strategy: +# matrix: +# components: +# - contract-verifier +# - verified-sources-fetcher +# platforms: +# - linux/amd64 +# +# steps: +# - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 +# with: +# submodules: "recursive" +# +# - name: Set up Docker Buildx +# uses: 
docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 +# +# - name: Setup env +# shell: bash +# run: | +# echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV +# echo CI=1 >> $GITHUB_ENV +# echo $(pwd)/bin >> $GITHUB_PATH +# echo CI=1 >> .env +# echo IN_DOCKER=1 >> .env +# +# - name: Download setup key +# shell: bash +# run: | +# run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key +# +# - name: Set env vars +# shell: bash +# run: | +# echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV +# echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV +# # Support for custom tag suffix +# if [ -n "${{ inputs.image_tag_suffix }}" ]; then +# echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV +# else +# echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV +# fi +# +# - name: Download contracts +# uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 +# with: +# name: contacts-verifier +# path: | +# ./contracts +# +# - name: login to Docker registries +# if: ${{ inputs.action == 'push' }} +# shell: bash +# run: | +# docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} +# gcloud auth configure-docker us-docker.pkg.dev -q +# +# - name: Build and push +# uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 +# with: +# context: . +# push: ${{ inputs.action == 'push' }} +# file: docker/${{ matrix.components }}/Dockerfile +# build-args: | +# SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage +# SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com +# SCCACHE_GCS_RW_MODE=READ_WRITE +# RUSTC_WRAPPER=sccache +# tags: | +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest +# matterlabs/${{ matrix.components }}:latest +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 +# matterlabs/${{ matrix.components }}:latest2.0 +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} +# matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} +# matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} +# matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} +# matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} +# matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} +# matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} +# +# create_manifest: +# name: Create release manifest +# runs-on: matterlabs-ci-runner +# needs: build-images +# if: ${{ inputs.action == 'push' }} +# strategy: +# matrix: +# component: +# - name: contract-verifier +# platform: linux/amd64 +# - name: verified-sources-fetcher +# platform: linux/amd64 +# env: +# IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} +# steps: +# - uses: 
actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 +# +# - name: login to Docker registries +# run: | +# docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} +# gcloud auth configure-docker us-docker.pkg.dev -q +# +# - name: Create Docker manifest +# run: | +# docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") +# platforms=${{ matrix.component.platform }} +# for repo in "${docker_repositories[@]}"; do +# platform_tags="" +# for platform in ${platforms//,/ }; do +# platform=$(echo $platform | tr '/' '-') +# platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" +# done +# for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do +# docker manifest create ${manifest} ${platform_tags} +# docker manifest push ${manifest} +# done +# done diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml index fba6a68b8ee..c4aeb9180fd 100644 --- a/.github/workflows/new-build-core-template.yml +++ b/.github/workflows/new-build-core-template.yml @@ -43,6 +43,7 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo CI=1 >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo $HOME/.local/bin >> $GITHUB_PATH echo CI=1 >> .env echo IN_DOCKER=1 >> .env @@ -78,39 +79,39 @@ jobs: tar -C ./contracts -zxf system-contracts.tar.gz - name: Install Apt dependencies + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config - name: Install Node + if: env.BUILD_CONTRACTS == 'true' uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 20 cache: 'npm' - name: Install Yarn + if: env.BUILD_CONTRACTS == 'true' run: npm install -g yarn - name: Setup rust + if: env.BUILD_CONTRACTS == 'true' uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 with: toolchain: nightly-2024-08-01 - - name: Install cargo-nextest from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: cargo-nextest - - - name: Install sqlx-cli from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: sqlx-cli - tag: 0.8.1 - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + - name: Install foundry-zksync + if: env.BUILD_CONTRACTS == 'true' + run: | + mkdir ./foundry-zksync + curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz + tar zxf foundry_nightly_linux_amd64.tar.gz -C ./foundry-zksync + chmod +x ./foundry-zksync/forge ./foundry-zksync/cast + echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | # Download needed versions of vyper compiler @@ -128,18 +129,18 @@ jobs: chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" done - - name: init - shell: bash + - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | - mkdir -p ./volumes/postgres - docker compose up -d postgres - zkt || true + ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup + zkstackup --local || true - name: build contracts + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | cp 
etc/tokens/{test,localhost}.json - zk_supervisor contracts + zkstack dev contracts - name: Upload contracts uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml index 60c152213e6..5d42696c0b2 100644 --- a/.github/workflows/new-build-prover-template.yml +++ b/.github/workflows/new-build-prover-template.yml @@ -40,7 +40,7 @@ on: jobs: get-protocol-version: name: Get protocol version - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} steps: @@ -86,7 +86,7 @@ jobs: needs: get-protocol-version env: PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] strategy: matrix: components: @@ -96,6 +96,7 @@ jobs: - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor + - prover-autoscaler steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: @@ -166,7 +167,7 @@ jobs: copy-images: name: Copy images between docker registries - needs: [ build-images, get-protocol-version ] + needs: [build-images, get-protocol-version] env: PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} runs-on: matterlabs-ci-runner @@ -187,12 +188,12 @@ jobs: run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev docker buildx imagetools create \ - --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} - name: Login and push to Europe GAR run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev docker buildx imagetools create \ - --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 11a844fdd2b..18708420dab 100644 --- a/.github/workflows/release-test-stage.yml +++ 
b/.github/workflows/release-test-stage.yml @@ -39,7 +39,7 @@ jobs: - '!prover/**' setup: name: Setup - runs-on: [ matterlabs-deployer-stage ] + runs-on: [matterlabs-deployer-stage] outputs: image_tag_suffix: ${{ steps.generate-tag-suffix.outputs.image_tag_suffix }} prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }} @@ -58,10 +58,9 @@ jobs: run: | ./prover/extract-setup-data-keys.sh >> $GITHUB_OUTPUT - build-push-core-images: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-core-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -72,7 +71,7 @@ jobs: build-push-tee-prover-images: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-tee-prover-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -84,7 +83,7 @@ jobs: build-push-contract-verifier: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-contract-verifier-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -95,7 +94,7 @@ jobs: build-push-prover-images: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-prover-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -108,7 +107,7 @@ jobs: build-push-witness-generator-image-avx512: name: Build and push prover images with avx512 instructions - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-witness-generator-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -122,7 +121,7 @@ jobs: build-gar-prover-fri-gpu-and-circuit-prover-gpu-gar: name: Build GAR prover FRI GPU - needs: [ setup, build-push-prover-images ] + needs: [setup, build-push-prover-images] uses: ./.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index a57bed3006a..ae7e5ee671b 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -18,65 +18,66 @@ jobs: fetch-depth: 0 ref: ${{ github.base_ref }} - # - name: fetch PR branch - # run: | - # git remote add pr_repo ${{ github.event.pull_request.head.repo.clone_url }} - # git fetch pr_repo ${{ github.event.pull_request.head.ref }} - - # - name: fetch merge-base SHA - # id: merge_base - # run: echo "sha=$(git merge-base HEAD FETCH_HEAD)" >> $GITHUB_OUTPUT - - # - name: checkout divergence point - # run: git checkout ${{ steps.merge_base.outputs.sha }} --recurse-submodules - - # - name: setup-env - # run: | - # touch .env - # echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV - # echo $(pwd)/bin >> $GITHUB_PATH - # echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env - # echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env - # echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env - # echo "RUSTC_WRAPPER=sccache" >> .env - - # - name: init - # run: | - # run_retried docker compose pull zk - 
# docker compose up -d zk - - # - name: run benchmarks on base branch - # shell: bash - # run: | - # ci_run zkt - # ci_run zk_supervisor contracts --system-contracts - # ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai - # ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes - - # - name: checkout PR - # run: | - # git checkout --force FETCH_HEAD --recurse-submodules - - # - name: run benchmarks on PR - # shell: bash - # run: | - # ci_run zkt - # ci_run zk_supervisor contracts --system-contracts - # ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai - # ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes - - # EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) - # echo "speedup<<$EOF" >> $GITHUB_OUTPUT - # ci_run cargo run --package vm-benchmark --release --bin compare_iai_results base-iai pr-iai base-opcodes pr-opcodes >> $GITHUB_OUTPUT - # echo "$EOF" >> $GITHUB_OUTPUT - # id: comparison - - # - name: Comment on PR - # uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 - # with: - # message: | - # ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }} - # ${{ steps.comparison.outputs.speedup }} - # comment_tag: vm-performance-changes - # mode: recreate - # create_if_not_exists: ${{ steps.comparison.outputs.speedup != '' }} +# - name: fetch PR branch +# run: | +# git remote add pr_repo ${{ github.event.pull_request.head.repo.clone_url }} +# git fetch pr_repo ${{ github.event.pull_request.head.ref }} +# +# - name: fetch merge-base SHA +# id: merge_base +# run: echo "sha=$(git merge-base HEAD FETCH_HEAD)" >> $GITHUB_OUTPUT +# +# - name: checkout divergence point +# run: git checkout ${{ steps.merge_base.outputs.sha }} --recurse-submodules +# +# - name: setup-env +# run: | +# touch .env +# echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV +# echo $(pwd)/bin >> $GITHUB_PATH +# echo $(pwd)/zkstack_cli/zkstackup >> $GITHUB_PATH +# echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env +# echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env +# echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env +# echo "RUSTC_WRAPPER=sccache" >> .env +# +# - name: init +# run: | +# run_retried docker compose pull zk +# docker compose up -d zk +# +# - name: run benchmarks on base branch +# shell: bash +# run: | +# ci_run zkstackup -g --local +# ci_run zkstack dev contracts --system-contracts +# ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai +# ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes +# +# - name: checkout PR +# run: | +# git checkout --force FETCH_HEAD --recurse-submodules +# +# - name: run benchmarks on PR +# shell: bash +# run: | +# ci_run zkstackup -g --local +# ci_run zkstack dev contracts --system-contracts +# ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai +# ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes +# +# EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) +# echo "speedup<<$EOF" >> $GITHUB_OUTPUT +# ci_run cargo run --package vm-benchmark --release --bin compare_iai_results base-iai pr-iai base-opcodes pr-opcodes >> $GITHUB_OUTPUT +# echo "$EOF" >> $GITHUB_OUTPUT +# id: comparison 
+# +# - name: Comment on PR +# uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 +# with: +# message: | +# ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }} +# ${{ steps.comparison.outputs.speedup }} +# comment_tag: vm-performance-changes +# mode: recreate +# create_if_not_exists: ${{ steps.comparison.outputs.speedup != '' }} diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index 4c8c90a0d8f..d336a1472e4 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -33,8 +33,15 @@ jobs: run: | run_retried docker compose pull zk docker compose up -d zk - ci_run zkt - ci_run zk_supervisor contracts + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + + - name: build contracts + run: | + ci_run zkstack dev contracts - name: run benchmarks run: | diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index 5a08dff178c..b9321c8f5d6 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -4,6 +4,12 @@ on: # Workflow dispatch, to allow building and pushing new environments. # It will NOT mark them as latest. workflow_dispatch: + inputs: + build_cuda: + description: "Build CUDA images or not" + type: boolean + required: false + default: false push: branches: @@ -43,10 +49,10 @@ jobs: - docker/zk-environment/Dockerfile - .github/workflows/zk-environment-publish.yml zk_env_cuda_11_8: - - docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile + - docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile - .github/workflows/zk-environment-publish.yml zk_env_cuda_12: - - docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile + - docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile - .github/workflows/zk-environment-publish.yml get_short_sha: @@ -202,25 +208,25 @@ jobs: echo "should_run=$changed_files_output" >> "$GITHUB_OUTPUT" - name: Checkout code - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: submodules: "recursive" - name: Log in to US GAR - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - name: Log in to Docker Hub - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} 
- name: Login to GitHub Container Registry - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io @@ -228,19 +234,19 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 - name: Build and optionally push - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 with: - file: docker/zk-environment/20.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile - push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + file: docker/zk-environment/22.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile + push: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/main' ) || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-${{ matrix.cuda_version }}:latest matterlabs/zk-environment:cuda-${{ matrix.cuda_version }}-latest diff --git a/.gitignore b/.gitignore index d60a93bba74..ea01fe127aa 100644 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,7 @@ Cargo.lock !/Cargo.lock !/infrastructure/zksync-crypto/Cargo.lock !/prover/Cargo.lock -!/zk_toolbox/Cargo.lock +!/zkstack_cli/Cargo.lock /etc/env/target/* /etc/env/.current @@ -116,9 +116,10 @@ hyperchain-*.yml prover/crates/bin/vk_setup_data_generator_server_fri/data/setup_* prover/data/keys/setup_* -# Zk Toolbox +# ZK Stack CLI chains/era/configs/* chains/gateway/* +chains/avail/* configs/* era-observability/ core/tests/ts-integration/deployments-zk diff --git a/Cargo.lock b/Cargo.lock index 9a51d81a257..629869af757 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -292,6 +292,20 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-compression" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" +dependencies = [ + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "zstd", + "zstd-safe", +] + [[package]] name = "async-executor" version = "1.13.1" @@ -1313,14 +1327,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" +checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" dependencies = [ "derivative", "serde", - "zk_evm 0.150.5", - "zkevm_circuits 0.150.5", + "zk_evm 0.150.6", + 
"zkevm_circuits 0.150.6", ] [[package]] @@ -1380,11 +1394,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" +checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.6", "derivative", "rayon", "serde", @@ -1539,6 +1553,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "const-decoder" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b381abde2cdc1bc3817e394b24e05667a2dc89f37570cbd34d9c397d99e56e3f" +dependencies = [ + "compile-fmt", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -3303,6 +3326,12 @@ dependencies = [ "url", ] +[[package]] +name = "human-repr" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f58b778a5761513caf593693f8951c97a5b610841e754788400f32102eefdff1" + [[package]] name = "hyper" version = "0.14.30" @@ -5871,6 +5900,7 @@ version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ + "async-compression", "base64 0.22.1", "bytes", "encoding_rs", @@ -8294,13 +8324,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ + "async-compression", "bitflags 2.6.0", "bytes", + "futures-core", "http 1.1.0", "http-body 1.0.1", "http-body-util", "pin-project-lite", "tokio", + "tokio-util", "tower-layer", "tower-service", ] @@ -8728,6 +8761,7 @@ dependencies = [ "zksync_types", "zksync_utils", "zksync_vlog", + "zksync_vm2", ] [[package]] @@ -9326,9 +9360,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" +checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" dependencies = [ "anyhow", "lazy_static", @@ -9336,7 +9370,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.5", + "zk_evm_abstractions 0.150.6", ] [[package]] @@ -9367,15 +9401,15 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" +checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", ] [[package]] @@ -9424,9 +9458,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" +checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" dependencies = [ "arrayvec 0.7.6", "boojum", @@ -9438,7 +9472,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", "zksync_cs_derive", ] @@ -9486,9 +9520,9 @@ dependencies = [ 
[[package]] name = "zkevm_opcode_defs" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" +checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -9612,7 +9646,7 @@ dependencies = [ "anyhow", "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "futures 0.3.30", "itertools 0.10.5", "num_cpus", @@ -9624,7 +9658,7 @@ dependencies = [ "vise", "zk_evm 0.133.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -9633,6 +9667,7 @@ dependencies = [ "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", + "zksync_system_constants", "zksync_types", "zksync_utils", "zksync_web3_decl", @@ -9640,9 +9675,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -9666,8 +9701,12 @@ dependencies = [ "secrecy", "serde", "serde_json", + "strum", + "strum_macros", + "time", "tracing", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -9677,9 +9716,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e7199c07aa14d9c3319839b98ad0496aac6e72327e70ded77ddb66329766db" +checksum = "a8001633dee671134cf572175a6c4f817904ce5f8d92e9b51f49891c5184a831" dependencies = [ "anyhow", "async-trait", @@ -9699,9 +9738,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -9720,9 +9759,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db07f7329b29737d8fd6860b350c809ae1b56ad53e26a7d0eddf3664ccb9dacb" +checksum = "061546668dd779ecb08302d2c84a6419e0093ad42aaa279bf20a8fa2ffda1be4" dependencies = [ "anyhow", "async-trait", @@ -9742,9 +9781,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a89a2d60db1ccd41438d29724a8d0d57fcf9506eb4443ea4b9205fd78c9c8e59" +checksum = "4e9789b5be26d20511bd7930bd9916d91122ff6cb09a28898563152a52f9f5eb" dependencies = [ "anyhow", "async-trait", @@ -9752,6 +9791,7 @@ dependencies = [ "build_html", "bytesize", "http-body-util", + "human-repr", "hyper 1.4.1", "hyper-util", "im", @@ -9778,9 +9818,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = 
"e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", @@ -9800,9 +9840,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" +checksum = "b2b2aab4ed18b13cd584f4edcc2546c8da82f89ac62e525063e12935ff28c9be" dependencies = [ "anyhow", "async-trait", @@ -9820,9 +9860,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand 0.8.5", @@ -9989,14 +10029,17 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "backon", "base58", "blake2 0.10.6", "blake2b_simd", + "bytes", "flate2", "futures 0.3.30", "hex", "jsonrpsee 0.23.2", "parity-scale-codec", + "reqwest 0.12.7", "scale-encode", "serde", "serde_json", @@ -10050,10 +10093,13 @@ dependencies = [ "tracing", "vise", "zksync_concurrency", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_consensus_storage", + "zksync_consensus_utils", "zksync_contracts", "zksync_db_connection", + "zksync_l1_contract_interface", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", @@ -10152,8 +10198,8 @@ dependencies = [ "async-trait", "rlp", "thiserror", - "tokio", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] @@ -10180,7 +10226,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.27.0" +version = "25.0.0" dependencies = [ "anyhow", "assert_matches", @@ -10247,7 +10293,9 @@ dependencies = [ "rand 0.8.5", "reqwest 0.12.7", "serde", + "serde_json", "tokio", + "tracing", "url", "zksync_config", "zksync_types", @@ -10331,9 +10379,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" +checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" dependencies = [ "boojum", "derivative", @@ -10343,15 +10391,17 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.5", + "zkevm_circuits 0.150.6", ] [[package]] name = "zksync_l1_contract_interface" version = "0.1.0" dependencies = [ + "anyhow", "hex", "once_cell", + "rand 0.8.5", "serde", "serde_json", "serde_with", @@ -10470,20 +10520,21 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "ethabi", "hex", "itertools 0.10.5", "once_cell", + "pretty_assertions", + "test-casing", "thiserror", - "tokio", "tracing", "vise", "zk_evm 0.131.0-rc.2", "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_contracts", "zksync_eth_signer", "zksync_mini_merkle_tree", @@ -10504,6 +10555,7 @@ dependencies = [ "async-trait", "axum", "chrono", + "const-decoder", "futures 0.3.30", "governor", "hex", @@ -10524,7 +10576,7 @@ dependencies = [ "tower-http", "tracing", "vise", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_config", "zksync_consensus_roles", "zksync_contracts", @@ -10623,7 
+10675,6 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bigdecimal", "test-casing", "tokio", "tracing", @@ -10632,7 +10683,6 @@ dependencies = [ "zksync_dal", "zksync_eth_client", "zksync_types", - "zksync_utils", "zksync_web3_decl", ] @@ -10689,7 +10739,6 @@ dependencies = [ "zksync_state", "zksync_state_keeper", "zksync_storage", - "zksync_tee_verifier_input_producer", "zksync_types", "zksync_utils", "zksync_vlog", @@ -10757,6 +10806,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", + "backon", "chrono", "futures 0.3.30", "once_cell", @@ -10848,6 +10898,7 @@ dependencies = [ "serde_json", "tokio", "tower 0.4.13", + "tower-http", "tracing", "vise", "zksync_basic_types", @@ -10858,13 +10909,15 @@ dependencies = [ "zksync_object_store", "zksync_prover_interface", "zksync_types", + "zksync_utils", + "zksync_vm_executor", ] [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -10883,9 +10936,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck 0.5.0", @@ -10909,6 +10962,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -10923,7 +10977,7 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "serde", "serde_json", "serde_with", @@ -11166,6 +11220,8 @@ name = "zksync_tee_verifier" version = "0.1.0" dependencies = [ "anyhow", + "bincode", + "once_cell", "serde", "tracing", "zksync_config", @@ -11173,29 +11229,9 @@ dependencies = [ "zksync_crypto_primitives", "zksync_merkle_tree", "zksync_multivm", - "zksync_object_store", - "zksync_prover_interface", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_tee_verifier_input_producer" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "tokio", - "tracing", - "vise", - "zksync_dal", - "zksync_object_store", "zksync_prover_interface", - "zksync_queued_job_processor", - "zksync_tee_verifier", "zksync_types", "zksync_utils", - "zksync_vm_executor", ] [[package]] @@ -11231,7 +11267,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -11240,7 +11275,6 @@ dependencies = [ "tokio", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", @@ -11301,20 +11335,20 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.5", - "zkevm_opcode_defs 0.150.5", + "zk_evm_abstractions 0.150.6", + "zkevm_opcode_defs 0.150.6", "zksync_vm2_interface", ] 
[[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "primitive-types", ] @@ -11324,6 +11358,7 @@ name = "zksync_vm_executor" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "async-trait", "once_cell", "tokio", @@ -11413,6 +11448,24 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zstd" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +dependencies = [ + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.13+zstd.1.5.6" diff --git a/Cargo.toml b/Cargo.toml index 94fadb25968..0f8e6ba77ae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,85 +1,84 @@ [workspace] members = [ - # Binaries - "core/bin/block_reverter", - "core/bin/contract-verifier", - "core/bin/external_node", - "core/bin/merkle_tree_consistency_checker", - "core/bin/snapshots_creator", - "core/bin/selector_generator", - "core/bin/system-constants-generator", - "core/bin/verified_sources_fetcher", - "core/bin/zksync_server", - "core/bin/genesis_generator", - "core/bin/zksync_tee_prover", - # Node services - "core/node/node_framework", - "core/node/proof_data_handler", - "core/node/block_reverter", - "core/node/commitment_generator", - "core/node/house_keeper", - "core/node/genesis", - "core/node/shared_metrics", - "core/node/db_pruner", - "core/node/fee_model", - "core/node/da_dispatcher", - "core/node/eth_sender", - "core/node/vm_runner", - "core/node/test_utils", - "core/node/state_keeper", - "core/node/reorg_detector", - "core/node/consistency_checker", - "core/node/metadata_calculator", - "core/node/node_sync", - "core/node/node_storage_init", - "core/node/consensus", - "core/node/contract_verification_server", - "core/node/api_server", - "core/node/tee_verifier_input_producer", - "core/node/base_token_adjuster", - "core/node/external_proof_integration_api", - "core/node/logs_bloom_backfill", - "core/node/da_clients", - # Libraries - "core/lib/db_connection", - "core/lib/zksync_core_leftovers", - "core/lib/basic_types", - "core/lib/config", - "core/lib/constants", - "core/lib/contract_verifier", - "core/lib/contracts", - "core/lib/circuit_breaker", - "core/lib/dal", - "core/lib/env_config", - "core/lib/da_client", - "core/lib/eth_client", - "core/lib/eth_signer", - "core/lib/l1_contract_interface", - "core/lib/mempool", - "core/lib/merkle_tree", - "core/lib/mini_merkle_tree", - "core/lib/node_framework_derive", - "core/lib/object_store", - "core/lib/prover_interface", - "core/lib/queued_job_processor", - "core/lib/state", - "core/lib/storage", - "core/lib/tee_verifier", - "core/lib/types", - "core/lib/protobuf_config", - "core/lib/utils", - "core/lib/vlog", - "core/lib/multivm", - "core/lib/vm_interface", - "core/lib/vm_executor", - "core/lib/web3_decl", - "core/lib/snapshots_applier", - "core/lib/crypto_primitives", - "core/lib/external_price_api", - # Test infrastructure - "core/tests/test_account", - "core/tests/loadnext", 
- "core/tests/vm-benchmark", + # Binaries + "core/bin/block_reverter", + "core/bin/contract-verifier", + "core/bin/external_node", + "core/bin/merkle_tree_consistency_checker", + "core/bin/snapshots_creator", + "core/bin/selector_generator", + "core/bin/system-constants-generator", + "core/bin/verified_sources_fetcher", + "core/bin/zksync_server", + "core/bin/genesis_generator", + "core/bin/zksync_tee_prover", + # Node services + "core/node/node_framework", + "core/node/proof_data_handler", + "core/node/block_reverter", + "core/node/commitment_generator", + "core/node/house_keeper", + "core/node/genesis", + "core/node/shared_metrics", + "core/node/db_pruner", + "core/node/fee_model", + "core/node/da_dispatcher", + "core/node/eth_sender", + "core/node/vm_runner", + "core/node/test_utils", + "core/node/state_keeper", + "core/node/reorg_detector", + "core/node/consistency_checker", + "core/node/metadata_calculator", + "core/node/node_sync", + "core/node/node_storage_init", + "core/node/consensus", + "core/node/contract_verification_server", + "core/node/api_server", + "core/node/base_token_adjuster", + "core/node/external_proof_integration_api", + "core/node/logs_bloom_backfill", + "core/node/da_clients", + # Libraries + "core/lib/db_connection", + "core/lib/zksync_core_leftovers", + "core/lib/basic_types", + "core/lib/config", + "core/lib/constants", + "core/lib/contract_verifier", + "core/lib/contracts", + "core/lib/circuit_breaker", + "core/lib/dal", + "core/lib/env_config", + "core/lib/da_client", + "core/lib/eth_client", + "core/lib/eth_signer", + "core/lib/l1_contract_interface", + "core/lib/mempool", + "core/lib/merkle_tree", + "core/lib/mini_merkle_tree", + "core/lib/node_framework_derive", + "core/lib/object_store", + "core/lib/prover_interface", + "core/lib/queued_job_processor", + "core/lib/state", + "core/lib/storage", + "core/lib/tee_verifier", + "core/lib/types", + "core/lib/protobuf_config", + "core/lib/utils", + "core/lib/vlog", + "core/lib/multivm", + "core/lib/vm_interface", + "core/lib/vm_executor", + "core/lib/web3_decl", + "core/lib/snapshots_applier", + "core/lib/crypto_primitives", + "core/lib/external_price_api", + # Test infrastructure + "core/tests/test_account", + "core/tests/loadnext", + "core/tests/vm-benchmark", ] resolver = "2" @@ -111,9 +110,11 @@ backon = "0.4.4" bigdecimal = "0.4.5" bincode = "1" blake2 = "0.10" +bytes = "1" chrono = "0.4" clap = "4.2.2" codegen = "0.2.0" +const-decoder = "0.4.0" criterion = "0.4.0" ctrlc = "3.1" dashmap = "5.5.3" @@ -155,7 +156,7 @@ rayon = "1.3.1" regex = "1" reqwest = "0.12" rlp = "0.5" -rocksdb = "0.21.0" +rocksdb = "0.21" rustc_version = "0.4.0" rustls = "0.23" secp256k1 = { version = "0.27.0", features = ["recovery", "global-context"] } @@ -172,6 +173,7 @@ sqlx = "0.8.1" static_assertions = "1.1" structopt = "0.3.20" strum = "0.26" +strum_macros = "0.26.4" tempfile = "3.0.2" test-casing = "0.1.2" test-log = "0.2.15" @@ -185,7 +187,7 @@ tower-http = "0.5.2" tracing = "0.1" tracing-subscriber = "0.3" tracing-opentelemetry = "0.25.0" -time = "0.3.36" # Has to be same as used by `tracing-subscriber` +time = "0.3.36" # Has to be same as used by `tracing-subscriber` url = "2" web3 = "0.19.0" fraction = "0.15.3" @@ -217,30 +219,30 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. 
circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.5" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.6" } crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.30.1" } -kzg = { package = "zksync_kzg", version = "=0.150.5" } +kzg = { package = "zksync_kzg", version = "=0.150.6" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } zk_evm_1_3_3 = { package = "zk_evm", version = "0.133" } zk_evm_1_4_0 = { package = "zk_evm", version = "0.140" } zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } -zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } +zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.6" } # New VM; pinned to a specific commit because of instability -zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "74577d9be13b1bff9d1a712389731f669b179e47" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "df5bec3d04d64d434f9b0ccb285ba4681008f7b3" } # Consensus dependencies. -zksync_concurrency = "=0.3.0" -zksync_consensus_bft = "=0.3.0" -zksync_consensus_crypto = "=0.3.0" -zksync_consensus_executor = "=0.3.0" -zksync_consensus_network = "=0.3.0" -zksync_consensus_roles = "=0.3.0" -zksync_consensus_storage = "=0.3.0" -zksync_consensus_utils = "=0.3.0" -zksync_protobuf = "=0.3.0" -zksync_protobuf_build = "=0.3.0" +zksync_concurrency = "=0.5.0" +zksync_consensus_bft = "=0.5.0" +zksync_consensus_crypto = "=0.5.0" +zksync_consensus_executor = "=0.5.0" +zksync_consensus_network = "=0.5.0" +zksync_consensus_roles = "=0.5.0" +zksync_consensus_storage = "=0.5.0" +zksync_consensus_utils = "=0.5.0" +zksync_protobuf = "=0.5.0" +zksync_protobuf_build = "=0.5.0" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } @@ -307,6 +309,5 @@ zksync_node_storage_init = { version = "0.1.0", path = "core/node/node_storage_i zksync_node_consensus = { version = "0.1.0", path = "core/node/consensus" } zksync_contract_verification_server = { version = "0.1.0", path = "core/node/contract_verification_server" } zksync_node_api_server = { version = "0.1.0", path = "core/node/api_server" } -zksync_tee_verifier_input_producer = { version = "0.1.0", path = "core/node/tee_verifier_input_producer" } zksync_base_token_adjuster = { version = "0.1.0", path = "core/node/base_token_adjuster" } zksync_logs_bloom_backfill = { version = "0.1.0", path = "core/node/logs_bloom_backfill" } diff --git a/bin/ci_localnet_up b/bin/ci_localnet_up index 8673a909af7..c399de410d7 100755 --- a/bin/ci_localnet_up +++ b/bin/ci_localnet_up @@ -4,6 +4,5 @@ set -e cd $ZKSYNC_HOME -mkdir -p ./volumes/postgres ./volumes/reth/data run_retried docker-compose pull docker-compose --profile runner up -d --wait diff --git a/bin/prover_checkers/batch_availability_checker b/bin/prover_checkers/batch_availability_checker new file mode 100644 index 00000000000..ae7aade2f68 --- /dev/null +++ b/bin/prover_checkers/batch_availability_checker @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Configuration +# DATABASE_URL - The URL of the prover database to connect to +# BATCH_NUMBER - The batch number to check availability for +# INTERVAL - Time interval 
for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +# Loop to query periodically +while true; do + # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." + exit 1 # Exit with non-zero status to fail CI + fi + + # Run the SQL query and capture the result + RESULT=$(psql $DATABASE_URL -c "SELECT count(*) FROM witness_inputs_fri WHERE l1_batch_number = $BATCH_NUMBER;" -t -A) + + # Check if the result is 1 + if [ "$RESULT" -eq 1 ]; then + echo "Query result is 1. Success!" + exit 0 # Exit with zero status to succeed CI + else + echo "Batch is not available yet. Retrying in $INTERVAL seconds..." + fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/bin/prover_checkers/batch_l1_status_checker b/bin/prover_checkers/batch_l1_status_checker new file mode 100755 index 00000000000..24f26e354ea --- /dev/null +++ b/bin/prover_checkers/batch_l1_status_checker @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Needs following configuration +# URL - URL of the API endpoint +# INTERVAL - Time interval for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +echo "URL: $URL" + +# Loop to query periodically +while true; do + # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." + exit 1 # Exit with non-zero status to fail CI + fi + + # Run the curl request and capture the response + RESPONSE=$(curl --silent --request POST \ + --url $URL \ + --header 'Content-Type: application/json' \ + --data '{ + "jsonrpc": "2.0", + "id": 1, + "method": "zks_getBlockDetails", + "params": [1] + }') + + # Parse the executedAt field using jq + EXECUTED_AT=$(echo $RESPONSE | jq -r '.result.executedAt') + + # Check if executedAt is not null + if [ "$EXECUTED_AT" != "null" ] && [ -n "$EXECUTED_AT" ]; then + echo "executedAt is not null: $EXECUTED_AT" + echo "true" + exit 0 # Exit with zero status to succeed CI + else + DATABASE_STATUS=$(psql $DATABASE_URL -c "SELECT status FROM proof_compression_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER;" -t -A) + echo "executedAt is null, database status is $DATABASE_STATUS, retrying in $INTERVAL seconds..." + fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/bin/prover_checkers/kill_prover b/bin/prover_checkers/kill_prover new file mode 100644 index 00000000000..2a65aea2d67 --- /dev/null +++ b/bin/prover_checkers/kill_prover @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Use pkill to find and kill processes using circuit prover +if ! pkill -f 'zksync_circuit_prover|zkstack prover run --component=circuit-prover'; then + echo "No processes are currently using the GPU." + exit 0 +fi + +echo "All GPU-related processes have been killed." 
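[editorial note, not part of the patch] The prover checker scripts added above are driven entirely by the environment variables listed in their "Configuration" comments (DATABASE_URL, BATCH_NUMBER, INTERVAL, TIMEOUT). A minimal, hypothetical invocation might look like the sketch below; the paths, database URL, and numeric values are illustrative assumptions only and the same pattern applies to the other checkers introduced in this patch.

```bash
#!/usr/bin/env bash
set -euo pipefail

# Assumed local prover database URL and batch number; adjust for your setup.
export DATABASE_URL="postgres://postgres:notsecurepassword@localhost:5432/prover_local"
export BATCH_NUMBER=1
export INTERVAL=30    # poll every 30 seconds
export TIMEOUT=1200   # give up (and fail CI) after 20 minutes

# Block until the batch appears in witness_inputs_fri, per the script's polling loop.
./bin/prover_checkers/batch_availability_checker

# Once the prover run is finished, the GPU-bound prover processes can be stopped.
./bin/prover_checkers/kill_prover
```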
diff --git a/bin/prover_checkers/prover_jobs_status_checker b/bin/prover_checkers/prover_jobs_status_checker new file mode 100755 index 00000000000..6816d9a2d14 --- /dev/null +++ b/bin/prover_checkers/prover_jobs_status_checker @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Configuration +# DATABASE_URL - The URL of the prover database to connect to +# BATCH_NUMBER - The batch number to check readiness for +# INTERVAL - Time interval for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +# Loop to query periodically +while true; do + # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." + exit 1 # Exit with non-zero status to fail CI + fi + + # Run the SQL query and capture the result + RESULT=$(psql $DATABASE_URL -c "SELECT count(*) FROM proof_compression_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER AND status = 'queued';" -t -A) + + # Check if the result is 1 + if [ "$RESULT" -eq 1 ]; then + echo "Query result is 1. Success!" + exit 0 # Exit with zero status to succeed CI + else + STATUS=$(psql $DATABASE_URL -c "SELECT COUNT(*), status FROM prover_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER GROUP BY status;" -t -A) + echo "Current status is $STATUS" + echo "Retrying in $INTERVAL seconds..." + fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/bin/run_on_all_chains.sh b/bin/run_on_all_chains.sh new file mode 100755 index 00000000000..68b6e81662f --- /dev/null +++ b/bin/run_on_all_chains.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# Colors for the terminal output +RED='\033[0;31m' +GREEN='\033[0;32m' +NC='\033[0m' # No Color + + +command=$1 +chain_list=$2 +log_dir=$3 +IFS=',' read -r -a chains <<< "$chain_list" +pids=() +statuses=() + +# Start background processes +for i in "${!chains[@]}"; do + eval "$command --chain ${chains[$i]} &> ${log_dir}/${chains[$i]}.log" & + pids+=($!) +done + +# Wait for all processes to complete and capture their exit statuses +for i in "${!pids[@]}"; do + wait ${pids[$i]} + statuses[$i]=$? +done + +# Check exit statuses and set overall status +overall_status=0 + +for i in "${!statuses[@]}"; do + if [ ${statuses[$i]} -ne 0 ]; then + overall_status=1 + echo -e "${RED}✗ ERROR (exit code ${statuses[$i]}): ${chains[$i]}${NC}" + else + echo -e "${GREEN}✓ SUCCESS: ${chains[$i]}${NC}" + fi +done + +# Exit with overall status +exit $overall_status diff --git a/bin/zk b/bin/zk index 868c4e338cd..f3b927de8f8 100755 --- a/bin/zk +++ b/bin/zk @@ -39,6 +39,7 @@ check_yarn_version() { # and it will be hard for them to see what went wrong. check_subdirectory check_yarn_version + if [ -z "$1" ]; then cd $ZKSYNC_HOME run_retried yarn install --frozen-lockfile && yarn utils build && yarn zk build diff --git a/bin/zkt b/bin/zkt deleted file mode 100755 index f781ca67528..00000000000 --- a/bin/zkt +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -cd $(dirname $0) - -if which zkup >/dev/null; then - cargo uninstall zk_inception - cargo uninstall zk_supervisor - git config --local core.hooksPath || - git config --local core.hooksPath ./.githooks - zkup -p .. 
--alias -else - echo zkup is not installed, please install it https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup - cd ../zk_toolbox - cargo install --path ./crates/zk_inception --force - cargo install --path ./crates/zk_supervisor --force -fi - diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 6cf2ff4419a..56239303cd4 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,103 @@ # Changelog +## [25.0.0](https://github.com/matter-labs/zksync-era/compare/core-v24.29.0...core-v25.0.0) (2024-10-23) + + +### ⚠ BREAKING CHANGES + +* **contracts:** integrate protocol defense changes ([#2737](https://github.com/matter-labs/zksync-era/issues/2737)) + +### Features + +* Add CoinMarketCap external API ([#2971](https://github.com/matter-labs/zksync-era/issues/2971)) ([c1cb30e](https://github.com/matter-labs/zksync-era/commit/c1cb30e59ca1d0b5fea5fe0980082aea0eb04aa2)) +* **api:** Implement eth_maxPriorityFeePerGas ([#3135](https://github.com/matter-labs/zksync-era/issues/3135)) ([35e84cc](https://github.com/matter-labs/zksync-era/commit/35e84cc03a7fdd315932fb3020fe41c95a6e4bca)) +* **api:** Make acceptable values cache lag configurable ([#3028](https://github.com/matter-labs/zksync-era/issues/3028)) ([6747529](https://github.com/matter-labs/zksync-era/commit/67475292ff770d2edd6884be27f976a4144778ae)) +* **contracts:** integrate protocol defense changes ([#2737](https://github.com/matter-labs/zksync-era/issues/2737)) ([c60a348](https://github.com/matter-labs/zksync-era/commit/c60a3482ee09b3e371163e62f49e83bc6d6f4548)) +* **external-node:** save protocol version before opening a batch ([#3136](https://github.com/matter-labs/zksync-era/issues/3136)) ([d6de4f4](https://github.com/matter-labs/zksync-era/commit/d6de4f40ddce339c760c95e2bf4b8aceb571af7f)) +* Prover e2e test ([#2975](https://github.com/matter-labs/zksync-era/issues/2975)) ([0edd796](https://github.com/matter-labs/zksync-era/commit/0edd7962429b3530ae751bd7cc947c97193dd0ca)) +* **prover:** Add min_provers and dry_run features. Improve metrics and test. 
([#3129](https://github.com/matter-labs/zksync-era/issues/3129)) ([7c28964](https://github.com/matter-labs/zksync-era/commit/7c289649b7b3c418c7193a35b51c264cf4970f3c)) +* **tee_verifier:** speedup SQL query for new jobs ([#3133](https://github.com/matter-labs/zksync-era/issues/3133)) ([30ceee8](https://github.com/matter-labs/zksync-era/commit/30ceee8a48046e349ff0234ebb24d468a0e0876c)) +* vm2 tracers can access storage ([#3114](https://github.com/matter-labs/zksync-era/issues/3114)) ([e466b52](https://github.com/matter-labs/zksync-era/commit/e466b52948e3c4ed1cb5af4fd999a52028e4d216)) +* **vm:** Return compressed bytecodes from `push_transaction()` ([#3126](https://github.com/matter-labs/zksync-era/issues/3126)) ([37f209f](https://github.com/matter-labs/zksync-era/commit/37f209fec8e7cb65c0e60003d46b9ea69c43caf1)) + + +### Bug Fixes + +* **call_tracer:** Flat call tracer fixes for blocks ([#3095](https://github.com/matter-labs/zksync-era/issues/3095)) ([30ddb29](https://github.com/matter-labs/zksync-era/commit/30ddb292977340beab37a81f75c35480cbdd59d3)) +* **consensus:** preventing config update reverts ([#3148](https://github.com/matter-labs/zksync-era/issues/3148)) ([caee55f](https://github.com/matter-labs/zksync-era/commit/caee55fef4eed0ec58cceaeba277bbdedf5c6f51)) +* **en:** Return `SyncState` health check ([#3142](https://github.com/matter-labs/zksync-era/issues/3142)) ([abeee81](https://github.com/matter-labs/zksync-era/commit/abeee8190d3c3a5e577d71024bdfb30ff516ad03)) +* **external-node:** delete empty unsealed batch on EN initialization ([#3125](https://github.com/matter-labs/zksync-era/issues/3125)) ([5d5214b](https://github.com/matter-labs/zksync-era/commit/5d5214ba983823b306495d34fdd1d46abacce07a)) +* Fix counter metric type to be Counter. ([#3153](https://github.com/matter-labs/zksync-era/issues/3153)) ([08a3fe7](https://github.com/matter-labs/zksync-era/commit/08a3fe7ffd0410c51334193068649905337d5e84)) +* **mempool:** minor mempool improvements ([#3113](https://github.com/matter-labs/zksync-era/issues/3113)) ([cd16083](https://github.com/matter-labs/zksync-era/commit/cd160830a0b7ebe5af4ecbd944da1cd51af3528a)) +* **prover:** Run for zero queue to allow scaling down to 0 ([#3115](https://github.com/matter-labs/zksync-era/issues/3115)) ([bbe1919](https://github.com/matter-labs/zksync-era/commit/bbe191937fa5c5711a7164fd4f0c2ae65cda0833)) +* restore instruction count functionality ([#3081](https://github.com/matter-labs/zksync-era/issues/3081)) ([6159f75](https://github.com/matter-labs/zksync-era/commit/6159f7531a0340a69c4926c4e0325811ed7cabb8)) +* **state-keeper:** save call trace for upgrade txs ([#3132](https://github.com/matter-labs/zksync-era/issues/3132)) ([e1c363f](https://github.com/matter-labs/zksync-era/commit/e1c363f8f5e03c8d62bba1523f17b87d6a0e25ad)) +* **tee_prover:** add zstd compression ([#3144](https://github.com/matter-labs/zksync-era/issues/3144)) ([7241ae1](https://github.com/matter-labs/zksync-era/commit/7241ae139b2b6bf9a9966eaa2f22203583a3786f)) +* **tee_verifier:** correctly initialize storage for re-execution ([#3017](https://github.com/matter-labs/zksync-era/issues/3017)) ([9d88373](https://github.com/matter-labs/zksync-era/commit/9d88373f1b745c489e98e5ef542644a70e815498)) + +## [24.29.0](https://github.com/matter-labs/zksync-era/compare/core-v24.28.0...core-v24.29.0) (2024-10-14) + + +### Features + +* Add initial version prover_autoscaler ([#2993](https://github.com/matter-labs/zksync-era/issues/2993)) 
([ebf9604](https://github.com/matter-labs/zksync-era/commit/ebf9604c5ab2a1cae1ffd2f9c922f35a1d0ad876)) +* add metric to track current cbt ratio ([#3020](https://github.com/matter-labs/zksync-era/issues/3020)) ([3fd2fb1](https://github.com/matter-labs/zksync-era/commit/3fd2fb14e7283c6858731e162522e70051a8e162)) +* **configs:** Add port parameter to ConsensusConfig ([#2986](https://github.com/matter-labs/zksync-era/issues/2986)) ([25112df](https://github.com/matter-labs/zksync-era/commit/25112df39d052f083bc45964f0298b3af5842cac)) +* **configs:** Add port parameter to ConsensusConfig ([#3051](https://github.com/matter-labs/zksync-era/issues/3051)) ([038c397](https://github.com/matter-labs/zksync-era/commit/038c397ce842601da5109c460b09dbf9d51cf2fc)) +* **consensus:** smooth transition to p2p syncing (BFT-515) ([#3075](https://github.com/matter-labs/zksync-era/issues/3075)) ([5d339b4](https://github.com/matter-labs/zksync-era/commit/5d339b46fee66bc3a45493586626d318380680dd)) +* **consensus:** Support for syncing blocks before consensus genesis over p2p network ([#3040](https://github.com/matter-labs/zksync-era/issues/3040)) ([d3edc3d](https://github.com/matter-labs/zksync-era/commit/d3edc3d817c151ed00d4fa822fdae0a746e33356)) +* **en:** periodically fetch bridge addresses ([#2949](https://github.com/matter-labs/zksync-era/issues/2949)) ([e984bfb](https://github.com/matter-labs/zksync-era/commit/e984bfb8a243bc746549ab9347dc0a367fe02790)) +* **eth-sender:** add time_in_mempool_cap config ([#3018](https://github.com/matter-labs/zksync-era/issues/3018)) ([f6d86bd](https://github.com/matter-labs/zksync-era/commit/f6d86bd7935a1cdbb528b13437424031fda3cb8e)) +* **eth-watch:** catch another reth error ([#3026](https://github.com/matter-labs/zksync-era/issues/3026)) ([4640c42](https://github.com/matter-labs/zksync-era/commit/4640c4233af46c97f207d2dbce5fedd1bcb66c43)) +* Handle new yul compilation flow ([#3038](https://github.com/matter-labs/zksync-era/issues/3038)) ([4035361](https://github.com/matter-labs/zksync-era/commit/40353616f278800dc80fcbe5f2a6483019033b20)) +* **state-keeper:** pre-insert unsealed L1 batches ([#2846](https://github.com/matter-labs/zksync-era/issues/2846)) ([e5b5a3b](https://github.com/matter-labs/zksync-era/commit/e5b5a3b7b62e8d4035fe89c2a287bf3606d17bc5)) +* **vm:** EVM emulator support – base ([#2979](https://github.com/matter-labs/zksync-era/issues/2979)) ([deafa46](https://github.com/matter-labs/zksync-era/commit/deafa460715334a77edf9fe8aa76fa90029342c4)) +* **zk_toolbox:** added support for setting attester committee defined in a separate file ([#2992](https://github.com/matter-labs/zksync-era/issues/2992)) ([6105514](https://github.com/matter-labs/zksync-era/commit/610551427d5ab129f91e69b5efb318da917457d7)) +* **zk_toolbox:** Redesign zk_toolbox commands ([#3003](https://github.com/matter-labs/zksync-era/issues/3003)) ([114834f](https://github.com/matter-labs/zksync-era/commit/114834f357421c62d596a1954fac8ce615cfde49)) +* **zktoolbox:** added checking the contract owner in set-attester-committee command ([#3061](https://github.com/matter-labs/zksync-era/issues/3061)) ([9b0a606](https://github.com/matter-labs/zksync-era/commit/9b0a6067923c5276f560f3abccedc4e6a5167dda)) + + +### Bug Fixes + +* **api:** Accept integer block count in `eth_feeHistory` ([#3077](https://github.com/matter-labs/zksync-era/issues/3077)) ([4d527d4](https://github.com/matter-labs/zksync-era/commit/4d527d4b44b6b083e2a813d48c79d8021ea6f843)) +* **api:** Adapt `eth_getCode` to EVM emulator 
([#3073](https://github.com/matter-labs/zksync-era/issues/3073)) ([15fe5a6](https://github.com/matter-labs/zksync-era/commit/15fe5a62f03cd103afd7fa5eb03e27db25686ba9)) +* bincode deserialization for VM run data ([#3044](https://github.com/matter-labs/zksync-era/issues/3044)) ([b0ec79f](https://github.com/matter-labs/zksync-era/commit/b0ec79fcb7fa120f095d987f53c67fdab92e2c79)) +* bincode deserialize for WitnessInputData ([#3055](https://github.com/matter-labs/zksync-era/issues/3055)) ([91d0595](https://github.com/matter-labs/zksync-era/commit/91d0595631cc5f5bffc42a4b04d5015d2be659b1)) +* **external-node:** make fetcher rely on unsealed batches ([#3088](https://github.com/matter-labs/zksync-era/issues/3088)) ([bb5d147](https://github.com/matter-labs/zksync-era/commit/bb5d1470d5e1e8e69d9b79c60284ea8adaee4038)) +* **state-keeper:** ensure unsealed batch is present during IO init ([#3071](https://github.com/matter-labs/zksync-era/issues/3071)) ([bdeb411](https://github.com/matter-labs/zksync-era/commit/bdeb411c593ac3d5e16158e64c4210bb00edcb0c)) +* **vm:** Check protocol version for fast VM ([#3080](https://github.com/matter-labs/zksync-era/issues/3080)) ([a089f3f](https://github.com/matter-labs/zksync-era/commit/a089f3feb916ccc9007d9c32ec909db694b7d9f4)) +* **vm:** Prepare new VM for use in API server and fix divergences ([#2994](https://github.com/matter-labs/zksync-era/issues/2994)) ([741b77e](https://github.com/matter-labs/zksync-era/commit/741b77e080f75c6a93d3ee779b1c9ce4297618f9)) + + +### Reverts + +* **configs:** Add port parameter to ConsensusConfig ([#2986](https://github.com/matter-labs/zksync-era/issues/2986)) ([#3046](https://github.com/matter-labs/zksync-era/issues/3046)) ([abe35bf](https://github.com/matter-labs/zksync-era/commit/abe35bf7aea1120b77fdbd413d927e45da48d26c)) + +## [24.28.0](https://github.com/matter-labs/zksync-era/compare/core-v24.27.0...core-v24.28.0) (2024-10-02) + + +### Features + +* **da-clients:** add secrets ([#2954](https://github.com/matter-labs/zksync-era/issues/2954)) ([f4631e4](https://github.com/matter-labs/zksync-era/commit/f4631e4466de620cc1401b326d864cdb8b48a05d)) +* **eth-sender:** add a cap to time_in_mempool ([#2978](https://github.com/matter-labs/zksync-era/issues/2978)) ([650d42f](https://github.com/matter-labs/zksync-era/commit/650d42fea6124d80b60a8270a303d72ad6ac741e)) +* **eth-watch:** redesign to support multiple chains ([#2867](https://github.com/matter-labs/zksync-era/issues/2867)) ([aa72d84](https://github.com/matter-labs/zksync-era/commit/aa72d849c24a664acd083eba73795ddc5d31d55f)) +* Expose http debug page ([#2952](https://github.com/matter-labs/zksync-era/issues/2952)) ([e0b6488](https://github.com/matter-labs/zksync-era/commit/e0b64888aae7324aec2d40fa0cd51ea7e1450cd9)) +* **zk_toolbox:** add fees integration test to toolbox ([#2898](https://github.com/matter-labs/zksync-era/issues/2898)) ([e7ead76](https://github.com/matter-labs/zksync-era/commit/e7ead760ce0417dd36af3839ac557f7e9ab238a4)) +* **zk_toolbox:** Add SQL format for zk supervisor ([#2950](https://github.com/matter-labs/zksync-era/issues/2950)) ([540e5d7](https://github.com/matter-labs/zksync-era/commit/540e5d7554f54e80d52f1bfae37e03ca8f787baf)) + + +### Bug Fixes + +* **api:** Fix batch fee input for `debug` namespace ([#2948](https://github.com/matter-labs/zksync-era/issues/2948)) ([79b6fcf](https://github.com/matter-labs/zksync-era/commit/79b6fcf8b5d10a0ccdceb846370dd6870b6a32b5)) +* chainstack block limit exceeded 
([#2974](https://github.com/matter-labs/zksync-era/issues/2974)) ([4ffbf42](https://github.com/matter-labs/zksync-era/commit/4ffbf426de166c11aaf5d7b5ed7d199644fba229)) +* **eth-watch:** add missing check that from_block is not larger than finalized_block ([#2969](https://github.com/matter-labs/zksync-era/issues/2969)) ([3f406c7](https://github.com/matter-labs/zksync-era/commit/3f406c7d0c0e76d798c2d838abde57ca692822c0)) +* ignore unknown fields in rpc json response ([#2962](https://github.com/matter-labs/zksync-era/issues/2962)) ([692ea73](https://github.com/matter-labs/zksync-era/commit/692ea73f75a5fb9db2b4ac33ad24d20568638742)) + + +### Performance Improvements + +* **api:** More efficient gas estimation ([#2937](https://github.com/matter-labs/zksync-era/issues/2937)) ([3b69e37](https://github.com/matter-labs/zksync-era/commit/3b69e37e470dab859a55787f6cc971e7083de2fd)) + ## [24.27.0](https://github.com/matter-labs/zksync-era/compare/core-v24.26.0...core-v24.27.0) (2024-09-25) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index d841ee5b42e..4e3dc548cf8 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.27.0" # x-release-please-version +version = "25.0.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index af26d5e80ce..420a6941c81 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -462,6 +462,8 @@ pub(crate) struct OptionalENConfig { /// Gateway RPC URL, needed for operating during migration. #[allow(dead_code)] pub gateway_url: Option, + /// Interval for bridge addresses refreshing in seconds. 
+ bridge_addresses_refresh_interval_sec: Option, } impl OptionalENConfig { @@ -692,6 +694,7 @@ impl OptionalENConfig { api_namespaces, contracts_diamond_proxy_addr: None, gateway_url: enconfig.gateway_url.clone(), + bridge_addresses_refresh_interval_sec: enconfig.bridge_addresses_refresh_interval_sec, }) } @@ -918,6 +921,11 @@ impl OptionalENConfig { Duration::from_secs(self.pruning_data_retention_sec) } + pub fn bridge_addresses_refresh_interval(&self) -> Option { + self.bridge_addresses_refresh_interval_sec + .map(|n| Duration::from_secs(n.get())) + } + #[cfg(test)] fn mock() -> Self { // Set all values to their defaults @@ -1416,9 +1424,9 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { l2_erc20_default_bridge: config.remote.l2_erc20_bridge_addr, l1_shared_default_bridge: config.remote.l1_shared_bridge_proxy_addr, l2_shared_default_bridge: config.remote.l2_shared_bridge_addr, + l2_legacy_shared_bridge: config.remote.l2_legacy_shared_bridge_addr, l1_weth_bridge: config.remote.l1_weth_bridge_addr, l2_weth_bridge: config.remote.l2_weth_bridge_addr, - l2_legacy_shared_bridge: config.remote.l2_legacy_shared_bridge_addr, }, bridgehub_proxy_addr: config.remote.bridgehub_proxy_addr, state_transition_proxy_addr: config.remote.state_transition_proxy_addr, diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index e6284cb7f24..3a43d9d492d 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -11,7 +11,9 @@ use zksync_config::{ }, PostgresConfig, }; -use zksync_metadata_calculator::{MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig}; +use zksync_metadata_calculator::{ + MerkleTreeReaderConfig, MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, +}; use zksync_node_api_server::web3::Namespace; use zksync_node_framework::{ implementations::layers::{ @@ -25,7 +27,7 @@ use zksync_node_framework::{ logs_bloom_backfill::LogsBloomBackfillLayer, main_node_client::MainNodeClientLayer, main_node_fee_params_fetcher::MainNodeFeeParamsFetcherLayer, - metadata_calculator::MetadataCalculatorLayer, + metadata_calculator::{MetadataCalculatorLayer, TreeApiServerLayer}, node_storage_init::{ external_node_strategy::{ExternalNodeInitStrategyLayer, SnapshotRecoveryConfig}, NodeStorageInitializerLayer, @@ -55,6 +57,7 @@ use zksync_node_framework::{ service::{ZkStackService, ZkStackServiceBuilder}, }; use zksync_state::RocksdbStorageOptions; +use zksync_types::L2_NATIVE_TOKEN_VAULT_ADDRESS; use crate::{config::ExternalNodeConfig, metrics::framework::ExternalNodeMetricsLayer, Component}; @@ -191,8 +194,22 @@ impl ExternalNodeBuilder { // compression. const OPTIONAL_BYTECODE_COMPRESSION: bool = true; + let l2_shared_bridge_addr = self + .config + .remote + .l2_shared_bridge_addr + .context("Missing `l2_shared_bridge_addr`")?; + let l2_legacy_shared_bridge_addr = if l2_shared_bridge_addr == L2_NATIVE_TOKEN_VAULT_ADDRESS + { + // System has migrated to `L2_NATIVE_TOKEN_VAULT_ADDRESS`, use legacy shared bridge address from main node. + self.config.remote.l2_legacy_shared_bridge_addr + } else { + // System hasn't migrated on `L2_NATIVE_TOKEN_VAULT_ADDRESS`, we can safely use `l2_shared_bridge_addr`. + Some(l2_shared_bridge_addr) + }; + let persistence_layer = OutputHandlerLayer::new( - self.config.remote.l2_legacy_shared_bridge_addr, + l2_legacy_shared_bridge_addr, self.config.optional.l2_block_seal_queue_capacity, ) .with_pre_insert_txs(true) // EN requires txs to be pre-inserted. 
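The address selection above boils down to a single rule; here is a minimal standalone sketch of that rule (illustrative only, the helper function is not part of the patch, but the constant and field names mirror the diff):

use zksync_types::{Address, L2_NATIVE_TOKEN_VAULT_ADDRESS};

// Sketch of the rule applied when wiring `OutputHandlerLayer` above (hypothetical helper).
fn legacy_shared_bridge_addr(
    l2_shared_bridge_addr: Address,
    l2_legacy_shared_bridge_addr_from_main_node: Option<Address>,
) -> Option<Address> {
    if l2_shared_bridge_addr == L2_NATIVE_TOKEN_VAULT_ADDRESS {
        // The chain has migrated to the native token vault, so the legacy bridge
        // address can only come from the main node.
        l2_legacy_shared_bridge_addr_from_main_node
    } else {
        // No migration yet: the shared bridge address itself is still the legacy bridge.
        Some(l2_shared_bridge_addr)
    }
}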
@@ -369,11 +386,35 @@ impl ExternalNodeBuilder { Ok(self) } + fn add_isolated_tree_api_layer(mut self) -> anyhow::Result { + let reader_config = MerkleTreeReaderConfig { + db_path: self.config.required.merkle_tree_path.clone(), + max_open_files: self.config.optional.merkle_tree_max_open_files, + multi_get_chunk_size: self.config.optional.merkle_tree_multi_get_chunk_size, + block_cache_capacity: self.config.optional.merkle_tree_block_cache_size(), + include_indices_and_filters_in_block_cache: self + .config + .optional + .merkle_tree_include_indices_and_filters_in_block_cache, + }; + let api_config = MerkleTreeApiConfig { + port: self + .config + .tree_component + .api_port + .context("should contain tree api port")?, + }; + self.node + .add_layer(TreeApiServerLayer::new(reader_config, api_config)); + Ok(self) + } + fn add_tx_sender_layer(mut self) -> anyhow::Result { let postgres_storage_config = PostgresStorageCachesConfig { factory_deps_cache_size: self.config.optional.factory_deps_cache_size() as u64, initial_writes_cache_size: self.config.optional.initial_writes_cache_size() as u64, latest_values_cache_size: self.config.optional.latest_values_cache_size() as u64, + latest_values_max_block_lag: 20, // reasonable default }; let max_vm_concurrency = self.config.optional.vm_concurrency_limit; let tx_sender_layer = TxSenderLayer::new( @@ -426,6 +467,10 @@ impl ExternalNodeBuilder { response_body_size_limit: Some(self.config.optional.max_response_body_size()), with_extended_tracing: self.config.optional.extended_rpc_tracing, pruning_info_refresh_interval: Some(pruning_info_refresh_interval), + bridge_addresses_refresh_interval: self + .config + .optional + .bridge_addresses_refresh_interval(), polling_interval: Some(self.config.optional.polling_interval()), websocket_requests_per_minute_limit: None, // To be set by WS server layer method if required. replication_lag_limit: None, // TODO: Support replication lag limit @@ -586,11 +631,11 @@ impl ExternalNodeBuilder { self = self.add_metadata_calculator_layer(with_tree_api)?; } Component::TreeApi => { - anyhow::ensure!( - components.contains(&Component::Tree), - "Merkle tree API cannot be started without a tree component" - ); - // Do nothing, will be handled by the `Tree` component. + if components.contains(&Component::Tree) { + // Do nothing, will be handled by the `Tree` component. 
+ } else { + self = self.add_isolated_tree_api_layer()?; + } } Component::TreeFetcher => { self = self.add_tree_data_fetcher_layer()?; diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index efd76d4fa42..2155de7c020 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -17,15 +17,23 @@ mod utils; const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); const POLL_INTERVAL: Duration = Duration::from_millis(100); -#[test_casing(3, ["all", "core", "api"])] +#[test_casing(4, ["all", "core", "api", "core,tree_api"])] #[tokio::test] #[tracing::instrument] // Add args to the test logs async fn external_node_basics(components_str: &'static str) { let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging - let (env, env_handles) = utils::TestEnvironment::with_genesis_block(components_str).await; - let expected_health_components = utils::expected_health_components(&env.components); + let mut expected_health_components = utils::expected_health_components(&env.components); + let expected_shutdown_components = expected_health_components.clone(); + let has_core_or_api = env.components.0.iter().any(|component| { + [Component::Core, Component::HttpApi, Component::WsApi].contains(component) + }); + if has_core_or_api { + // The `sync_state` component doesn't signal its shutdown, but should be present in the list of components + expected_health_components.push("sync_state"); + } + let l2_client = utils::mock_l2_client(&env); let eth_client = utils::mock_eth_client(env.config.remote.user_facing_diamond_proxy); @@ -84,7 +92,7 @@ async fn external_node_basics(components_str: &'static str) { let health_data = app_health.check_health().await; tracing::info!(?health_data, "final health data"); assert_matches!(health_data.inner().status(), HealthStatus::ShutDown); - for name in expected_health_components { + for name in expected_shutdown_components { let component_health = &health_data.components()[name]; assert_matches!(component_health.status(), HealthStatus::ShutDown); } @@ -162,40 +170,3 @@ async fn running_tree_without_core_is_not_allowed() { err ); } - -#[tokio::test] -async fn running_tree_api_without_tree_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging - let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("core,tree_api").await; - - let l2_client = utils::mock_l2_client(&env); - let eth_client = utils::mock_eth_client(env.config.remote.user_facing_diamond_proxy); - - let node_handle = tokio::task::spawn_blocking(move || { - std::thread::spawn(move || { - let mut node = ExternalNodeBuilder::new(env.config)?; - inject_test_layers( - &mut node, - env.sigint_receiver, - env.app_health_sender, - eth_client, - l2_client, - ); - - // We're only interested in the error, so we drop the result. - node.build(env.components.0.into_iter().collect()).map(drop) - }) - .join() - .unwrap() - }); - - // Check that we cannot build the node without the core component. 
- let result = node_handle.await.expect("Building the node panicked"); - let err = result.expect_err("Building the node with tree api but without tree should fail"); - assert!( - err.to_string() - .contains("Merkle tree API cannot be started without a tree component"), - "Unexpected errror: {}", - err - ); -} diff --git a/core/bin/genesis_generator/src/main.rs b/core/bin/genesis_generator/src/main.rs index 4f8200b3af7..2a96cdc6c6c 100644 --- a/core/bin/genesis_generator/src/main.rs +++ b/core/bin/genesis_generator/src/main.rs @@ -87,6 +87,7 @@ async fn generate_new_config( genesis_commitment: None, bootloader_hash: Some(base_system_contracts.bootloader), default_aa_hash: Some(base_system_contracts.default_aa), + evm_emulator_hash: base_system_contracts.evm_emulator, ..genesis_config }; diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 084c8037e2c..16167975cf0 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -3,13 +3,13 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; use zksync_contracts::{ load_sys_contract, read_bootloader_code, read_bytecode_from_path, read_sys_contract_bytecode, - BaseSystemContracts, ContractLanguage, SystemContractCode, + read_yul_bytecode, BaseSystemContracts, ContractLanguage, SystemContractCode, }; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView, WriteStorage}, tracer::VmExecutionStopReason, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterface, VmInterfaceExt, }, tracers::dynamic::vm_1_5_0::DynTracer, @@ -71,12 +71,14 @@ pub static GAS_TEST_SYSTEM_CONTRACTS: Lazy = Lazy::new(|| { let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); let hash = hash_bytecode(&bytecode); + BaseSystemContracts { default_aa: SystemContractCode { code: bytes_to_be_words(bytecode), hash, }, bootloader, + evm_emulator: None, } }); @@ -169,9 +171,16 @@ pub(super) fn get_l1_txs(number_of_txs: usize) -> (Vec, Vec Vec { - read_bytecode_from_path(format!( + if let Some(contract) = read_bytecode_from_path(format!( "contracts/system-contracts/zkout/{test}.yul/contracts-preprocessed/bootloader/{test}.yul.json", - )) + )){ + contract + } else { + read_yul_bytecode( + "contracts/system-contracts/bootloader/tests/artifacts", + test + ) + } } fn default_l1_batch() -> L1BatchEnv { @@ -221,6 +230,7 @@ pub(super) fn execute_internal_transfer_test() -> u32 { let base_system_smart_contracts = BaseSystemContracts { bootloader, default_aa, + evm_emulator: None, }; let system_env = SystemEnv { @@ -231,7 +241,6 @@ pub(super) fn execute_internal_transfer_test() -> u32 { execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::default(), - pubdata_params: Default::default(), }; let eth_token_sys_contract = load_sys_contract("L2BaseToken"); @@ -262,8 +271,9 @@ pub(super) fn execute_internal_transfer_test() -> u32 { output: tracer_result.clone(), } .into_tracer_pointer(); + let mut vm: Vm<_, HistoryEnabled> = Vm::new(l1_batch, system_env, storage_view.to_rc_ptr()); - let result = vm.inspect(&mut tracer.into(), VmExecutionMode::Bootloader); + let result = vm.inspect(&mut tracer.into(), InspectExecutionMode::Bootloader); assert!(!result.result.is_failed(), "The internal call has reverted"); 
tracer_result.take() @@ -314,7 +324,6 @@ pub(super) fn execute_user_txs_in_test_gas_vm( execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::default(), - pubdata_params: Default::default(), }; let mut vm: Vm<_, HistoryEnabled> = @@ -323,7 +332,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( let mut total_gas_refunded = 0; for tx in txs { vm.push_transaction(tx); - let tx_execution_result = vm.execute(VmExecutionMode::OneTx); + let tx_execution_result = vm.execute(InspectExecutionMode::OneTx); total_gas_refunded += tx_execution_result.refunds.gas_refunded; if !accept_failure { @@ -335,7 +344,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( } } - let result = vm.execute(VmExecutionMode::Bootloader); + let result = vm.execute(InspectExecutionMode::Bootloader); let metrics = result.get_execution_metrics(None); VmSpentResourcesResult { diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 22db202585d..72fdc8de5cd 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -49,7 +49,7 @@ struct Cli { /// Comma-separated list of components to launch. #[arg( long, - default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,vm_runner_protective_reads" + default_value = "api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,vm_runner_protective_reads" )] components: ComponentsToRun, /// Path to the yaml config. If set, it will be used instead of env vars. diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index e684a72a45c..0ac50e624cd 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -4,8 +4,8 @@ use anyhow::Context; use zksync_config::{ configs::{ - da_client::DAClientConfig, eth_sender::PubdataSendingMode, gateway::GatewayChainConfig, - secrets::DataAvailabilitySecrets, wallets::Wallets, GeneralConfig, Secrets, + da_client::DAClientConfig, gateway::GatewayChainConfig, secrets::DataAvailabilitySecrets, + wallets::Wallets, GeneralConfig, Secrets, }, ContractsConfig, GenesisConfig, }; @@ -19,9 +19,7 @@ use zksync_node_framework::{ implementations::layers::{ base_token::{ base_token_ratio_persister::BaseTokenRatioPersisterLayer, - base_token_ratio_provider::BaseTokenRatioProviderLayer, - coingecko_client::CoingeckoClientLayer, forced_price_client::ForcedPriceClientLayer, - no_op_external_price_api_client::NoOpExternalPriceApiClientLayer, + base_token_ratio_provider::BaseTokenRatioProviderLayer, ExternalPriceApiLayer, }, circuit_breaker_checker::CircuitBreakerCheckerLayer, commitment_generator::CommitmentGeneratorLayer, @@ -57,7 +55,6 @@ use zksync_node_framework::{ main_batch_executor::MainBatchExecutorLayer, mempool_io::MempoolIOLayer, output_handler::OutputHandlerLayer, RocksdbStorageOptions, StateKeeperLayer, }, - tee_verifier_input_producer::TeeVerifierInputProducerLayer, vm_runner::{ bwip::BasicWitnessInputProducerLayer, playground::VmPlaygroundLayer, protective_reads::ProtectiveReadsWriterLayer, @@ -72,7 +69,9 @@ use zksync_node_framework::{ }, service::{ZkStackService, ZkStackServiceBuilder}, }; -use zksync_types::{settlement::SettlementMode, SHARED_BRIDGE_ETHER_TOKEN_ADDRESS}; +use zksync_types::{ + pubdata_da::PubdataSendingMode, settlement::SettlementMode, SHARED_BRIDGE_ETHER_TOKEN_ADDRESS, +}; use 
zksync_vlog::prometheus::PrometheusExporterConfig; /// Macro that looks into a path to fetch an optional config, @@ -192,7 +191,7 @@ impl MainNodeBuilder { .add_layer(BaseTokenRatioProviderLayer::new(base_token_adjuster_config)); } let state_keeper_config = try_load_config!(self.configs.state_keeper_config); - let l1_gas_layer = L1GasLayer::new(state_keeper_config); + let l1_gas_layer = L1GasLayer::new(&state_keeper_config); self.node.add_layer(l1_gas_layer); Ok(self) } @@ -247,9 +246,9 @@ impl MainNodeBuilder { self.genesis_config.l2_chain_id, sk_config.clone(), try_load_config!(self.configs.mempool_config), - self.contracts_config.clone(), - self.genesis_config.clone(), try_load_config!(wallets.state_keeper), + self.contracts_config.l2_da_validator_addr, + self.genesis_config.l1_batch_commit_data_generator_mode, ); let db_config = try_load_config!(self.configs.db_config); let experimental_vm_config = self @@ -296,6 +295,7 @@ impl MainNodeBuilder { self.node.add_layer(ProofDataHandlerLayer::new( try_load_config!(self.configs.proof_data_handler_config), self.genesis_config.l1_batch_commit_data_generator_mode, + self.genesis_config.l2_chain_id, )); Ok(self) } @@ -313,6 +313,7 @@ impl MainNodeBuilder { factory_deps_cache_size: rpc_config.factory_deps_cache_size() as u64, initial_writes_cache_size: rpc_config.initial_writes_cache_size() as u64, latest_values_cache_size: rpc_config.latest_values_cache_size() as u64, + latest_values_max_block_lag: rpc_config.latest_values_max_block_lag(), }; // On main node we always use master pool sink. @@ -500,14 +501,6 @@ impl MainNodeBuilder { Ok(self) } - fn add_tee_verifier_input_producer_layer(mut self) -> anyhow::Result { - self.node.add_layer(TeeVerifierInputProducerLayer::new( - self.genesis_config.l2_chain_id, - )); - - Ok(self) - } - fn add_da_client_layer(mut self) -> anyhow::Result { let Some(da_client_config) = self.configs.da_client_config.clone() else { tracing::warn!("No config for DA client, using the NoDA client"); @@ -563,24 +556,8 @@ impl MainNodeBuilder { fn add_external_api_client_layer(mut self) -> anyhow::Result { let config = try_load_config!(self.configs.external_price_api_client_config); - match config.source.as_str() { - CoingeckoClientLayer::CLIENT_NAME => { - self.node.add_layer(CoingeckoClientLayer::new(config)); - } - NoOpExternalPriceApiClientLayer::CLIENT_NAME => { - self.node.add_layer(NoOpExternalPriceApiClientLayer); - } - ForcedPriceClientLayer::CLIENT_NAME => { - self.node.add_layer(ForcedPriceClientLayer::new(config)); - } - _ => { - anyhow::bail!( - "Unknown external price API client source: {}", - config.source - ); - } - } - + self.node + .add_layer(ExternalPriceApiLayer::try_from(config)?); Ok(self) } @@ -750,9 +727,6 @@ impl MainNodeBuilder { Component::EthTxManager => { self = self.add_eth_tx_manager_layer()?; } - Component::TeeVerifierInputProducer => { - self = self.add_tee_verifier_input_producer_layer()?; - } Component::Housekeeper => { self = self .add_house_keeper_layer()? 
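The match removed from `add_external_api_client_layer` above wired the three price-API sources by hand; `ExternalPriceApiLayer::try_from(config)` now folds that selection into one layer. A hypothetical sketch of what such a `TryFrom` impl could look like follows (the `ExternalPriceApiLayer` variants and the config type name are assumptions; only the `CLIENT_NAME` constants and the error message come from the removed code):

// Hypothetical sketch, not the actual node-framework implementation.
impl TryFrom<ExternalPriceApiClientConfig> for ExternalPriceApiLayer {
    type Error = anyhow::Error;

    fn try_from(config: ExternalPriceApiClientConfig) -> anyhow::Result<Self> {
        // Dispatch on the configured source, as the removed per-layer match did.
        Ok(match config.source.as_str() {
            CoingeckoClientLayer::CLIENT_NAME => Self::Coingecko(CoingeckoClientLayer::new(config)),
            ForcedPriceClientLayer::CLIENT_NAME => Self::Forced(ForcedPriceClientLayer::new(config)),
            NoOpExternalPriceApiClientLayer::CLIENT_NAME => Self::NoOp(NoOpExternalPriceApiClientLayer),
            other => anyhow::bail!("Unknown external price API client source: {other}"),
        })
    }
}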
diff --git a/core/bin/zksync_tee_prover/Cargo.toml b/core/bin/zksync_tee_prover/Cargo.toml index 85908eebeaa..b853da348ee 100644 --- a/core/bin/zksync_tee_prover/Cargo.toml +++ b/core/bin/zksync_tee_prover/Cargo.toml @@ -15,7 +15,7 @@ publish = false anyhow.workspace = true async-trait.workspace = true envy.workspace = true -reqwest.workspace = true +reqwest = { workspace = true, features = ["zstd"] } secp256k1 = { workspace = true, features = ["serde"] } serde = { workspace = true, features = ["derive"] } thiserror.workspace = true diff --git a/core/bin/zksync_tee_prover/src/api_client.rs b/core/bin/zksync_tee_prover/src/api_client.rs index 13fbc1ba886..ffc2839b8d3 100644 --- a/core/bin/zksync_tee_prover/src/api_client.rs +++ b/core/bin/zksync_tee_prover/src/api_client.rs @@ -1,13 +1,10 @@ -use reqwest::Client; +use reqwest::{Client, Response, StatusCode}; use secp256k1::{ecdsa::Signature, PublicKey}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::Serialize; use url::Url; use zksync_basic_types::H256; use zksync_prover_interface::{ - api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitTeeProofRequest, - SubmitTeeProofResponse, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, - }, + api::{RegisterTeeAttestationRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest}, inputs::TeeVerifierInput, outputs::L1BatchTeeProofForL1, }; @@ -31,10 +28,9 @@ impl TeeApiClient { } } - async fn post(&self, endpoint: S, request: Req) -> Result + async fn post(&self, endpoint: S, request: Req) -> Result where Req: Serialize + std::fmt::Debug, - Resp: DeserializeOwned, S: AsRef, { let url = self.api_base_url.join(endpoint.as_ref()).unwrap(); @@ -46,9 +42,7 @@ impl TeeApiClient { .json(&request) .send() .await? - .error_for_status()? - .json::() - .await + .error_for_status() } /// Registers the attestation quote with the TEE prover interface API, effectively proving that @@ -63,8 +57,7 @@ impl TeeApiClient { attestation: attestation_quote_bytes, pubkey: public_key.serialize().to_vec(), }; - self.post::<_, RegisterTeeAttestationResponse, _>("/tee/register_attestation", request) - .await?; + self.post("/tee/register_attestation", request).await?; tracing::info!( "Attestation quote was successfully registered for the public key {}", public_key @@ -77,12 +70,17 @@ impl TeeApiClient { pub async fn get_job( &self, tee_type: TeeType, - ) -> Result>, TeeProverError> { + ) -> Result, TeeProverError> { let request = TeeProofGenerationDataRequest { tee_type }; - let response = self - .post::<_, TeeProofGenerationDataResponse, _>("/tee/proof_inputs", request) - .await?; - Ok(response.0) + let response = self.post("/tee/proof_inputs", request).await?; + match response.status() { + StatusCode::OK => Ok(Some(response.json::().await?)), + StatusCode::NO_CONTENT => Ok(None), + _ => response + .json::>() + .await + .map_err(TeeProverError::Request), + } } /// Submits the successfully verified proof to the TEE prover interface API. 
@@ -101,7 +99,7 @@ impl TeeApiClient { tee_type, })); let observer = METRICS.proof_submitting_time.start(); - self.post::<_, SubmitTeeProofResponse, _>( + self.post( format!("/tee/submit_proofs/{batch_number}").as_str(), request, ) diff --git a/core/bin/zksync_tee_prover/src/main.rs b/core/bin/zksync_tee_prover/src/main.rs index 70c6f888185..aa0881011da 100644 --- a/core/bin/zksync_tee_prover/src/main.rs +++ b/core/bin/zksync_tee_prover/src/main.rs @@ -45,11 +45,12 @@ fn main() -> anyhow::Result<()> { .add_layer(SigintHandlerLayer) .add_layer(TeeProverLayer::new(tee_prover_config)); - if let Some(gateway) = prometheus_config.gateway_endpoint() { - let exporter_config = - PrometheusExporterConfig::push(gateway, prometheus_config.push_interval()); - builder.add_layer(PrometheusExporterLayer(exporter_config)); - } + let exporter_config = if let Some(gateway) = prometheus_config.gateway_endpoint() { + PrometheusExporterConfig::push(gateway, prometheus_config.push_interval()) + } else { + PrometheusExporterConfig::pull(prometheus_config.listener_port) + }; + builder.add_layer(PrometheusExporterLayer(exporter_config)); builder.build().run(observability_guard)?; Ok(()) diff --git a/core/bin/zksync_tee_prover/src/metrics.rs b/core/bin/zksync_tee_prover/src/metrics.rs index 9f535967f79..769a8bbc7e0 100644 --- a/core/bin/zksync_tee_prover/src/metrics.rs +++ b/core/bin/zksync_tee_prover/src/metrics.rs @@ -2,7 +2,7 @@ use std::time::Duration; -use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; +use vise::{Buckets, Counter, Gauge, Histogram, Metrics, Unit}; #[derive(Debug, Metrics)] #[metrics(prefix = "tee_prover")] @@ -13,7 +13,7 @@ pub(crate) struct TeeProverMetrics { pub proof_generation_time: Histogram, #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] pub proof_submitting_time: Histogram, - pub network_errors_counter: Gauge, + pub network_errors_counter: Counter, pub last_batch_number_processed: Gauge, } diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 1511f0c88e3..5d22d1e7c63 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -90,9 +90,9 @@ impl TeeProver { } async fn step(&self, public_key: &PublicKey) -> Result, TeeProverError> { - match self.api_client.get_job(self.config.tee_type).await? 
{ - Some(job) => { - let (signature, batch_number, root_hash) = self.verify(*job)?; + match self.api_client.get_job(self.config.tee_type).await { + Ok(Some(job)) => { + let (signature, batch_number, root_hash) = self.verify(job)?; self.api_client .submit_proof( batch_number, @@ -104,10 +104,11 @@ impl TeeProver { .await?; Ok(Some(batch_number)) } - None => { + Ok(None) => { tracing::trace!("There are currently no pending batches to be proven"); Ok(None) } + Err(err) => Err(err), } } } @@ -154,7 +155,7 @@ impl Task for TeeProver { } } Err(err) => { - METRICS.network_errors_counter.inc_by(1); + METRICS.network_errors_counter.inc(); if !err.is_retriable() || retries > config.max_retries { return Err(err.into()); } diff --git a/core/lib/basic_types/src/api_key.rs b/core/lib/basic_types/src/api_key.rs new file mode 100644 index 00000000000..eadf4e9051b --- /dev/null +++ b/core/lib/basic_types/src/api_key.rs @@ -0,0 +1,20 @@ +use std::str::FromStr; + +use secrecy::{ExposeSecret, Secret}; + +#[derive(Debug, Clone)] +pub struct APIKey(pub Secret); + +impl PartialEq for APIKey { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for APIKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(APIKey(s.parse()?)) + } +} diff --git a/core/lib/basic_types/src/commitment.rs b/core/lib/basic_types/src/commitment.rs index 56d36b22aff..0eed46aad78 100644 --- a/core/lib/basic_types/src/commitment.rs +++ b/core/lib/basic_types/src/commitment.rs @@ -1,13 +1,12 @@ use std::str::FromStr; -use ethabi::Address; use serde::{Deserialize, Serialize}; use strum::{Display, EnumIter}; use crate::{ ethabi, web3::contract::{Detokenize, Error as ContractError}, - U256, + Address, U256, }; #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, EnumIter, Display)] @@ -62,6 +61,5 @@ impl FromStr for L1BatchCommitmentMode { #[derive(Default, Copy, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct PubdataParams { pub l2_da_validator_address: Address, - // TOODO: maybe rename / use new type. pub pubdata_type: L1BatchCommitmentMode, } diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index b94f7fbcd27..d1180048efb 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -13,6 +13,7 @@ use std::{ str::FromStr, }; +use anyhow::Context as _; pub use ethabi::{ self, ethereum_types::{ @@ -23,11 +24,13 @@ use serde::{de, Deserialize, Deserializer, Serialize}; #[macro_use] mod macros; +pub mod api_key; pub mod basic_fri_types; pub mod commitment; pub mod network; pub mod protocol_version; pub mod prover_dal; +pub mod pubdata_da; pub mod seed_phrase; pub mod settlement; pub mod tee_types; @@ -35,6 +38,21 @@ pub mod url; pub mod vm; pub mod web3; +/// Parses H256 from a slice of bytes. +pub fn parse_h256(bytes: &[u8]) -> anyhow::Result { + Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) +} + +/// Parses H256 from an optional slice of bytes. +pub fn parse_h256_opt(bytes: Option<&[u8]>) -> anyhow::Result { + parse_h256(bytes.context("missing data")?) +} + +/// Parses H160 from a slice of bytes. +pub fn parse_h160(bytes: &[u8]) -> anyhow::Result { + Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) +} + /// Account place in the global state tree is uniquely identified by its address. /// Binary this type is represented by 160 bit big-endian representation of account address. 
#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash, Ord, PartialOrd)] diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index 132b78b51b5..88513360916 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -69,15 +69,17 @@ pub enum ProtocolVersionId { Version24, Version25, Version26, + Version27, + Version28, } impl ProtocolVersionId { pub const fn latest() -> Self { - Self::Version25 + Self::Version27 } pub const fn next() -> Self { - Self::Version26 + Self::Version28 } pub fn try_from_packed_semver(packed_semver: U256) -> Result { @@ -120,8 +122,10 @@ impl ProtocolVersionId { ProtocolVersionId::Version22 => VmVersion::Vm1_4_2, ProtocolVersionId::Version23 => VmVersion::Vm1_5_0SmallBootloaderMemory, ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, - ProtocolVersionId::Version25 => VmVersion::VmGateway, - ProtocolVersionId::Version26 => VmVersion::VmGateway, + ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version27 => VmVersion::VmGateway, + ProtocolVersionId::Version28 => VmVersion::VmGateway, } } @@ -140,7 +144,7 @@ impl ProtocolVersionId { } pub fn is_pre_gateway(&self) -> bool { - self <= &Self::Version24 + self <= &Self::Version26 } pub fn is_1_4_0(&self) -> bool { @@ -280,8 +284,10 @@ impl From for VmVersion { ProtocolVersionId::Version22 => VmVersion::Vm1_4_2, ProtocolVersionId::Version23 => VmVersion::Vm1_5_0SmallBootloaderMemory, ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, - ProtocolVersionId::Version25 => VmVersion::VmGateway, - ProtocolVersionId::Version26 => VmVersion::VmGateway, + ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version27 => VmVersion::VmGateway, + ProtocolVersionId::Version28 => VmVersion::VmGateway, } } } diff --git a/core/lib/types/src/pubdata_da.rs b/core/lib/basic_types/src/pubdata_da.rs similarity index 54% rename from core/lib/types/src/pubdata_da.rs rename to core/lib/basic_types/src/pubdata_da.rs index bc7dc55e53d..3f042da98ac 100644 --- a/core/lib/types/src/pubdata_da.rs +++ b/core/lib/basic_types/src/pubdata_da.rs @@ -1,15 +1,17 @@ +//! Types related to data availability. + use chrono::{DateTime, Utc}; use num_enum::TryFromPrimitive; use serde::{Deserialize, Serialize}; -use zksync_basic_types::L1BatchNumber; -use zksync_config::configs::eth_sender::PubdataSendingMode; + +use crate::L1BatchNumber; /// Enum holding the current values used for DA Layers. #[repr(u8)] -#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] -#[derive(TryFromPrimitive)] -pub enum PubdataDA { +#[derive(Debug, Clone, Copy, Default, PartialEq, Deserialize, Serialize, TryFromPrimitive)] +pub enum PubdataSendingMode { /// Pubdata is sent to the L1 as a tx calldata. + #[default] Calldata = 0, /// Pubdata is sent to L1 as EIP-4844 blobs. 
Blobs, @@ -19,17 +21,6 @@ pub enum PubdataDA { RelayedL2Calldata, } -impl From for PubdataDA { - fn from(value: PubdataSendingMode) -> Self { - match value { - PubdataSendingMode::Calldata => PubdataDA::Calldata, - PubdataSendingMode::Blobs => PubdataDA::Blobs, - PubdataSendingMode::Custom => PubdataDA::Custom, - PubdataSendingMode::RelayedL2Calldata => PubdataDA::RelayedL2Calldata, - } - } -} - /// Represents a blob in the data availability layer. #[derive(Debug, Clone)] pub struct DataAvailabilityBlob { diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index ecbe73f785b..aa7c4967033 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -21,6 +21,35 @@ mod tests; pub type Index = U64; +/// Number that can be either hex-encoded or decimal. +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[serde(untagged)] +pub enum U64Number { + Hex(U64), + Number(u64), +} + +impl From for u64 { + fn from(value: U64Number) -> Self { + match value { + U64Number::Hex(number) => number.as_u64(), + U64Number::Number(number) => number, + } + } +} + +impl From for U64Number { + fn from(value: u64) -> Self { + Self::Number(value) + } +} + +impl From for U64Number { + fn from(value: U64) -> Self { + Self::Hex(value) + } +} + // `Signature`, `keccak256`: from `web3::signing` /// A struct that represents the components of a secp256k1 signature. diff --git a/core/lib/basic_types/src/web3/tests.rs b/core/lib/basic_types/src/web3/tests.rs index 7f85bf12eb8..70805ab8b39 100644 --- a/core/lib/basic_types/src/web3/tests.rs +++ b/core/lib/basic_types/src/web3/tests.rs @@ -128,3 +128,13 @@ fn test_bytes_serde_json() { let decoded: Bytes = serde_json::from_str(&encoded).unwrap(); assert_eq!(original, decoded); } + +#[test] +fn deserializing_u64_number() { + let number: U64Number = serde_json::from_value(serde_json::json!(123)).unwrap(); + assert_eq!(u64::from(number), 123); + let number: U64Number = serde_json::from_value(serde_json::json!("0x123")).unwrap(); + assert_eq!(u64::from(number), 0x123); + let number: U64Number = serde_json::from_value(serde_json::json!("123")).unwrap(); + assert_eq!(u64::from(number), 0x123); +} diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index d1ab5ce8438..af39e5159ba 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -23,6 +23,10 @@ anyhow.workspace = true rand.workspace = true secrecy.workspace = true serde = { workspace = true, features = ["derive"] } +time = { workspace = true, features = ["serde-human-readable"] } +strum.workspace = true +strum_macros.workspace = true +vise.workspace = true [dev-dependencies] serde_json.workspace = true diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index dab4c4fa037..21cf44cc073 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -189,6 +189,10 @@ pub struct Web3JsonRpcConfig { /// Latest values cache size in MiBs. The default value is 128 MiB. If set to 0, the latest /// values cache will be disabled. pub latest_values_cache_size_mb: Option, + /// Maximum lag in the number of blocks for the latest values cache after which the cache is reset. Greater values + /// lead to increased the cache update latency, i.e., less storage queries being processed by the cache. OTOH, smaller values + /// can lead to spurious resets when Postgres lags for whatever reason (e.g., when sealing L1 batches). 
+ pub latest_values_max_block_lag: Option, /// Limit for fee history block range. pub fee_history_limit: Option, /// Maximum number of requests in a single batch JSON RPC request. Default is 500. @@ -246,20 +250,21 @@ impl Web3JsonRpcConfig { estimate_gas_acceptable_overestimation: 1000, estimate_gas_optimize_search: false, max_tx_size: 1000000, - vm_execution_cache_misses_limit: Default::default(), - vm_concurrency_limit: Default::default(), - factory_deps_cache_size_mb: Default::default(), - initial_writes_cache_size_mb: Default::default(), - latest_values_cache_size_mb: Default::default(), - fee_history_limit: Default::default(), - max_batch_request_size: Default::default(), - max_response_body_size_mb: Default::default(), + vm_execution_cache_misses_limit: None, + vm_concurrency_limit: None, + factory_deps_cache_size_mb: None, + initial_writes_cache_size_mb: None, + latest_values_cache_size_mb: None, + latest_values_max_block_lag: None, + fee_history_limit: None, + max_batch_request_size: None, + max_response_body_size_mb: None, max_response_body_size_overrides_mb: MaxResponseSizeOverrides::empty(), - websocket_requests_per_minute_limit: Default::default(), - mempool_cache_update_interval: Default::default(), - mempool_cache_size: Default::default(), + websocket_requests_per_minute_limit: None, + mempool_cache_update_interval: None, + mempool_cache_size: None, tree_api_url: None, - whitelisted_tokens_for_aa: Default::default(), + whitelisted_tokens_for_aa: vec![], api_namespaces: None, extended_api_tracing: false, settlement_layer_url: None, @@ -312,6 +317,11 @@ impl Web3JsonRpcConfig { self.latest_values_cache_size_mb.unwrap_or(128) * super::BYTES_IN_MEGABYTE } + /// Returns the maximum lag in the number of blocks for the latest values cache. + pub fn latest_values_max_block_lag(&self) -> u32 { + self.latest_values_max_block_lag.map_or(20, NonZeroU32::get) + } + pub fn fee_history_limit(&self) -> u64 { self.fee_history_limit.unwrap_or(1024) } diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 7e33f6964bb..c117064dbc4 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -138,6 +138,8 @@ pub struct StateKeeperConfig { pub bootloader_hash: Option, #[deprecated(note = "Use GenesisConfig::default_aa_hash instead")] pub default_aa_hash: Option, + #[deprecated(note = "Use GenesisConfig::evm_emulator_hash instead")] + pub evm_emulator_hash: Option, #[deprecated(note = "Use GenesisConfig::l1_batch_commit_data_generator_mode instead")] #[serde(default)] pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, @@ -178,6 +180,7 @@ impl StateKeeperConfig { protective_reads_persistence_enabled: true, bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup, } } diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 918d8f4adab..7f5a0f56aa1 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -115,6 +115,7 @@ impl RpcConfig { /// Config (shared between main node and external node). #[derive(Clone, Debug, PartialEq)] pub struct ConsensusConfig { + pub port: Option, /// Local socket address to listen for the incoming connections. 
pub server_addr: std::net::SocketAddr, /// Public address of this node (should forward to `server_addr`) diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index 24f50243758..1d49a09d213 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -45,16 +45,12 @@ pub struct ContractsConfig { pub ecosystem_contracts: Option, // Used by the RPC API and by the node builder in wiring the BaseTokenRatioProvider layer. pub base_token_addr: Option<Address>, - // FIXME: maybe refactor pub user_facing_bridgehub_proxy_addr: Option<Address>, pub user_facing_diamond_proxy_addr: Option<Address>, - - pub l2_da_validator_addr: Option<Address>, - pub chain_admin_addr: Option<Address>, - pub settlement_layer: Option, + pub l2_da_validator_addr: Option<Address>
, } impl ContractsConfig { @@ -68,7 +64,7 @@ impl ContractsConfig { l2_erc20_bridge_addr: Some(Address::repeat_byte(0x0c)), l1_shared_bridge_proxy_addr: Some(Address::repeat_byte(0x0e)), l2_shared_bridge_addr: Some(Address::repeat_byte(0x0f)), - l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(0xff)), + l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(0x19)), l1_weth_bridge_proxy_addr: Some(Address::repeat_byte(0x0b)), l2_weth_bridge_addr: Some(Address::repeat_byte(0x0c)), l2_testnet_paymaster_addr: Some(Address::repeat_byte(0x11)), @@ -79,8 +75,8 @@ impl ContractsConfig { user_facing_bridgehub_proxy_addr: Some(Address::repeat_byte(0x15)), user_facing_diamond_proxy_addr: Some(Address::repeat_byte(0x16)), chain_admin_addr: Some(Address::repeat_byte(0x18)), - l2_da_validator_addr: Some(Address::repeat_byte(0x19)), settlement_layer: Some(0), + l2_da_validator_addr: Some(Address::repeat_byte(0x1a)), } } } diff --git a/core/lib/config/src/configs/da_client/avail.rs b/core/lib/config/src/configs/da_client/avail.rs index 590dc5fef18..b8e9db0f393 100644 --- a/core/lib/config/src/configs/da_client/avail.rs +++ b/core/lib/config/src/configs/da_client/avail.rs @@ -1,16 +1,38 @@ use serde::Deserialize; -use zksync_basic_types::seed_phrase::SeedPhrase; +use zksync_basic_types::{api_key::APIKey, seed_phrase::SeedPhrase}; + +pub const AVAIL_GAS_RELAY_CLIENT_NAME: &str = "GasRelay"; +pub const AVAIL_FULL_CLIENT_NAME: &str = "FullClient"; + +#[derive(Clone, Debug, PartialEq, Deserialize)] +#[serde(tag = "avail_client")] +pub enum AvailClientConfig { + FullClient(AvailDefaultConfig), + GasRelay(AvailGasRelayConfig), +} #[derive(Clone, Debug, PartialEq, Deserialize)] pub struct AvailConfig { - pub api_node_url: String, pub bridge_api_url: String, - pub app_id: u32, pub timeout: usize, + #[serde(flatten)] + pub config: AvailClientConfig, +} + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailDefaultConfig { + pub api_node_url: String, + pub app_id: u32, +} + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailGasRelayConfig { + pub gas_relay_api_url: String, pub max_retries: usize, } #[derive(Clone, Debug, PartialEq)] pub struct AvailSecrets { pub seed_phrase: Option, + pub gas_relay_api_key: Option, } diff --git a/core/lib/config/src/configs/en_config.rs b/core/lib/config/src/configs/en_config.rs index 7f130e3539a..4cab47b0779 100644 --- a/core/lib/config/src/configs/en_config.rs +++ b/core/lib/config/src/configs/en_config.rs @@ -1,4 +1,4 @@ -use std::num::NonZeroUsize; +use std::num::{NonZeroU64, NonZeroUsize}; use serde::Deserialize; use zksync_basic_types::{ @@ -19,4 +19,5 @@ pub struct ENConfig { pub main_node_rate_limit_rps: Option, pub gateway_url: Option, + pub bridge_addresses_refresh_interval_sec: Option, } diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 51f7736fbb7..ab12642c7ba 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context as _; use serde::Deserialize; -use zksync_basic_types::{settlement::SettlementMode, H256}; +use zksync_basic_types::{pubdata_da::PubdataSendingMode, settlement::SettlementMode, H256}; use zksync_crypto_primitives::K256PrivateKey; use crate::EthWatchConfig; @@ -44,6 +44,7 @@ impl EthConfig { tx_aggregation_only_prove_and_execute: false, ignore_db_nonce: None, priority_tree_start_index: Some(0), + time_in_mempool_in_l1_blocks_cap: 1800, }), gas_adjuster: 
Some(GasAdjusterConfig { default_priority_fee_per_gas: 1000000000, @@ -81,15 +82,6 @@ pub enum ProofLoadingMode { FriProofFromGcs, } -#[derive(Debug, Deserialize, Clone, Copy, PartialEq, Default)] -pub enum PubdataSendingMode { - #[default] - Calldata, - Blobs, - Custom, - RelayedL2Calldata, -} - #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct SenderConfig { pub aggregated_proof_sizes: Vec, @@ -129,11 +121,13 @@ pub struct SenderConfig { /// special mode specifically for gateway migration to decrease number of non-executed batches #[serde(default = "SenderConfig::default_tx_aggregation_only_prove_and_execute")] pub tx_aggregation_only_prove_and_execute: bool, - /// Used to ignore db nonce check for sender and only use the RPC one. pub ignore_db_nonce: Option, /// Index of the priority operation to start building the `PriorityMerkleTree` from. pub priority_tree_start_index: Option, + /// Cap of time in mempool for price calculations + #[serde(default = "SenderConfig::default_time_in_mempool_in_l1_blocks_cap")] + pub time_in_mempool_in_l1_blocks_cap: u32, } impl SenderConfig { @@ -183,6 +177,13 @@ impl SenderConfig { const fn default_tx_aggregation_only_prove_and_execute() -> bool { false } + + pub const fn default_time_in_mempool_in_l1_blocks_cap() -> u32 { + let blocks_per_hour = 3600 / 12; + // we cap it at 6h to not allow nearly infinite values when a tx is stuck for a long time + // 1,001 ^ 1800 ~= 6, so by default we cap exponential price formula at roughly median * 6 + blocks_per_hour * 6 + } } #[derive(Debug, Deserialize, Copy, Clone, PartialEq, Default)] @@ -192,8 +193,10 @@ pub struct GasAdjusterConfig { /// Number of blocks collected by GasAdjuster from which base_fee median is taken pub max_base_fee_samples: usize, /// Parameter of the transaction base_fee_per_gas pricing formula + #[serde(default = "GasAdjusterConfig::default_pricing_formula_parameter_a")] pub pricing_formula_parameter_a: f64, /// Parameter of the transaction base_fee_per_gas pricing formula + #[serde(default = "GasAdjusterConfig::default_pricing_formula_parameter_b")] pub pricing_formula_parameter_b: f64, /// Parameter by which the base fee will be multiplied for internal purposes pub internal_l1_pricing_multiplier: f64, @@ -240,4 +243,12 @@ impl GasAdjusterConfig { pub const fn default_internal_pubdata_pricing_multiplier() -> f64 { 1.0 } + + pub const fn default_pricing_formula_parameter_a() -> f64 { + 1.1 + } + + pub const fn default_pricing_formula_parameter_b() -> f64 { + 1.001 + } } diff --git a/core/lib/config/src/configs/genesis.rs b/core/lib/config/src/configs/genesis.rs index 6c4bacc3a6e..9e1ffbd87cb 100644 --- a/core/lib/config/src/configs/genesis.rs +++ b/core/lib/config/src/configs/genesis.rs @@ -17,6 +17,7 @@ pub struct GenesisConfig { pub genesis_commitment: Option, pub bootloader_hash: Option, pub default_aa_hash: Option, + pub evm_emulator_hash: Option, pub l1_chain_id: L1ChainId, pub sl_chain_id: Option, pub l2_chain_id: L2ChainId, @@ -49,6 +50,7 @@ impl GenesisConfig { genesis_commitment: Some(H256::repeat_byte(0x17)), bootloader_hash: Default::default(), default_aa_hash: Default::default(), + evm_emulator_hash: Default::default(), l1_chain_id: L1ChainId(9), sl_chain_id: None, protocol_version: Some(ProtocolSemanticVersion { diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 9ece81dc7cd..ac570589d9c 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -23,7 +23,7 @@ pub use self::{ genesis::GenesisConfig, 
object_store::ObjectStoreConfig, observability::{ObservabilityConfig, OpentelemetryConfig}, - proof_data_handler::ProofDataHandlerConfig, + proof_data_handler::{ProofDataHandlerConfig, TeeConfig}, prover_job_monitor::ProverJobMonitorConfig, pruning::PruningConfig, secrets::{DatabaseSecrets, L1Secrets, Secrets}, @@ -62,6 +62,7 @@ pub mod house_keeper; pub mod object_store; pub mod observability; pub mod proof_data_handler; +pub mod prover_autoscaler; pub mod prover_job_monitor; pub mod pruning; pub mod secrets; diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs index de7f6969b05..1094b1bb180 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -1,12 +1,43 @@ use std::time::Duration; use serde::Deserialize; +use zksync_basic_types::L1BatchNumber; + +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct TeeConfig { + /// If true, the TEE support is enabled. + pub tee_support: bool, + /// All batches before this one are considered to be processed. + pub first_tee_processed_batch: L1BatchNumber, +} + +impl Default for TeeConfig { + fn default() -> Self { + TeeConfig { + tee_support: Self::default_tee_support(), + first_tee_processed_batch: Self::default_first_tee_processed_batch(), + } + } +} + +impl TeeConfig { + pub fn default_tee_support() -> bool { + false + } + + pub fn default_first_tee_processed_batch() -> L1BatchNumber { + L1BatchNumber(0) + } +} #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ProofDataHandlerConfig { pub http_port: u16, pub proof_generation_timeout_in_secs: u16, - pub tee_support: bool, + #[serde(skip)] + // ^ Filled in separately in `Self::from_env()`. We cannot use `serde(flatten)` because it + // doesn't work with `envy`: https://github.com/softprops/envy/issues/26 + pub tee_config: TeeConfig, } impl ProofDataHandlerConfig { diff --git a/core/lib/config/src/configs/prover_autoscaler.rs b/core/lib/config/src/configs/prover_autoscaler.rs new file mode 100644 index 00000000000..b24a1a26651 --- /dev/null +++ b/core/lib/config/src/configs/prover_autoscaler.rs @@ -0,0 +1,128 @@ +use std::collections::HashMap; + +use serde::Deserialize; +use strum::Display; +use strum_macros::EnumString; +use time::Duration; +use vise::EncodeLabelValue; + +use crate::configs::ObservabilityConfig; + +/// Config used for running ProverAutoscaler (both Scaler and Agent). +#[derive(Debug, Clone, PartialEq)] +pub struct ProverAutoscalerConfig { + /// Amount of time ProverJobMonitor will wait all it's tasks to finish. + // TODO: find a way to use #[serde(with = "humantime_serde")] with time::Duration. + pub graceful_shutdown_timeout: Duration, + pub agent_config: Option, + pub scaler_config: Option, + pub observability: Option, +} + +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct ProverAutoscalerAgentConfig { + /// Port for prometheus metrics connection. + pub prometheus_port: u16, + /// HTTP port for global Scaler to connect to the Agent running in a cluster. + pub http_port: u16, + /// List of namespaces to watch. + #[serde(default = "ProverAutoscalerAgentConfig::default_namespaces")] + pub namespaces: Vec, + /// Watched cluster name. Also can be set via flag. + pub cluster_name: Option, + /// If dry-run enabled don't do any k8s updates, just report success. 
+ #[serde(default = "ProverAutoscalerAgentConfig::default_dry_run")] + pub dry_run: bool, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Default)] +pub struct ProverAutoscalerScalerConfig { + /// Port for prometheus metrics connection. + pub prometheus_port: u16, + /// The interval between runs for global Scaler. + #[serde(default = "ProverAutoscalerScalerConfig::default_scaler_run_interval")] + pub scaler_run_interval: Duration, + /// URL to get queue reports from. + /// In production should be "http://prover-job-monitor.stage2.svc.cluster.local:3074/queue_report". + #[serde(default = "ProverAutoscalerScalerConfig::default_prover_job_monitor_url")] + pub prover_job_monitor_url: String, + /// List of ProverAutoscaler Agents to get cluster data from. + pub agents: Vec, + /// Mapping of namespaces to protocol versions. + pub protocol_versions: HashMap, + /// Default priorities, which cluster to prefer when there is no other information. + pub cluster_priorities: HashMap, + /// Prover speed per GPU. Used to calculate desired number of provers for queue size. + pub prover_speed: HashMap, + /// Maximum number of provers which can be run per cluster/GPU. + pub max_provers: HashMap>, + /// Minimum number of provers per namespace. + pub min_provers: HashMap, + /// Duration after which pending pod considered long pending. + #[serde(default = "ProverAutoscalerScalerConfig::default_long_pending_duration")] + pub long_pending_duration: Duration, +} + +#[derive( + Default, + Debug, + Display, + Hash, + PartialEq, + Eq, + Clone, + Copy, + Ord, + PartialOrd, + EnumString, + EncodeLabelValue, + Deserialize, +)] +pub enum Gpu { + #[default] + Unknown, + #[strum(ascii_case_insensitive)] + L4, + #[strum(ascii_case_insensitive)] + T4, + #[strum(ascii_case_insensitive)] + V100, + #[strum(ascii_case_insensitive)] + P100, + #[strum(ascii_case_insensitive)] + A100, +} + +impl ProverAutoscalerConfig { + /// Default graceful shutdown timeout -- 5 seconds + pub fn default_graceful_shutdown_timeout() -> Duration { + Duration::seconds(5) + } +} + +impl ProverAutoscalerAgentConfig { + pub fn default_namespaces() -> Vec { + vec!["prover-blue".to_string(), "prover-red".to_string()] + } + + pub fn default_dry_run() -> bool { + true + } +} + +impl ProverAutoscalerScalerConfig { + /// Default scaler_run_interval -- 10s + pub fn default_scaler_run_interval() -> Duration { + Duration::seconds(10) + } + + /// Default prover_job_monitor_url -- cluster local URL + pub fn default_prover_job_monitor_url() -> String { + "http://localhost:3074/queue_report".to_string() + } + + /// Default long_pending_duration -- 10m + pub fn default_long_pending_duration() -> Duration { + Duration::minutes(10) + } +} diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 0dca7335d1b..9d72b4ab367 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -3,10 +3,12 @@ use std::num::NonZeroUsize; use rand::{distributions::Distribution, Rng}; use secrecy::Secret; use zksync_basic_types::{ + api_key::APIKey, basic_fri_types::CircuitIdRoundTuple, commitment::L1BatchCommitmentMode, network::Network, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + pubdata_da::PubdataSendingMode, seed_phrase::SeedPhrase, vm::FastVmMode, L1BatchNumber, L1ChainId, L2ChainId, @@ -16,7 +18,11 @@ use zksync_crypto_primitives::K256PrivateKey; use crate::{ configs::{ - self, da_client::DAClientConfig::Avail, eth_sender::PubdataSendingMode, + self, + da_client::{ + 
avail::{AvailClientConfig, AvailDefaultConfig}, + DAClientConfig::Avail, + }, external_price_api_client::ForcedPriceClientConfig, }, AvailConfig, @@ -86,6 +92,7 @@ impl Distribution for EncodeDist { factory_deps_cache_size_mb: self.sample(rng), initial_writes_cache_size_mb: self.sample(rng), latest_values_cache_size_mb: self.sample(rng), + latest_values_max_block_lag: self.sample(rng), fee_history_limit: self.sample(rng), max_batch_request_size: self.sample(rng), max_response_body_size_mb: self.sample(rng), @@ -193,6 +200,7 @@ impl Distribution for EncodeDist { fee_account_addr: None, bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, l1_batch_commit_data_generator_mode: Default::default(), } } @@ -263,10 +271,10 @@ impl Distribution for EncodeDist { ecosystem_contracts: self.sample(rng), user_facing_bridgehub_proxy_addr: rng.gen(), user_facing_diamond_proxy_addr: rng.gen(), - l2_da_validator_addr: rng.gen(), base_token_addr: self.sample_opt(|| rng.gen()), chain_admin_addr: self.sample_opt(|| rng.gen()), settlement_layer: self.sample_opt(|| rng.gen()), + l2_da_validator_addr: self.sample_opt(|| rng.gen()), } } } @@ -392,17 +400,6 @@ impl Distribution for EncodeDist { } } -impl Distribution for EncodeDist { - fn sample(&self, rng: &mut R) -> configs::eth_sender::PubdataSendingMode { - type T = configs::eth_sender::PubdataSendingMode; - match rng.gen_range(0..3) { - 0 => T::Calldata, - 1 => T::Blobs, - _ => T::Custom, - } - } -} - impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::eth_sender::SenderConfig { configs::eth_sender::SenderConfig { @@ -427,6 +424,7 @@ impl Distribution for EncodeDist { tx_aggregation_only_prove_and_execute: false, ignore_db_nonce: None, priority_tree_start_index: self.sample(rng), + time_in_mempool_in_l1_blocks_cap: self.sample(rng), } } } @@ -685,7 +683,10 @@ impl Distribution for EncodeDist { configs::ProofDataHandlerConfig { http_port: self.sample(rng), proof_generation_timeout_in_secs: self.sample(rng), - tee_support: self.sample(rng), + tee_config: configs::TeeConfig { + tee_support: self.sample(rng), + first_tee_processed_batch: L1BatchNumber(rng.gen()), + }, } } } @@ -739,6 +740,7 @@ impl Distribution for EncodeDist { genesis_commitment: Some(rng.gen()), bootloader_hash: Some(rng.gen()), default_aa_hash: Some(rng.gen()), + evm_emulator_hash: Some(rng.gen()), fee_account: rng.gen(), l1_chain_id: L1ChainId(self.sample(rng)), sl_chain_id: None, @@ -807,6 +809,7 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::consensus::ConsensusConfig { use configs::consensus::{ConsensusConfig, Host, NodePublicKey}; ConsensusConfig { + port: self.sample(rng), server_addr: self.sample(rng), public_addr: Host(self.sample(rng)), max_payload_size: self.sample(rng), @@ -943,6 +946,7 @@ impl Distribution for EncodeDist { main_node_rate_limit_rps: self.sample_opt(|| rng.gen()), gateway_url: self .sample_opt(|| format!("localhost:{}", rng.gen::()).parse().unwrap()), + bridge_addresses_refresh_interval_sec: self.sample_opt(|| rng.gen()), } } } @@ -950,11 +954,12 @@ impl Distribution for EncodeDist { impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::da_client::DAClientConfig { Avail(AvailConfig { - api_node_url: self.sample(rng), bridge_api_url: self.sample(rng), - app_id: self.sample(rng), timeout: self.sample(rng), - max_retries: self.sample(rng), + config: AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: self.sample(rng), + app_id: self.sample(rng), + }), }) } 
} @@ -963,6 +968,7 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::secrets::DataAvailabilitySecrets { configs::secrets::DataAvailabilitySecrets::Avail(configs::da_client::avail::AvailSecrets { seed_phrase: Some(SeedPhrase(Secret::new(self.sample(rng)))), + gas_relay_api_key: Some(APIKey(Secret::new(self.sample(rng)))), }) } } diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index 1518d2d9f77..f9138b2bbf1 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -130,6 +130,11 @@ pub const CODE_ORACLE_ADDRESS: Address = H160([ 0x00, 0x00, 0x80, 0x12, ]); +pub const EVM_GAS_MANAGER_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x13, +]); + /// Note, that the `Create2Factory` and higher are explicitly deployed on a non-system-contract address. pub const CREATE2_FACTORY_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -140,18 +145,22 @@ pub const L2_GENESIS_UPGRADE_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, ]); + pub const L2_BRIDGEHUB_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, ]); + pub const L2_ASSET_ROUTER_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, ]); + pub const L2_NATIVE_TOKEN_VAULT_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, ]); + pub const L2_MESSAGE_ROOT_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, diff --git a/core/lib/constants/src/lib.rs b/core/lib/constants/src/lib.rs index 6aab79ad71f..30ae6a7b582 100644 --- a/core/lib/constants/src/lib.rs +++ b/core/lib/constants/src/lib.rs @@ -3,6 +3,7 @@ pub mod contracts; pub mod crypto; pub mod ethereum; pub mod fees; +pub mod message_root; pub mod system_context; pub mod system_logs; pub mod trusted_slots; diff --git a/core/lib/constants/src/message_root.rs b/core/lib/constants/src/message_root.rs new file mode 100644 index 00000000000..a8f4a034fb9 --- /dev/null +++ b/core/lib/constants/src/message_root.rs @@ -0,0 +1,5 @@ +// Position of `FullTree::_height` in `MessageRoot`'s storage layout. +pub const AGG_TREE_HEIGHT_KEY: usize = 3; + +// Position of `FullTree::nodes` in `MessageRoot`'s storage layout. 
+pub const AGG_TREE_NODES_KEY: usize = 5; diff --git a/core/lib/constants/src/system_logs.rs b/core/lib/constants/src/system_logs.rs index e2cc58444f3..aa2c2cc156c 100644 --- a/core/lib/constants/src/system_logs.rs +++ b/core/lib/constants/src/system_logs.rs @@ -2,10 +2,7 @@ pub const L2_TO_L1_LOGS_TREE_ROOT_KEY: u32 = 0; /// The key of the system log with value of the state diff hash for pre-gateway protocol versions -pub const STATE_DIFF_HASH_KEY_PRE_GATEWAY: u64 = 2; +pub const STATE_DIFF_HASH_KEY_PRE_GATEWAY: u32 = 2; -/// The key of the system log with value of the first blob linear hash -pub const BLOB1_LINEAR_HASH_KEY: u32 = 7; - -/// The key of the system log with value of the second blob linear hash -pub const BLOB2_LINEAR_HASH_KEY: u32 = 8; +/// The key of the system log with value of the first blob linear hash for pre-gateway protocol versions +pub const BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY: u32 = 7; diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 06d6a235337..af9b5fe99f2 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -69,20 +69,21 @@ fn home_path() -> PathBuf { Workspace::locate().core() } -fn read_file_to_json_value(path: impl AsRef + std::fmt::Debug) -> serde_json::Value { +fn read_file_to_json_value(path: impl AsRef + std::fmt::Debug) -> Option { let zksync_home = home_path(); let path = Path::new(&zksync_home).join(path); - let file = - File::open(&path).unwrap_or_else(|e| panic!("Failed to open file {:?}: {}", path, e)); - serde_json::from_reader(BufReader::new(file)) - .unwrap_or_else(|e| panic!("Failed to parse file {:?}: {}", path, e)) + let file = File::open(&path).ok()?; + Some( + serde_json::from_reader(BufReader::new(file)) + .unwrap_or_else(|e| panic!("Failed to parse file {:?}: {}", path, e)), + ) } fn load_contract_if_present + std::fmt::Debug>(path: P) -> Option { let zksync_home = home_path(); let path = Path::new(&zksync_home).join(path); path.exists().then(|| { - serde_json::from_value(read_file_to_json_value(&path)["abi"].take()) + serde_json::from_value(read_file_to_json_value(&path).unwrap()["abi"].take()) .unwrap_or_else(|e| panic!("Failed to parse contract abi from file {:?}: {}", path, e)) }) } @@ -114,24 +115,26 @@ pub fn load_contract + std::fmt::Debug>(path: P) -> Contract { } pub fn load_sys_contract(contract_name: &str) -> Contract { - load_contract(format!( + if let Some(contract) = load_contract_if_present(format!( "contracts/system-contracts/artifacts-zk/contracts-preprocessed/{0}.sol/{0}.json", contract_name - )) -} - -pub fn load_sys_contract_interface(contract_name: &str) -> Contract { - load_contract(format!( - "contracts/system-contracts/artifacts-zk/contracts-preprocessed/interfaces/{0}.sol/{0}.json", - contract_name - )) + )) { + contract + } else { + load_contract(format!( + "contracts/system-contracts/zkout/{0}.sol/{0}.json", + contract_name + )) + } } -pub fn read_contract_abi(path: impl AsRef + std::fmt::Debug) -> String { - read_file_to_json_value(path)["abi"] - .as_str() - .expect("Failed to parse abi") - .to_string() +pub fn read_contract_abi(path: impl AsRef + std::fmt::Debug) -> Option { + Some( + read_file_to_json_value(path)?["abi"] + .as_str() + .expect("Failed to parse abi") + .to_string(), + ) } pub fn bridgehub_contract() -> Contract { @@ -212,12 +215,12 @@ pub fn l2_message_root() -> Contract { } pub fn l2_rollup_da_validator_bytecode() -> Vec { - 
read_bytecode_from_path("contracts/l2-contracts/artifacts-zk/contracts/data-availability/RollupL2DAValidator.sol/RollupL2DAValidator.json") + read_bytecode("contracts/l2-contracts/artifacts-zk/contracts/data-availability/RollupL2DAValidator.sol/RollupL2DAValidator.json") } /// Reads bytecode from the path RELATIVE to the Cargo workspace location. pub fn read_bytecode(relative_path: impl AsRef + std::fmt::Debug) -> Vec { - read_bytecode_from_path(relative_path) + read_bytecode_from_path(relative_path).expect("Exists") } pub fn eth_contract() -> Contract { @@ -229,17 +232,25 @@ pub fn known_codes_contract() -> Contract { } /// Reads bytecode from a given path. -pub fn read_bytecode_from_path(artifact_path: impl AsRef + std::fmt::Debug) -> Vec { - let artifact = read_file_to_json_value(&artifact_path); - - let bytecode = artifact["bytecode"] - .as_str() - .unwrap_or_else(|| panic!("Bytecode not found in {:?}", artifact_path)) - .strip_prefix("0x") - .unwrap_or_else(|| panic!("Bytecode in {:?} is not hex", artifact_path)); +pub fn read_bytecode_from_path( + artifact_path: impl AsRef + std::fmt::Debug, +) -> Option> { + let artifact = read_file_to_json_value(&artifact_path)?; + + let bytecode = if let Some(bytecode) = artifact["bytecode"].as_str() { + bytecode + .strip_prefix("0x") + .unwrap_or_else(|| panic!("Bytecode in {:?} is not hex", artifact_path)) + } else { + artifact["bytecode"]["object"] + .as_str() + .unwrap_or_else(|| panic!("Bytecode not found in {:?}", artifact_path)) + }; - hex::decode(bytecode) - .unwrap_or_else(|err| panic!("Can't decode bytecode in {:?}: {}", artifact_path, err)) + Some( + hex::decode(bytecode) + .unwrap_or_else(|err| panic!("Can't decode bytecode in {:?}: {}", artifact_path, err)), + ) } pub fn read_sys_contract_bytecode(directory: &str, name: &str, lang: ContractLanguage) -> Vec { @@ -247,7 +258,7 @@ pub fn read_sys_contract_bytecode(directory: &str, name: &str, lang: ContractLan } static DEFAULT_SYSTEM_CONTRACTS_REPO: Lazy = - Lazy::new(SystemContractsRepo::from_env); + Lazy::new(SystemContractsRepo::default); /// Structure representing a system contract repository - that allows /// fetching contracts that are located there. @@ -257,14 +268,16 @@ pub struct SystemContractsRepo { pub root: PathBuf, } -impl SystemContractsRepo { +impl Default for SystemContractsRepo { /// Returns the default system contracts repository with directory based on the Cargo workspace location. 
- pub fn from_env() -> Self { + fn default() -> Self { SystemContractsRepo { root: home_path().join("contracts/system-contracts"), } } +} +impl SystemContractsRepo { pub fn read_sys_contract_bytecode( &self, directory: &str, @@ -272,23 +285,62 @@ impl SystemContractsRepo { lang: ContractLanguage, ) -> Vec { match lang { - ContractLanguage::Sol => read_bytecode_from_path(self.root.join(format!( - "artifacts-zk/contracts-preprocessed/{0}{1}.sol/{1}.json", - directory, name - ))), - ContractLanguage::Yul => read_zbin_bytecode_from_path(self.root.join(format!( - "contracts-preprocessed/{0}artifacts/{1}.yul.zbin", - directory, name - ))), + ContractLanguage::Sol => { + if let Some(contracts) = read_bytecode_from_path( + self.root + .join(format!("zkout/{0}{1}.sol/{1}.json", directory, name)), + ) { + contracts + } else { + read_bytecode_from_path(self.root.join(format!( + "artifacts-zk/contracts-preprocessed/{0}{1}.sol/{1}.json", + directory, name + ))) + .unwrap_or_else(|| { + panic!("One of the outputs should exists for {directory}{name}"); + }) + } + } + ContractLanguage::Yul => { + if let Some(contract) = read_bytecode_from_path(self.root.join(format!( + "zkout/{name}.yul/contracts-preprocessed/{directory}/{name}.yul.json", + ))) { + contract + } else { + read_yul_bytecode_by_path( + self.root + .join(format!("contracts-preprocessed/{directory}artifacts")), + name, + ) + } + } } } } pub fn read_bootloader_code(bootloader_type: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/build/artifacts/{}.yul.zbin", - bootloader_type - )) + if let Some(contract) = + read_bytecode_from_path(home_path().join("contracts/system-contracts").join(format!( + "zkout/{bootloader_type}.yul/contracts-preprocessed/bootloader/{bootloader_type}.yul.json", + ))) + { + return contract; + }; + + let artifacts_path = + Path::new(&home_path()).join("contracts/system-contracts/bootloader/build/artifacts"); + let bytecode_path = artifacts_path.join(format!("{bootloader_type}.yul.zbin")); + if fs::exists(bytecode_path).unwrap_or_default() { + read_yul_bytecode( + "contracts/system-contracts/bootloader/build/artifacts", + bootloader_type, + ) + } else { + read_yul_bytecode( + "contracts/system-contracts/bootloader/tests/artifacts", + bootloader_type, + ) + } } fn read_proved_batch_bootloader_bytecode() -> Vec { @@ -305,11 +357,48 @@ pub fn read_zbin_bytecode(relative_zbin_path: impl AsRef) -> Vec { read_zbin_bytecode_from_path(bytecode_path) } +pub fn read_yul_bytecode(relative_artifacts_path: &str, name: &str) -> Vec { + let artifacts_path = Path::new(&home_path()).join(relative_artifacts_path); + read_yul_bytecode_by_path(artifacts_path, name) +} + +pub fn read_yul_bytecode_by_path(artifacts_path: PathBuf, name: &str) -> Vec { + let bytecode_path = artifacts_path.join(format!("{name}.yul/{name}.yul.zbin")); + + // Legacy versions of zksolc use the following path for output data if a yul file is being compiled: .yul.zbin + // New zksolc versions use .yul/.yul.zbin, for consistency with solidity files compilation. + // In addition, the output of the legacy zksolc in this case is a binary file, while in new versions it is hex encoded. 
+ if fs::exists(&bytecode_path) + .unwrap_or_else(|err| panic!("Invalid path: {bytecode_path:?}, {err}")) + { + read_zbin_bytecode_from_hex_file(bytecode_path) + } else { + let bytecode_path_legacy = artifacts_path.join(format!("{name}.yul.zbin")); + + if fs::exists(&bytecode_path_legacy) + .unwrap_or_else(|err| panic!("Invalid path: {bytecode_path_legacy:?}, {err}")) + { + read_zbin_bytecode_from_path(bytecode_path_legacy) + } else { + panic!("Can't find bytecode for '{name}' yul contract at {artifacts_path:?}") + } + } +} + /// Reads zbin bytecode from a given path. fn read_zbin_bytecode_from_path(bytecode_path: PathBuf) -> Vec { fs::read(&bytecode_path) - .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err)) + .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {bytecode_path:?}: {err}")) +} + +/// Reads zbin bytecode from a given path as utf8 text file. +fn read_zbin_bytecode_from_hex_file(bytecode_path: PathBuf) -> Vec { + let bytes = fs::read(&bytecode_path) + .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {bytecode_path:?}: {err}")); + + hex::decode(bytes).unwrap_or_else(|err| panic!("Invalid input file: {bytecode_path:?}, {err}")) } + /// Hash of code and code which consists of 32 bytes words #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SystemContractCode { @@ -321,18 +410,23 @@ pub struct SystemContractCode { pub struct BaseSystemContracts { pub bootloader: SystemContractCode, pub default_aa: SystemContractCode, + /// Never filled in constructors for now. The only way to get the EVM emulator enabled is to call [`Self::with_evm_emulator()`]. + pub evm_emulator: Option, } #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq)] pub struct BaseSystemContractsHashes { pub bootloader: H256, pub default_aa: H256, + pub evm_emulator: Option, } impl PartialEq for BaseSystemContracts { fn eq(&self, other: &Self) -> bool { self.bootloader.hash == other.bootloader.hash && self.default_aa.hash == other.default_aa.hash + && self.evm_emulator.as_ref().map(|contract| contract.hash) + == other.evm_emulator.as_ref().map(|contract| contract.hash) } } @@ -356,14 +450,27 @@ impl BaseSystemContracts { BaseSystemContracts { bootloader, default_aa, + evm_emulator: None, } } - // BaseSystemContracts with proved bootloader - for handling transactions. + + /// BaseSystemContracts with proved bootloader - for handling transactions. pub fn load_from_disk() -> Self { let bootloader_bytecode = read_proved_batch_bootloader_bytecode(); BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + /// Loads the latest EVM emulator for these base system contracts. Logically, it only makes sense to do for the latest protocol version. + pub fn with_latest_evm_emulator(mut self) -> Self { + let bytecode = read_sys_contract_bytecode("", "EvmEmulator", ContractLanguage::Yul); + let hash = hash_bytecode(&bytecode); + self.evm_emulator = Some(SystemContractCode { + code: bytes_to_be_words(bytecode), + hash, + }); + self + } + /// BaseSystemContracts with playground bootloader - used for handling eth_calls. 
pub fn playground() -> Self { let bootloader_bytecode = read_playground_batch_bootloader_bytecode(); @@ -425,6 +532,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn playground_post_protocol_defense() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn playground_gateway() -> Self { let bootloader_bytecode = read_zbin_bytecode( "contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin", @@ -496,6 +610,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn estimate_gas_post_protocol_defense() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn estimate_gas_gateway() -> Self { let bootloader_bytecode = read_zbin_bytecode( "contracts/system-contracts/bootloader/build/artifacts/fee_estimate.yul.zbin", @@ -508,6 +629,7 @@ impl BaseSystemContracts { BaseSystemContractsHashes { bootloader: self.bootloader.hash, default_aa: self.default_aa.hash, + evm_emulator: self.evm_emulator.as_ref().map(|contract| contract.hash), } } } diff --git a/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json b/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json new file mode 100644 index 00000000000..189e28f565d --- /dev/null +++ b/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version,\n index_in_block,\n miniblocks.number AS \"miniblock_number!\",\n miniblocks.hash AS \"miniblocks_hash!\"\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "miniblock_number!", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "miniblocks_hash!", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + true, + true, + false, + false + ] + }, + "hash": "0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69" +} diff --git a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json b/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json deleted file mode 100644 index 05b94ad249a..00000000000 --- a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n input_blob_url = $4\n WHERE\n l1_batch_number = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text" - ] - }, - 
"nullable": [] - }, - "hash": "0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04" -} diff --git a/core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json b/core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json similarity index 53% rename from core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json rename to core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json index 9cf4cc1e68e..36879466039 100644 --- a/core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json +++ b/core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n WHERE\n is_sealed\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ true ] }, - "hash": "1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03" + "hash": "16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47" } diff --git a/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json b/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json new file mode 100644 index 00000000000..b40bdca666b --- /dev/null +++ b/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM l1_batches\n WHERE\n number > $1\n AND NOT is_sealed\n RETURNING number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78" +} diff --git a/core/lib/dal/.sqlx/query-ce6d5796dcc7c105fe3b3081b70327982ab744c7566645e9b0c69364f7021c5a.json b/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json similarity index 75% rename from core/lib/dal/.sqlx/query-ce6d5796dcc7c105fe3b3081b70327982ab744c7566645e9b0c69364f7021c5a.json rename to core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json index c3a238fb8ba..48adcd41267 100644 --- a/core/lib/dal/.sqlx/query-ce6d5796dcc7c105fe3b3081b70327982ab744c7566645e9b0c69364f7021c5a.json +++ b/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n 
data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -180,15 +190,17 @@ true, true, true, + true, false, true, true, true, + false, true, true, true, true ] }, - "hash": "ce6d5796dcc7c105fe3b3081b70327982ab744c7566645e9b0c69364f7021c5a" + "hash": "1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7" } diff --git a/core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json b/core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json similarity index 73% rename from core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json rename to core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json index 853acb9f71a..a101edbb9ea 100644 --- a/core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json +++ 
b/core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n storage_refunds,\n pubdata_costs\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n storage_refunds,\n pubdata_costs\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ true ] }, - "hash": "cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a" + "hash": "1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec" } diff --git a/core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json b/core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json similarity index 71% rename from core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json rename to core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json index fc36e47b54c..1078e0b57f6 100644 --- a/core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json +++ b/core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n initial_bootloader_heap_content\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n initial_bootloader_heap_content\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea" + "hash": "1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c" } diff --git a/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json b/core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json similarity index 62% rename from core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json rename to core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json index 433564c6ae0..aa657582690 100644 --- a/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json +++ b/core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n (\n SELECT\n l1_batch_number\n FROM\n miniblocks\n WHERE\n number = $1\n ) AS \"block_batch?\",\n COALESCE(\n (\n SELECT\n MAX(number) + 1\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n ),\n 0\n ) AS \"pending_batch!\"\n ", + "query": "\n SELECT\n (\n SELECT\n l1_batch_number\n FROM\n miniblocks\n WHERE\n number = $1\n ) AS \"block_batch?\",\n COALESCE(\n (\n SELECT\n MAX(number) + 1\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n ),\n 0\n ) AS \"pending_batch!\"\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ null ] }, - "hash": "c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5" + "hash": "1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8" } diff --git a/core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json b/core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json similarity index 50% rename from 
core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json rename to core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json index 06d3461c3fa..4f44879b6ec 100644 --- a/core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json +++ b/core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json @@ -1,12 +1,17 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n protocol_version,\n hash\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { "ordinal": 0, "name": "protocol_version", "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "hash", + "type_info": "Bytea" } ], "parameters": { @@ -15,8 +20,9 @@ ] }, "nullable": [ - true + true, + false ] }, - "hash": "894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2" + "hash": "2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850" } diff --git a/core/lib/dal/.sqlx/query-269e5901aaa362ed011a2e968d2bc8cc8877e5d1d9c2d9b04953fa7d89155b40.json b/core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json similarity index 70% rename from core/lib/dal/.sqlx/query-269e5901aaa362ed011a2e968d2bc8cc8877e5d1d9c2d9b04953fa7d89155b40.json rename to core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json index 2b094a5f24f..5c4ce3d6a4e 100644 --- a/core/lib/dal/.sqlx/query-269e5901aaa362ed011a2e968d2bc8cc8877e5d1d9c2d9b04953fa7d89155b40.json +++ b/core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n l2_da_validator_address AS \"l2_da_validator_address!\",\n pubdata_type AS \"pubdata_type!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -35,68 +35,73 @@ }, { "ordinal": 6, - "name": "l2_da_validator_address!", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "pubdata_type!", - "type_info": "Text" - }, - { - "ordinal": 8, "name": "base_fee_per_gas", "type_info": "Numeric" }, { - "ordinal": 9, + "ordinal": 7, "name": "l1_gas_price", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 8, "name": "l2_fair_gas_price", "type_info": "Int8" }, { - "ordinal": 11, + "ordinal": 9, "name": "gas_per_pubdata_limit", "type_info": "Int8" }, { - "ordinal": 12, + "ordinal": 10, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 11, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 12, + "name": 
"evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 15, + "ordinal": 14, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 16, + "ordinal": 15, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 17, + "ordinal": 16, "name": "gas_limit", "type_info": "Int8" }, { - "ordinal": 18, + "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "l2_da_validator_address", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pubdata_type", + "type_info": "Text" } ], "parameters": { @@ -113,16 +118,17 @@ false, false, false, - false, - false, + true, true, true, true, false, true, true, - true + true, + false, + false ] }, - "hash": "269e5901aaa362ed011a2e968d2bc8cc8877e5d1d9c2d9b04953fa7d89155b40" + "hash": "250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8" } diff --git a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json b/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json deleted file mode 100644 index 9d8cc36189f..00000000000 --- a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = $2\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text" - ] - }, - "nullable": [ - false - ] - }, - "hash": "2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d" -} diff --git a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json b/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json deleted file mode 100644 index a273eb249a4..00000000000 --- a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM tee_verifier_input_producer_jobs\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [] - }, - "nullable": [] - }, - "hash": "2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d" -} diff --git a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json b/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json deleted file mode 100644 index 6012c632651..00000000000 --- a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n error = $4\n WHERE\n l1_batch_number = $2\n AND status != $5\n RETURNING\n tee_verifier_input_producer_jobs.attempts\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } 
- ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [ - false - ] - }, - "hash": "3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79" -} diff --git a/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json b/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json deleted file mode 100644 index 7245fa3059e..00000000000 --- a/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n bytecode\n FROM\n (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "bytecode", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Int8", - "Bytea" - ] - }, - "nullable": [ - false - ] - }, - "hash": "369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980" -} diff --git a/core/lib/dal/.sqlx/query-4d1d409b2405a4105feb140720abb480be336b68c127e442ee1bfd177597bd8b.json b/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json similarity index 68% rename from core/lib/dal/.sqlx/query-4d1d409b2405a4105feb140720abb480be336b68c127e442ee1bfd177597bd8b.json rename to core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json index c5b5afe85e4..11bff110293 100644 --- a/core/lib/dal/.sqlx/query-4d1d409b2405a4105feb140720abb480be336b68c127e442ee1bfd177597bd8b.json +++ b/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR 
protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -186,15 +196,17 @@ true, true, true, + true, false, true, true, true, + false, true, true, true, true ] }, - "hash": "4d1d409b2405a4105feb140720abb480be336b68c127e442ee1bfd177597bd8b" + "hash": "45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746" } diff --git a/core/lib/dal/.sqlx/query-4e1db16b582aa347dc33fccd8d8afa60b5ca8ce096bfb79172b1b55264f6c987.json 
b/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json similarity index 75% rename from core/lib/dal/.sqlx/query-4e1db16b582aa347dc33fccd8d8afa60b5ca8ce096bfb79172b1b55264f6c987.json rename to core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json index 0c6bb9ca4d2..66d3e18075b 100644 --- a/core/lib/dal/.sqlx/query-4e1db16b582aa347dc33fccd8d8afa60b5ca8ce096bfb79172b1b55264f6c987.json +++ b/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + 
"type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -184,15 +194,17 @@ true, true, true, + true, false, true, true, true, + false, true, true, true, true ] }, - "hash": "4e1db16b582aa347dc33fccd8d8afa60b5ca8ce096bfb79172b1b55264f6c987" + "hash": "4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970" } diff --git a/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json b/core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json similarity index 76% rename from core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json rename to core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json index 4a73fde57e2..804318120fc 100644 --- a/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json +++ b/core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -55,18 +55,28 @@ }, { "ordinal": 10, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 11, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 11, + "ordinal": 12, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 12, + "ordinal": 13, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -86,9 +96,11 @@ true, true, true, + true, false, - true + true, + false ] }, - "hash": "454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd" + "hash": "4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec" } diff --git a/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json b/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json new file mode 100644 index 00000000000..2cd528a9f53 --- /dev/null +++ b/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN 
processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966" +} diff --git a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json b/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json deleted file mode 100644 index f34c4a548cb..00000000000 --- a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78" -} diff --git a/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json b/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json deleted file mode 100644 index c2d9fe2e1ac..00000000000 --- a/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_ready_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - true - ] - }, - "hash": "5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974" -} diff --git a/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json b/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json new file mode 100644 index 00000000000..c95a5bc6bd4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a" +} diff --git a/core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json 
b/core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json similarity index 57% rename from core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json rename to core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json index 7c3a261d1f6..95957160124 100644 --- a/core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json +++ b/core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_prove_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_execute_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d" + "hash": "5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6" } diff --git a/core/lib/dal/.sqlx/query-670f7d170122b6165ea521c482f2ec32d637a8c11af6472b9b390c6ca2b68495.json b/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json similarity index 76% rename from core/lib/dal/.sqlx/query-670f7d170122b6165ea521c482f2ec32d637a8c11af6472b9b390c6ca2b68495.json rename to core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json index 74a426f2039..dfdb4b6c82e 100644 --- a/core/lib/dal/.sqlx/query-670f7d170122b6165ea521c482f2ec32d637a8c11af6472b9b390c6ca2b68495.json +++ b/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n 
ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -90,28 +90,28 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, - "name": "protocol_version", - "type_info": "Int4" + "name": "meta_parameters_hash", + "type_info": "Bytea" }, { "ordinal": 21, - "name": "system_logs", - "type_info": "ByteaArray" + "name": "protocol_version", + "type_info": "Int4" }, { "ordinal": 22, @@ -120,36 +120,46 @@ }, { "ordinal": 23, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -181,16 +191,18 @@ true, true, true, - false, true, true, + false, + true, true, true, + false, true, true, true, true ] }, - "hash": "670f7d170122b6165ea521c482f2ec32d637a8c11af6472b9b390c6ca2b68495" + "hash": "62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37" } diff --git a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json b/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json deleted file mode 100644 index 01ede1d8643..00000000000 --- a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_verifier_input_producer_jobs (\n l1_batch_number, status, created_at, updated_at\n )\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [] - }, - "hash": "6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199" -} diff --git a/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json b/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json new file mode 100644 index 00000000000..306f193861f --- /dev/null +++ b/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE l1_batches\n SET\n l1_tx_count = $2,\n l2_tx_count = $3,\n l2_to_l1_messages = $4,\n bloom = $5,\n priority_ops_onchain_data = $6,\n predicted_commit_gas_cost = $7,\n predicted_prove_gas_cost = $8,\n predicted_execute_gas_cost = $9,\n 
initial_bootloader_heap_content = $10,\n used_contract_hashes = $11,\n bootloader_code_hash = $12,\n default_aa_code_hash = $13,\n evm_emulator_code_hash = $14,\n protocol_version = $15,\n system_logs = $16,\n storage_refunds = $17,\n pubdata_costs = $18,\n pubdata_input = $19,\n predicted_circuits_by_type = $20,\n updated_at = NOW(),\n is_sealed = TRUE\n WHERE\n number = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Bytea", + "Bytea", + "Bytea", + "Int4", + "ByteaArray", + "Int8Array", + "Int8Array", + "Bytea", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a" +} diff --git a/core/lib/dal/.sqlx/query-9ea417e4ffef9e5d158089723692ba43fe8560be0c4aa7baa49e71b2a28187e7.json b/core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json similarity index 60% rename from core/lib/dal/.sqlx/query-9ea417e4ffef9e5d158089723692ba43fe8560be0c4aa7baa49e71b2a28187e7.json rename to core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json index ebed8e99f5b..6cc2e22382d 100644 --- a/core/lib/dal/.sqlx/query-9ea417e4ffef9e5d158089723692ba43fe8560be0c4aa7baa49e71b2a28187e7.json +++ b/core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\",\n miniblocks.l2_da_validator_address AS \"l2_da_validator_address!\",\n miniblocks.pubdata_type AS \"pubdata_type!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\",\n miniblocks.l2_da_validator_address AS \"l2_da_validator_address!\",\n miniblocks.pubdata_type AS \"pubdata_type!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", "describe": { "columns": [ { @@ -50,31 +50,36 @@ }, { "ordinal": 9, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 10, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 11, "name": "hash", "type_info": "Bytea" }, 
{ - "ordinal": 11, + "ordinal": 12, "name": "protocol_version!", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 13, "name": "fee_account_address!", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 14, "name": "l2_da_validator_address!", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 15, "name": "pubdata_type!", "type_info": "Text" } @@ -95,6 +100,7 @@ true, true, true, + true, false, false, true, @@ -103,5 +109,5 @@ false ] }, - "hash": "9ea417e4ffef9e5d158089723692ba43fe8560be0c4aa7baa49e71b2a28187e7" + "hash": "7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2" } diff --git a/core/lib/dal/.sqlx/query-66c2d8f27715ee11b0a7c4b9fd7e2e6718eea8ba12757ec77889233542b15b40.json b/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json similarity index 78% rename from core/lib/dal/.sqlx/query-66c2d8f27715ee11b0a7c4b9fd7e2e6718eea8ba12757ec77889233542b15b40.json rename to core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json index e26688c658e..f4e08abe31c 100644 --- a/core/lib/dal/.sqlx/query-66c2d8f27715ee11b0a7c4b9fd7e2e6718eea8ba12757ec77889233542b15b40.json +++ b/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -90,28 +90,28 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": 
"pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, - "name": "protocol_version", - "type_info": "Int4" + "name": "meta_parameters_hash", + "type_info": "Bytea" }, { "ordinal": 21, - "name": "compressed_state_diffs", - "type_info": "Bytea" + "name": "protocol_version", + "type_info": "Int4" }, { "ordinal": 22, @@ -120,36 +120,46 @@ }, { "ordinal": 23, - "name": "events_queue_commitment", + "name": "compressed_state_diffs", "type_info": "Bytea" }, { "ordinal": 24, - "name": "bootloader_initial_content_commitment", + "name": "events_queue_commitment", "type_info": "Bytea" }, { "ordinal": 25, - "name": "pubdata_input", + "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { "ordinal": 26, - "name": "aggregation_root", + "name": "pubdata_input", "type_info": "Bytea" }, { "ordinal": 27, - "name": "local_root", + "name": "fee_address", "type_info": "Bytea" }, { "ordinal": 28, - "name": "state_diff_hash", + "name": "aggregation_root", "type_info": "Bytea" }, { "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -187,10 +197,12 @@ true, true, true, + false, + true, true, true, true ] }, - "hash": "66c2d8f27715ee11b0a7c4b9fd7e2e6718eea8ba12757ec77889233542b15b40" + "hash": "77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9" } diff --git a/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json b/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json new file mode 100644 index 00000000000..f89f531c446 --- /dev/null +++ b/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Bytea", + "Numeric", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Int4", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Text" + ] + }, + "nullable": [] + }, + "hash": "7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba" +} diff --git a/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json b/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json new file mode 100644 index 00000000000..df856b97702 --- /dev/null +++ b/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json @@ -0,0 +1,56 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n timestamp,\n protocol_version,\n fee_address,\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n l1_batches\n WHERE\n NOT is_sealed\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + 
"type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 5, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 6, + "name": "fair_pubdata_price", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + true, + false, + false, + false, + false + ] + }, + "hash": "8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a" +} diff --git a/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json b/core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json similarity index 62% rename from core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json rename to core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json index 162c722add9..d944b6abf9e 100644 --- a/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json +++ b/core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_versions.evm_emulator_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -25,11 +25,16 @@ }, { "ordinal": 4, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 5, "name": "patch", "type_info": "Int4" }, { - "ordinal": 5, + "ordinal": 6, "name": "snark_wrapper_vk_hash", "type_info": "Bytea" } @@ -44,9 +49,10 @@ false, false, false, + true, false, false ] }, - "hash": "c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7" + "hash": "89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b" } diff --git a/core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json b/core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json similarity index 72% rename from core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json rename to core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json index 100761f54b4..ea2b51d69d1 100644 --- a/core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json +++ b/core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n 
protocol_version\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ true ] }, - "hash": "0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41" + "hash": "8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20" } diff --git a/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json b/core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json similarity index 73% rename from core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json rename to core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json index 9b989a9ba25..82af00b5606 100644 --- a/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json +++ b/core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(number) AS \"number\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n MAX(number) AS \"number\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c" + "hash": "8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2" } diff --git a/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json b/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json deleted file mode 100644 index 3b8accb4fda..00000000000 --- a/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version,\n index_in_block\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "index_in_block", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Bytea" - ] - }, - "nullable": [ - true, - true - ] - }, - "hash": "96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d" -} diff --git a/core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json b/core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json similarity index 69% rename from core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json rename to core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json index f9799079442..08e3b4b17a9 100644 --- a/core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json +++ b/core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MIN(number) AS \"min?\"\n FROM\n l1_batches\n WHERE\n protocol_version = $1\n ", + "query": "\n SELECT\n MIN(number) AS \"min?\"\n FROM\n l1_batches\n WHERE\n is_sealed\n AND protocol_version = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ null ] }, - "hash": "86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d" + "hash": 
"9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77" } diff --git a/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json b/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json deleted file mode 100644 index d2c999a70d4..00000000000 --- a/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Bytea", - "Bytea", - "Int4", - "ByteaArray", - "Int8Array", - "Int8Array", - "Bytea", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e" -} diff --git a/core/lib/dal/.sqlx/query-dc9a3821560030a8daf8dbdafe5f52aed204a20c67a6b959b16c2a60c745321e.json b/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json similarity index 71% rename from core/lib/dal/.sqlx/query-dc9a3821560030a8daf8dbdafe5f52aed204a20c67a6b959b16c2a60c745321e.json rename to core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json index 269c3ebb328..9a93ba45978 100644 --- a/core/lib/dal/.sqlx/query-dc9a3821560030a8daf8dbdafe5f52aed204a20c67a6b959b16c2a60c745321e.json +++ b/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n 
l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -181,17 +191,19 @@ true, true, true, + true, false, true, true, true, true, true, + false, true, true, true, true ] }, - "hash": "dc9a3821560030a8daf8dbdafe5f52aed204a20c67a6b959b16c2a60c745321e" + "hash": "a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789" } diff --git a/core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json b/core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json similarity index 73% rename from core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json rename to core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json index 56fcdb38943..9a1b043e573 100644 --- a/core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json +++ b/core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MIN(number) AS \"number\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n MIN(number) AS \"number\"\n 
FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1" + "hash": "a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026" } diff --git a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json b/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json similarity index 73% rename from core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json rename to core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json index 81ae6c590f9..28ffcc5ae46 100644 --- a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json +++ b/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", + "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", "describe": { "columns": [ { @@ -82,6 +82,11 @@ "ordinal": 15, "name": 
"default_aa_code_hash", "type_info": "Bytea" + }, + { + "ordinal": 16, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" } ], "parameters": { @@ -105,8 +110,9 @@ false, true, true, + true, true ] }, - "hash": "2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1" + "hash": "a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6" } diff --git a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json b/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json deleted file mode 100644 index b17b5828211..00000000000 --- a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $2\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $3\n AND (\n proofs.status = $4\n OR (\n proofs.status = $1\n AND proofs.prover_taken_at < NOW() - $5::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $6\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text", - "Interval", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0" -} diff --git a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json b/core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json similarity index 54% rename from core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json rename to core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json index 8c41c0ab976..9d9fa72595d 100644 --- a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json +++ b/core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW())\n ON CONFLICT DO NOTHING\n ", + "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, NOW())\n ON CONFLICT DO NOTHING\n ", "describe": { "columns": [], "parameters": { @@ -9,10 +9,11 @@ "Int8", "Bytea", "Bytea", + "Bytea", "Bytea" ] }, "nullable": [] }, - "hash": "048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016" + "hash": "b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917" } diff --git 
a/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json b/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json new file mode 100644 index 00000000000..78b913fcc36 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches (\n number,\n timestamp,\n protocol_version,\n fee_address,\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n initial_bootloader_heap_content,\n used_contract_hashes,\n created_at,\n updated_at,\n is_sealed\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n 0,\n 0,\n ''::bytea,\n '{}'::bytea [],\n '{}'::jsonb,\n '{}'::jsonb,\n NOW(),\n NOW(),\n FALSE\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Int4", + "Bytea", + "Int8", + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798" +} diff --git a/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json b/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json deleted file mode 100644 index 120fac1021f..00000000000 --- a/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_ready_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - true - ] - }, - "hash": "b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c" -} diff --git a/core/lib/dal/.sqlx/query-31308e6469a98e9662ff284a89ce264ca7b68c54d894fad9d760324455321080.json b/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json similarity index 71% rename from core/lib/dal/.sqlx/query-31308e6469a98e9662ff284a89ce264ca7b68c54d894fad9d760324455321080.json rename to core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json index 71da9df70be..8a68b1a9b9b 100644 --- a/core/lib/dal/.sqlx/query-31308e6469a98e9662ff284a89ce264ca7b68c54d894fad9d760324455321080.json +++ b/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n 
bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -185,15 +195,17 @@ true, true, true, + true, false, true, true, true, + false, true, true, true, 
true ] }, - "hash": "31308e6469a98e9662ff284a89ce264ca7b68c54d894fad9d760324455321080" + "hash": "b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd" } diff --git a/core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json b/core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json similarity index 52% rename from core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json rename to core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json index eba36994fb3..2689716c38a 100644 --- a/core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json +++ b/core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n bootloader_code_hash,\n default_account_code_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "query": "\n SELECT\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", "describe": { "columns": [ { @@ -12,6 +12,11 @@ "ordinal": 1, "name": "default_account_code_hash", "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" } ], "parameters": { @@ -21,8 +26,9 @@ }, "nullable": [ false, - false + false, + true ] }, - "hash": "5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6" + "hash": "bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456" } diff --git a/core/lib/dal/.sqlx/query-db3593883d5e1e636d65e25cc744637fc33467fbd64da5a431ecab194409371c.json b/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json similarity index 73% rename from core/lib/dal/.sqlx/query-db3593883d5e1e636d65e25cc744637fc33467fbd64da5a431ecab194409371c.json rename to core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json index 40ed6af677a..f97ea8a6ccd 100644 --- a/core/lib/dal/.sqlx/query-db3593883d5e1e636d65e25cc744637fc33467fbd64da5a431ecab194409371c.json +++ b/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n 
rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -182,15 +192,17 @@ true, true, true, + true, false, true, true, true, + false, true, true, true, true ] }, - "hash": "db3593883d5e1e636d65e25cc744637fc33467fbd64da5a431ecab194409371c" + "hash": "c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b" } diff --git a/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json b/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json new file mode 100644 index 00000000000..20b79199165 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n bytecode_hash,\n bytecode\n FROM\n (\n SELECT\n value\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) deploy_log\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "bytecode_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "bytecode", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Bytea" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": 
"c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967" +} diff --git a/core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json b/core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json similarity index 74% rename from core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json rename to core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json index c63ea98db44..8f6d1cf7a5f 100644 --- a/core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json +++ b/core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8" + "hash": "cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b" } diff --git a/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json b/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json new file mode 100644 index 00000000000..4b219bfee0a --- /dev/null +++ b/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH upsert AS (\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n tee.status = $3\n OR (\n tee.status = $2\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n )\n FETCH FIRST ROW ONLY\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Interval", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320" +} diff --git a/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json b/core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json similarity index 55% rename from core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json rename to core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json index f440a265593..ed3270de573 100644 --- a/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json +++ b/core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n 
miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -90,11 +90,16 @@ }, { "ordinal": 17, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 18, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 18, + "ordinal": 19, "name": "fee_account_address", "type_info": "Bytea" } @@ -123,8 +128,9 @@ true, true, true, + true, false ] }, - "hash": "b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad" + "hash": "d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8" } diff --git a/core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json b/core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json similarity index 57% rename from 
core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json rename to core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json index 0370a63d65e..15d6096420f 100644 --- a/core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json +++ b/core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_commit_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99" + "hash": "d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef" } diff --git a/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json b/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json new file mode 100644 index 00000000000..0aac086f22a --- /dev/null +++ b/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\"\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e" +} diff --git a/core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json b/core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json similarity index 57% rename from core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json rename to core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json index 3052b3a04d1..baabbdb4f24 100644 --- a/core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json +++ b/core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_execute_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_prove_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b" + "hash": "d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c" } diff --git a/core/lib/dal/.sqlx/query-7af141a4533b332903b7ba5591b1c90ac9deb75cd2a542fe649d7830496a0756.json b/core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json similarity index 72% rename from core/lib/dal/.sqlx/query-7af141a4533b332903b7ba5591b1c90ac9deb75cd2a542fe649d7830496a0756.json rename 
to core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json index eb6d9c3640d..111234e02b7 100644 --- a/core/lib/dal/.sqlx/query-7af141a4533b332903b7ba5591b1c90ac9deb75cd2a542fe649d7830496a0756.json +++ b/core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n l2_da_validator_address AS \"l2_da_validator_address!\",\n pubdata_type AS \"pubdata_type!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -35,68 +35,73 @@ }, { "ordinal": 6, - "name": "l2_da_validator_address!", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "pubdata_type!", - "type_info": "Text" - }, - { - "ordinal": 8, "name": "base_fee_per_gas", "type_info": "Numeric" }, { - "ordinal": 9, + "ordinal": 7, "name": "l1_gas_price", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 8, "name": "l2_fair_gas_price", "type_info": "Int8" }, { - "ordinal": 11, + "ordinal": 9, "name": "gas_per_pubdata_limit", "type_info": "Int8" }, { - "ordinal": 12, + "ordinal": 10, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 11, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 12, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 15, + "ordinal": 14, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 16, + "ordinal": 15, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 17, + "ordinal": 16, "name": "gas_limit", "type_info": "Int8" }, { - "ordinal": 18, + "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "l2_da_validator_address", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pubdata_type", + "type_info": "Text" } ], "parameters": { @@ -115,16 +120,17 @@ false, false, false, - false, - false, + true, true, true, true, false, true, true, - true + true, + false, + false ] }, - "hash": "7af141a4533b332903b7ba5591b1c90ac9deb75cd2a542fe649d7830496a0756" + "hash": "d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9" } diff --git a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json b/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json deleted file mode 100644 index fa1a5d6741a..00000000000 --- a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n 
attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n status = $2\n OR (\n status = $1\n AND processing_started_at < NOW() - $4::INTERVAL\n )\n OR (\n status = $3\n AND attempts < $5\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_verifier_input_producer_jobs.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Interval", - "Int2" - ] - }, - "nullable": [ - false - ] - }, - "hash": "d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860" -} diff --git a/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json b/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json deleted file mode 100644 index 2598be6267d..00000000000 --- a/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\"\n FROM\n miniblocks\n WHERE\n number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005" -} diff --git a/core/lib/dal/.sqlx/query-f461f21fcc8b8e88d7cb8cfc38a15f75badf7801f687af19163f5f533e20fbc7.json b/core/lib/dal/.sqlx/query-f461f21fcc8b8e88d7cb8cfc38a15f75badf7801f687af19163f5f533e20fbc7.json deleted file mode 100644 index 126a7bafc00..00000000000 --- a/core/lib/dal/.sqlx/query-f461f21fcc8b8e88d7cb8cfc38a15f75badf7801f687af19163f5f533e20fbc7.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n l2_da_validator_address,\n pubdata_type,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Bytea", - "Int4", - "Int4", - "Bytea", - "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea", - "Int4", - "Int8", - "Int8", - "Int8", - "Bytea", - "Text", - "Bytea" - ] - }, - 
"nullable": [] - }, - "hash": "f461f21fcc8b8e88d7cb8cfc38a15f75badf7801f687af19163f5f533e20fbc7" -} diff --git a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json b/core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json similarity index 71% rename from core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json rename to core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json index 5e9051587bb..1b50a750dac 100644 --- a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json +++ b/core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", "describe": { "columns": [ { @@ -25,6 +25,11 @@ }, { "ordinal": 4, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 5, "name": "upgrade_tx_hash", "type_info": "Bytea" } @@ -39,8 +44,9 @@ false, false, false, + true, true ] }, - "hash": "5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355" + "hash": "f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a" } diff --git a/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json new file mode 100644 index 00000000000..12e28266fbc --- /dev/null +++ b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n WHERE\n proofs.status = $1\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662" +} diff --git a/core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json b/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json similarity index 58% rename from core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json rename to core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json index 61497cdb169..c34d38ac2d0 100644 --- a/core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json +++ b/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n WHERE\n number >= $1\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -10,13 +10,11 @@ } ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] 
}, "nullable": [ false ] }, - "hash": "d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977" + "hash": "fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b" } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index ccca49525e4..db03b8de982 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -22,8 +22,11 @@ zksync_types.workspace = true zksync_concurrency.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_storage.workspace = true +zksync_consensus_crypto.workspace = true +zksync_consensus_utils.workspace = true zksync_protobuf.workspace = true zksync_db_connection.workspace = true +zksync_l1_contract_interface.workspace = true itertools.workspace = true thiserror.workspace = true diff --git a/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql new file mode 100644 index 00000000000..3706fc6630b --- /dev/null +++ b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches + RENAME COLUMN is_sealed TO is_finished; +ALTER table l1_batches + DROP COLUMN fair_pubdata_price, + DROP COLUMN fee_address; diff --git a/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql new file mode 100644 index 00000000000..6b08546ea1e --- /dev/null +++ b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches + RENAME COLUMN is_finished TO is_sealed; +ALTER table l1_batches + ADD COLUMN fair_pubdata_price bigint NOT NULL DEFAULT 0, + ADD COLUMN fee_address bytea NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea; diff --git a/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql b/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql new file mode 100644 index 00000000000..74ac4e60383 --- /dev/null +++ b/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE protocol_versions DROP COLUMN IF EXISTS evm_emulator_code_hash; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS evm_emulator_code_hash; +ALTER TABLE miniblocks DROP COLUMN IF EXISTS evm_emulator_code_hash; diff --git a/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql b/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql new file mode 100644 index 00000000000..43ae361e7ee --- /dev/null +++ b/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql @@ -0,0 +1,4 @@ +ALTER TABLE protocol_versions ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; +-- We need this column in `miniblocks` as well in order to store data for the pending L1 batch +ALTER TABLE miniblocks ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql new file mode 100644 index 00000000000..707ce306365 --- /dev/null +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql @@ -0,0 +1,20 @@ +CREATE TABLE tee_verifier_input_producer_jobs ( + l1_batch_number BIGINT NOT NULL, + status TEXT NOT NULL, + signature BYTEA, + pubkey BYTEA, + proof BYTEA, + tee_type TEXT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + 
prover_taken_at TIMESTAMP, + PRIMARY KEY (l1_batch_number, tee_type), + CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey FOREIGN KEY (l1_batch_number) REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) ON DELETE CASCADE, + CONSTRAINT tee_proof_generation_details_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES tee_attestations(pubkey) ON DELETE SET NULL +); + +ALTER TABLE tee_proof_generation_details + ADD CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey + FOREIGN KEY (l1_batch_number) + REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) + ON DELETE CASCADE; diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql new file mode 100644 index 00000000000..c2417ba86b3 --- /dev/null +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE tee_proof_generation_details DROP CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey; + +DROP TABLE IF EXISTS tee_verifier_input_producer_jobs; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index daa0fd8b79a..f1419865601 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -17,9 +17,10 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, block::{ BlockGasCount, L1BatchHeader, L1BatchStatistics, L1BatchTreeData, L2BlockHeader, - StorageOracleInfo, + StorageOracleInfo, UnsealedL1BatchHeader, }, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, + fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, writes::TreeWrite, Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, @@ -30,7 +31,9 @@ pub use crate::models::storage_block::{L1BatchMetadataError, L1BatchWithOptional use crate::{ models::{ parse_protocol_version, - storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader}, + storage_block::{ + StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader, UnsealedStorageL1Batch, + }, storage_event::StorageL2ToL1Log, storage_oracle_info::DbStorageOracleInfo, }, @@ -89,6 +92,8 @@ impl BlocksDal<'_, '_> { COUNT(*) AS "count!" 
FROM l1_batches + WHERE + is_sealed "# ) .instrument("is_genesis_needed") @@ -105,6 +110,8 @@ impl BlocksDal<'_, '_> { MAX(number) AS "number" FROM l1_batches + WHERE + is_sealed "# ) .instrument("get_sealed_l1_batch_number") @@ -140,6 +147,8 @@ impl BlocksDal<'_, '_> { MIN(number) AS "number" FROM l1_batches + WHERE + is_sealed "# ) .instrument("get_earliest_l1_batch_number") @@ -325,6 +334,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -334,6 +344,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -345,7 +356,8 @@ impl BlocksDal<'_, '_> { data_availability ON data_availability.l1_batch_number = l1_batches.number WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -373,13 +385,16 @@ impl BlocksDal<'_, '_> { used_contract_hashes, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, system_logs, - pubdata_input + pubdata_input, + fee_address FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -412,7 +427,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -443,7 +459,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -564,7 +581,109 @@ impl BlocksDal<'_, '_> { Ok(()) } + /// Inserts an unsealed L1 batch with some basic information (i.e. runtime related data is either + /// null or set to default value for the corresponding type). pub async fn insert_l1_batch( + &mut self, + unsealed_batch_header: UnsealedL1BatchHeader, + ) -> DalResult<()> { + Self::insert_l1_batch_inner(unsealed_batch_header, self.storage).await + } + + async fn insert_l1_batch_inner( + unsealed_batch_header: UnsealedL1BatchHeader, + conn: &mut Connection<'_, Core>, + ) -> DalResult<()> { + sqlx::query!( + r#" + INSERT INTO + l1_batches ( + number, + timestamp, + protocol_version, + fee_address, + l1_gas_price, + l2_fair_gas_price, + fair_pubdata_price, + l1_tx_count, + l2_tx_count, + bloom, + priority_ops_onchain_data, + initial_bootloader_heap_content, + used_contract_hashes, + created_at, + updated_at, + is_sealed + ) + VALUES + ( + $1, + $2, + $3, + $4, + $5, + $6, + $7, + 0, + 0, + ''::bytea, + '{}'::bytea [], + '{}'::jsonb, + '{}'::jsonb, + NOW(), + NOW(), + FALSE + ) + "#, + i64::from(unsealed_batch_header.number.0), + unsealed_batch_header.timestamp as i64, + unsealed_batch_header.protocol_version.map(|v| v as i32), + unsealed_batch_header.fee_address.as_bytes(), + unsealed_batch_header.fee_input.l1_gas_price() as i64, + unsealed_batch_header.fee_input.fair_l2_gas_price() as i64, + unsealed_batch_header.fee_input.fair_pubdata_price() as i64, + ) + .instrument("insert_l1_batch") + .with_arg("number", &unsealed_batch_header.number) + .execute(conn) + .await?; + Ok(()) + } + + pub async fn ensure_unsealed_l1_batch_exists( + &mut self, + unsealed_batch: UnsealedL1BatchHeader, + ) -> anyhow::Result<()> { + let mut transaction = self.storage.start_transaction().await?; + let unsealed_batch_fetched = Self::get_unsealed_l1_batch_inner(&mut transaction).await?; + + match unsealed_batch_fetched { + None => { + tracing::info!( + "Unsealed batch #{} could not be found; inserting", + unsealed_batch.number + ); + 
Self::insert_l1_batch_inner(unsealed_batch, &mut transaction).await?; + } + Some(unsealed_batch_fetched) => { + if unsealed_batch_fetched.number != unsealed_batch.number { + anyhow::bail!( + "fetched unsealed L1 batch #{} does not conform to expected L1 batch #{}", + unsealed_batch_fetched.number, + unsealed_batch.number + ) + } + } + } + + transaction.commit().await?; + Ok(()) + } + + /// Marks provided L1 batch as sealed and populates it with all the runtime information. + /// + /// Errors if the batch does not exist. + pub async fn mark_l1_batch_as_sealed( &mut self, header: &L1BatchHeader, initial_bootloader_contents: &[(usize, U256)], @@ -572,9 +691,9 @@ impl BlocksDal<'_, '_> { storage_refunds: &[u32], pubdata_costs: &[i32], predicted_circuits_by_type: CircuitStatistic, // predicted number of circuits for each circuit type - ) -> DalResult<()> { + ) -> anyhow::Result<()> { let initial_bootloader_contents_len = initial_bootloader_contents.len(); - let instrumentation = Instrumented::new("insert_l1_batch") + let instrumentation = Instrumented::new("mark_l1_batch_as_sealed") .with_arg("number", &header.number) .with_arg( "initial_bootloader_contents.len", @@ -601,61 +720,35 @@ impl BlocksDal<'_, '_> { let query = sqlx::query!( r#" - INSERT INTO - l1_batches ( - number, - l1_tx_count, - l2_tx_count, - timestamp, - l2_to_l1_messages, - bloom, - priority_ops_onchain_data, - predicted_commit_gas_cost, - predicted_prove_gas_cost, - predicted_execute_gas_cost, - initial_bootloader_heap_content, - used_contract_hashes, - bootloader_code_hash, - default_aa_code_hash, - protocol_version, - system_logs, - storage_refunds, - pubdata_costs, - pubdata_input, - predicted_circuits_by_type, - created_at, - updated_at - ) - VALUES - ( - $1, - $2, - $3, - $4, - $5, - $6, - $7, - $8, - $9, - $10, - $11, - $12, - $13, - $14, - $15, - $16, - $17, - $18, - $19, - $20, - NOW(), - NOW() - ) + UPDATE l1_batches + SET + l1_tx_count = $2, + l2_tx_count = $3, + l2_to_l1_messages = $4, + bloom = $5, + priority_ops_onchain_data = $6, + predicted_commit_gas_cost = $7, + predicted_prove_gas_cost = $8, + predicted_execute_gas_cost = $9, + initial_bootloader_heap_content = $10, + used_contract_hashes = $11, + bootloader_code_hash = $12, + default_aa_code_hash = $13, + evm_emulator_code_hash = $14, + protocol_version = $15, + system_logs = $16, + storage_refunds = $17, + pubdata_costs = $18, + pubdata_input = $19, + predicted_circuits_by_type = $20, + updated_at = NOW(), + is_sealed = TRUE + WHERE + number = $1 "#, i64::from(header.number.0), i32::from(header.l1_tx_count), i32::from(header.l2_tx_count), - header.timestamp as i64, &header.l2_to_l1_messages, header.bloom.as_bytes(), &priority_onchain_data, @@ -666,6 +759,11 @@ impl BlocksDal<'_, '_> { used_contract_hashes, header.base_system_contracts_hashes.bootloader.as_bytes(), header.base_system_contracts_hashes.default_aa.as_bytes(), + header + .base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), header.protocol_version.map(|v| v as i32), &system_logs, &storage_refunds, @@ -673,13 +771,47 @@ impl BlocksDal<'_, '_> { pubdata_input, serde_json::to_value(predicted_circuits_by_type).unwrap(), ); + let update_result = instrumentation.with(query).execute(self.storage).await?; - let mut transaction = self.storage.start_transaction().await?; - instrumentation - .with(query) - .execute(&mut transaction) - .await?; - transaction.commit().await + if update_result.rows_affected() == 0 { + anyhow::bail!( + "L1 batch sealing failed: batch #{} was not 
found", + header.number + ); + } + + Ok(()) + } + + pub async fn get_unsealed_l1_batch(&mut self) -> DalResult> { + Self::get_unsealed_l1_batch_inner(self.storage).await + } + + async fn get_unsealed_l1_batch_inner( + conn: &mut Connection<'_, Core>, + ) -> DalResult> { + let batch = sqlx::query_as!( + UnsealedStorageL1Batch, + r#" + SELECT + number, + timestamp, + protocol_version, + fee_address, + l1_gas_price, + l2_fair_gas_price, + fair_pubdata_price + FROM + l1_batches + WHERE + NOT is_sealed + "#, + ) + .instrument("get_unsealed_l1_batch") + .fetch_optional(conn) + .await?; + + Ok(batch.map(|b| b.into())) } pub async fn insert_l2_block(&mut self, l2_block_header: &L2BlockHeader) -> DalResult<()> { @@ -710,13 +842,14 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, gas_limit, + logs_bloom, l2_da_validator_address, pubdata_type, - logs_bloom, created_at, updated_at ) @@ -741,6 +874,7 @@ impl BlocksDal<'_, '_> { $17, $18, $19, + $20, NOW(), NOW() ) @@ -763,16 +897,21 @@ impl BlocksDal<'_, '_> { .base_system_contracts_hashes .default_aa .as_bytes(), + l2_block_header + .base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), l2_block_header.protocol_version.map(|v| v as i32), i64::from(l2_block_header.virtual_blocks), l2_block_header.batch_fee_input.fair_pubdata_price() as i64, l2_block_header.gas_limit as i64, + l2_block_header.logs_bloom.as_bytes(), l2_block_header .pubdata_params .l2_da_validator_address .as_bytes(), l2_block_header.pubdata_params.pubdata_type.to_string(), - l2_block_header.logs_bloom.as_bytes(), ); instrumentation.with(query).execute(self.storage).await?; @@ -790,19 +929,20 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, fee_account_address AS "fee_account_address!", - l2_da_validator_address AS "l2_da_validator_address!", - pubdata_type AS "pubdata_type!", base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, gas_limit, - logs_bloom + logs_bloom, + l2_da_validator_address, + pubdata_type FROM miniblocks ORDER BY @@ -832,19 +972,20 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, fee_account_address AS "fee_account_address!", - l2_da_validator_address AS "l2_da_validator_address!", - pubdata_type AS "pubdata_type!", base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, gas_limit, - logs_bloom + logs_bloom, + l2_da_validator_address, + pubdata_type FROM miniblocks WHERE @@ -1064,6 +1205,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1073,6 +1215,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1257,6 +1400,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1266,6 +1410,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, 
state_diff_hash, @@ -1344,6 +1489,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1353,6 +1499,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1422,6 +1569,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1431,6 +1579,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1580,6 +1729,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1589,6 +1739,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1651,6 +1802,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1660,6 +1812,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1736,6 +1889,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1745,6 +1899,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1997,6 +2152,37 @@ impl BlocksDal<'_, '_> { Ok(()) } + /// Deletes the unsealed L1 batch from the storage. Expects the caller to make sure there are no + /// associated L2 blocks. + /// + /// Accepts `batch_to_keep` as a safety mechanism. + pub async fn delete_unsealed_l1_batch( + &mut self, + batch_to_keep: L1BatchNumber, + ) -> DalResult<()> { + let deleted_row = sqlx::query!( + r#" + DELETE FROM l1_batches + WHERE + number > $1 + AND NOT is_sealed + RETURNING number + "#, + i64::from(batch_to_keep.0) + ) + .instrument("delete_unsealed_l1_batch") + .with_arg("batch_to_keep", &batch_to_keep) + .fetch_optional(self.storage) + .await?; + if let Some(deleted_row) = deleted_row { + tracing::info!( + l1_batch_number = %deleted_row.number, + "Deleted unsealed batch" + ); + } + Ok(()) + } + /// Deletes all L1 batches from the storage so that the specified batch number is the last one left. 
pub async fn delete_l1_batches(&mut self, last_batch_to_keep: L1BatchNumber) -> DalResult<()> { self.delete_l1_batches_inner(Some(last_batch_to_keep)).await @@ -2147,7 +2333,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - eth_commit_tx_id IS NULL + is_sealed + AND eth_commit_tx_id IS NULL AND number > 0 ORDER BY number @@ -2169,7 +2356,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - eth_prove_tx_id IS NULL + is_sealed + AND eth_prove_tx_id IS NULL AND number > 0 ORDER BY number @@ -2191,7 +2379,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - eth_execute_tx_id IS NULL + is_sealed + AND eth_execute_tx_id IS NULL AND number > 0 ORDER BY number @@ -2216,7 +2405,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(l1_batch_number.0) ) @@ -2286,7 +2476,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - protocol_version = $1 + is_sealed + AND protocol_version = $1 "#, protocol_version as i32 ) @@ -2592,8 +2783,12 @@ impl BlocksDal<'_, '_> { Ok(()) } - pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> DalResult<()> { + pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> anyhow::Result<()> { self.insert_l1_batch( + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), + ) + .await?; + self.mark_l1_batch_as_sealed( header, &[], Default::default(), @@ -2789,6 +2984,40 @@ mod tests { .is_err()); } + #[tokio::test] + async fn persisting_evm_emulator_hash() { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let mut l2_block_header = create_l2_block_header(1); + l2_block_header.base_system_contracts_hashes.evm_emulator = Some(H256::repeat_byte(0x23)); + conn.blocks_dal() + .insert_l2_block(&l2_block_header) + .await + .unwrap(); + + let mut fetched_block_header = conn + .blocks_dal() + .get_last_sealed_l2_block_header() + .await + .unwrap() + .expect("no block"); + // Batch fee input isn't restored exactly + fetched_block_header.batch_fee_input = l2_block_header.batch_fee_input; + + assert_eq!(fetched_block_header, l2_block_header); + // ...and a sanity check just to be sure + assert!(fetched_block_header + .base_system_contracts_hashes + .evm_emulator + .is_some()); + } + #[tokio::test] async fn loading_l1_batch_header() { let pool = ConnectionPool::::test_pool().await; @@ -2870,7 +3099,13 @@ mod tests { execute: 10, }; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) + .insert_l1_batch( + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l1_batch_as_sealed(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); @@ -2878,7 +3113,13 @@ mod tests { header.timestamp += 100; predicted_gas += predicted_gas; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) + .insert_l1_batch( + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l1_batch_as_sealed(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 904e167d1a6..4cb57798638 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ 
b/core/lib/dal/src/blocks_web3_dal.rs @@ -5,6 +5,7 @@ use zksync_db_connection::{ use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, + debug_flat_call::CallTraceMeta, fee_model::BatchFeeInput, l2_to_l1_log::L2ToL1Log, web3::{BlockHeader, Bytes}, @@ -531,11 +532,12 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_traces_for_l2_block( &mut self, block_number: L2BlockNumber, - ) -> DalResult> { - let protocol_version = sqlx::query!( + ) -> DalResult> { + let row = sqlx::query!( r#" SELECT - protocol_version + protocol_version, + hash FROM miniblocks WHERE @@ -543,14 +545,20 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(block_number.0) ) - .try_map(|row| row.protocol_version.map(parse_protocol_version).transpose()) + .try_map(|row| { + row.protocol_version + .map(parse_protocol_version) + .transpose() + .map(|val| (val, H256::from_slice(&row.hash))) + }) .instrument("get_traces_for_l2_block#get_l2_block_protocol_version_id") .with_arg("l2_block_number", &block_number) .fetch_optional(self.storage) .await?; - let Some(protocol_version) = protocol_version else { + let Some((protocol_version, block_hash)) = row else { return Ok(Vec::new()); }; + let protocol_version = protocol_version.unwrap_or_else(ProtocolVersionId::last_potentially_undefined); @@ -577,9 +585,15 @@ impl BlocksWeb3Dal<'_, '_> { .await? .into_iter() .map(|call_trace| { - let hash = H256::from_slice(&call_trace.tx_hash); + let tx_hash = H256::from_slice(&call_trace.tx_hash); let index = call_trace.tx_index_in_block.unwrap_or_default() as usize; - (call_trace.into_call(protocol_version), hash, index) + let meta = CallTraceMeta { + index_in_block: index, + tx_hash, + block_number: block_number.0, + block_hash, + }; + (call_trace.into_call(protocol_version), meta) }) .collect()) } @@ -656,6 +670,8 @@ impl BlocksWeb3Dal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ) ) AS "l1_batch_number!", miniblocks.timestamp, @@ -673,6 +689,7 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, miniblocks.protocol_version, miniblocks.fee_account_address FROM @@ -744,7 +761,8 @@ impl BlocksWeb3Dal<'_, '_> { mb.l2_fair_gas_price, mb.fair_pubdata_price, l1_batches.bootloader_code_hash, - l1_batches.default_aa_code_hash + l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash FROM l1_batches INNER JOIN mb ON TRUE @@ -1101,9 +1119,9 @@ mod tests { .await .unwrap(); assert_eq!(traces.len(), 2); - for ((trace, hash, _index), tx_result) in traces.iter().zip(&tx_results) { + for ((trace, meta), tx_result) in traces.iter().zip(&tx_results) { let expected_trace = tx_result.call_trace().unwrap(); - assert_eq!(&tx_result.hash, hash); + assert_eq!(tx_result.hash, meta.tx_hash); assert_eq!(*trace, expected_trace); } } diff --git a/core/lib/dal/src/consensus/conv.rs b/core/lib/dal/src/consensus/conv.rs new file mode 100644 index 00000000000..f0948adfd1d --- /dev/null +++ b/core/lib/dal/src/consensus/conv.rs @@ -0,0 +1,589 @@ +//! Protobuf conversion functions. 
+use anyhow::{anyhow, Context as _}; +use zksync_concurrency::net; +use zksync_consensus_roles::{attester, node}; +use zksync_protobuf::{read_optional_repr, read_required, required, ProtoFmt, ProtoRepr}; +use zksync_types::{ + abi, + commitment::{L1BatchCommitmentMode, PubdataParams}, + ethabi, + fee::Fee, + l1::{OpProcessingType, PriorityQueueType}, + l2::TransactionType, + parse_h160, parse_h256, + protocol_upgrade::ProtocolUpgradeTxCommonData, + transaction_request::PaymasterParams, + Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData, L2TxCommonData, + Nonce, PriorityOpId, ProtocolVersionId, Transaction, H256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use super::*; + +impl ProtoFmt for BlockMetadata { + type Proto = proto::BlockMetadata; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + payload_hash: read_required(&r.payload_hash).context("payload_hash")?, + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + payload_hash: Some(self.payload_hash.build()), + } + } +} + +impl ProtoRepr for proto::NodeAddr { + type Type = (node::PublicKey, net::Host); + fn read(&self) -> anyhow::Result { + Ok(( + read_required(&self.key).context("key")?, + net::Host(required(&self.addr).context("addr")?.clone()), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.0.build()), + addr: Some(this.1 .0.clone()), + } + } +} + +impl ProtoFmt for GlobalConfig { + type Proto = proto::GlobalConfig; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + registry_address: r + .registry_address + .as_ref() + .map(|a| parse_h160(a)) + .transpose() + .context("registry_address")?, + seed_peers: r + .seed_peers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("seed_peers")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + seed_peers: self + .seed_peers + .iter() + .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone()))) + .collect(), + } + } +} +impl ProtoFmt for AttestationStatus { + type Proto = proto::AttestationStatus; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + next_batch_to_attest: attester::BatchNumber( + *required(&r.next_batch_to_attest).context("next_batch_to_attest")?, + ), + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + next_batch_to_attest: Some(self.next_batch_to_attest.0), + } + } +} + +impl ProtoRepr for proto::PubdataParams { + type Type = PubdataParams; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + l2_da_validator_address: required(&self.l2_da_validator_address) + .and_then(|a| parse_h160(a)) + .context("l2_da_validator_address")?, + pubdata_type: required(&self.pubdata_type) + .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?)) + .context("pubdata_type")? 
+ .parse(), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + l2_da_validator_address: Some(this.l2_da_validator_address.as_bytes().into()), + pubdata_type: Some( + proto::L1BatchCommitDataGeneratorMode::new(&this.pubdata_type) as i32, + ), + } + } +} + +impl ProtoFmt for Payload { + type Proto = proto::Payload; + + fn read(r: &Self::Proto) -> anyhow::Result { + let protocol_version = required(&r.protocol_version) + .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) + .context("protocol_version")?; + let mut transactions = vec![]; + + match protocol_version { + v if v >= ProtocolVersionId::Version25 => { + anyhow::ensure!( + r.transactions.is_empty(), + "transactions should be empty in protocol_version {v}" + ); + for (i, tx) in r.transactions_v25.iter().enumerate() { + transactions.push( + tx.read() + .with_context(|| format!("transactions_v25[{i}]"))?, + ); + } + } + v => { + anyhow::ensure!( + r.transactions_v25.is_empty(), + "transactions_v25 should be empty in protocol_version {v}" + ); + for (i, tx) in r.transactions.iter().enumerate() { + transactions.push(tx.read().with_context(|| format!("transactions[{i}]"))?) + } + } + } + + let this = Self { + protocol_version, + hash: required(&r.hash) + .and_then(|h| parse_h256(h)) + .context("hash")?, + l1_batch_number: L1BatchNumber( + *required(&r.l1_batch_number).context("l1_batch_number")?, + ), + timestamp: *required(&r.timestamp).context("timestamp")?, + l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?, + l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?, + fair_pubdata_price: r.fair_pubdata_price, + virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?, + operator_address: required(&r.operator_address) + .and_then(|a| parse_h160(a)) + .context("operator_address")?, + transactions, + last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, + pubdata_params: read_optional_repr(&r.pubdata_params) + .context("pubdata_params")? + .unwrap_or_default(), + }; + if this.protocol_version.is_pre_gateway() { + anyhow::ensure!( + this.pubdata_params == PubdataParams::default(), + "pubdata_params should have the default value in pre-gateway protocol_version" + ); + } + if this.pubdata_params == PubdataParams::default() { + anyhow::ensure!( + r.pubdata_params.is_none(), + "default pubdata_params should be encoded as None" + ); + } + Ok(this) + } + + fn build(&self) -> Self::Proto { + if self.protocol_version.is_pre_gateway() { + assert_eq!( + self.pubdata_params, PubdataParams::default(), + "BUG DETECTED: pubdata_params should have the default value in pre-gateway protocol_version" + ); + } + let mut x = Self::Proto { + protocol_version: Some((self.protocol_version as u16).into()), + hash: Some(self.hash.as_bytes().into()), + l1_batch_number: Some(self.l1_batch_number.0), + timestamp: Some(self.timestamp), + l1_gas_price: Some(self.l1_gas_price), + l2_fair_gas_price: Some(self.l2_fair_gas_price), + fair_pubdata_price: self.fair_pubdata_price, + virtual_blocks: Some(self.virtual_blocks), + operator_address: Some(self.operator_address.as_bytes().into()), + // Transactions are stored in execution order, therefore order is deterministic. 
+ transactions: vec![], + transactions_v25: vec![], + last_in_batch: Some(self.last_in_batch), + pubdata_params: if self.pubdata_params == PubdataParams::default() { + None + } else { + Some(ProtoRepr::build(&self.pubdata_params)) + }, + }; + match self.protocol_version { + v if v >= ProtocolVersionId::Version25 => { + x.transactions_v25 = self.transactions.iter().map(ProtoRepr::build).collect(); + } + _ => { + x.transactions = self.transactions.iter().map(ProtoRepr::build).collect(); + } + } + x + } +} + +impl ProtoRepr for proto::TransactionV25 { + type Type = Transaction; + + fn read(&self) -> anyhow::Result { + use proto::transaction_v25::T; + let tx = match required(&self.t)? { + T::L1(l1) => abi::Transaction::L1 { + tx: required(&l1.rlp) + .and_then(|x| { + let tokens = ethabi::decode(&[abi::L2CanonicalTransaction::schema()], x) + .context("ethabi::decode()")?; + // Unwrap is safe because `ethabi::decode` does the verification. + let tx = + abi::L2CanonicalTransaction::decode(tokens.into_iter().next().unwrap()) + .context("L2CanonicalTransaction::decode()")?; + Ok(tx) + }) + .context("rlp")? + .into(), + factory_deps: l1.factory_deps.clone(), + eth_block: 0, + }, + T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), + }; + Transaction::from_abi(tx, true) + } + + fn build(tx: &Self::Type) -> Self { + let tx = abi::Transaction::try_from(tx.clone()).unwrap(); + use proto::transaction_v25::T; + Self { + t: Some(match tx { + abi::Transaction::L1 { + tx, factory_deps, .. + } => T::L1(proto::L1Transaction { + rlp: Some(ethabi::encode(&[tx.encode()])), + factory_deps, + }), + abi::Transaction::L2(tx) => T::L2(proto::L2Transaction { rlp: Some(tx) }), + }), + } + } +} + +impl ProtoRepr for proto::Transaction { + type Type = Transaction; + + fn read(&self) -> anyhow::Result { + let common_data = required(&self.common_data).context("common_data")?; + let execute = required(&self.execute).context("execute")?; + Ok(Self::Type { + common_data: match common_data { + proto::transaction::CommonData::L1(common_data) => { + anyhow::ensure!( + *required(&common_data.deadline_block) + .context("common_data.deadline_block")? + == 0 + ); + anyhow::ensure!( + required(&common_data.eth_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.eth_hash")? + == H256::default() + ); + ExecuteTransactionCommon::L1(L1TxCommonData { + sender: required(&common_data.sender_address) + .and_then(|x| parse_h160(x)) + .context("common_data.sender_address")?, + serial_id: required(&common_data.serial_id) + .map(|x| PriorityOpId(*x)) + .context("common_data.serial_id")?, + layer_2_tip_fee: required(&common_data.layer_2_tip_fee) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.layer_2_tip_fee")?, + full_fee: required(&common_data.full_fee) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.full_fee")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + op_processing_type: required(&common_data.op_processing_type) + .and_then(|x| { + OpProcessingType::try_from(u8::try_from(*x)?) 
+ .map_err(|_| anyhow!("u8::try_from")) + }) + .context("common_data.op_processing_type")?, + priority_queue_type: required(&common_data.priority_queue_type) + .and_then(|x| { + PriorityQueueType::try_from(u8::try_from(*x)?) + .map_err(|_| anyhow!("u8::try_from")) + }) + .context("common_data.priority_queue_type")?, + eth_block: *required(&common_data.eth_block) + .context("common_data.eth_block")?, + canonical_tx_hash: required(&common_data.canonical_tx_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.canonical_tx_hash")?, + to_mint: required(&common_data.to_mint) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.to_mint")?, + refund_recipient: required(&common_data.refund_recipient_address) + .and_then(|x| parse_h160(x)) + .context("common_data.refund_recipient_address")?, + }) + } + proto::transaction::CommonData::L2(common_data) => { + ExecuteTransactionCommon::L2(L2TxCommonData { + nonce: required(&common_data.nonce) + .map(|x| Nonce(*x)) + .context("common_data.nonce")?, + fee: Fee { + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + max_priority_fee_per_gas: required( + &common_data.max_priority_fee_per_gas, + ) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_priority_fee_per_gas")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + }, + initiator_address: required(&common_data.initiator_address) + .and_then(|x| parse_h160(x)) + .context("common_data.initiator_address")?, + signature: required(&common_data.signature) + .context("common_data.signature")? + .clone(), + transaction_type: required(&common_data.transaction_type) + .and_then(|x| Ok(TransactionType::try_from(*x)?)) + .context("common_data.transaction_type")?, + input: { + match &common_data.input { + None => None, + Some(input) => Some(InputData { + hash: required(&input.hash) + .and_then(|x| parse_h256(x)) + .context("common_data.input.hash")?, + data: required(&input.data) + .context("common_data.input.data")? + .clone(), + }), + } + }, + paymaster_params: { + let params = required(&common_data.paymaster_params)?; + PaymasterParams { + paymaster: required(¶ms.paymaster_address) + .and_then(|x| parse_h160(x)) + .context("common_data.paymaster_params.paymaster_address")?, + paymaster_input: required(¶ms.paymaster_input) + .context("common_data.paymaster_params.paymaster_input")? 
+ .clone(), + } + }, + }) + } + proto::transaction::CommonData::ProtocolUpgrade(common_data) => { + ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: required(&common_data.sender_address) + .and_then(|x| parse_h160(x)) + .context("common_data.sender_address")?, + upgrade_id: required(&common_data.upgrade_id) + .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) + .context("common_data.upgrade_id")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + eth_block: *required(&common_data.eth_block) + .context("common_data.eth_block")?, + canonical_tx_hash: required(&common_data.canonical_tx_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.canonical_tx_hash")?, + to_mint: required(&common_data.to_mint) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.to_mint")?, + refund_recipient: required(&common_data.refund_recipient_address) + .and_then(|x| parse_h160(x)) + .context("common_data.refund_recipient_address")?, + }) + } + }, + execute: Execute { + contract_address: execute + .contract_address + .as_ref() + .and_then(|x| parse_h160(x).ok()), + calldata: required(&execute.calldata).context("calldata")?.clone(), + value: required(&execute.value) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("execute.value")?, + factory_deps: execute.factory_deps.clone(), + }, + received_timestamp_ms: 0, // This timestamp is local to the node + raw_bytes: self.raw_bytes.as_ref().map(|x| x.clone().into()), + }) + } + + fn build(this: &Self::Type) -> Self { + let common_data = match &this.common_data { + ExecuteTransactionCommon::L1(data) => { + proto::transaction::CommonData::L1(proto::L1TxCommonData { + sender_address: Some(data.sender.as_bytes().into()), + serial_id: Some(data.serial_id.0), + deadline_block: Some(0), + layer_2_tip_fee: Some(u256_to_h256(data.layer_2_tip_fee).as_bytes().into()), + full_fee: Some(u256_to_h256(data.full_fee).as_bytes().into()), + max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), + gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), + gas_per_pubdata_limit: Some( + u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), + ), + op_processing_type: Some(data.op_processing_type as u32), + priority_queue_type: Some(data.priority_queue_type as u32), + eth_hash: Some(H256::default().as_bytes().into()), + eth_block: Some(data.eth_block), + canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), + to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), + refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), + }) + } + ExecuteTransactionCommon::L2(data) => { + proto::transaction::CommonData::L2(proto::L2TxCommonData { + nonce: Some(data.nonce.0), + gas_limit: Some(u256_to_h256(data.fee.gas_limit).as_bytes().into()), + max_fee_per_gas: Some(u256_to_h256(data.fee.max_fee_per_gas).as_bytes().into()), + max_priority_fee_per_gas: Some( + u256_to_h256(data.fee.max_priority_fee_per_gas) + .as_bytes() + .into(), + ), + gas_per_pubdata_limit: Some( + u256_to_h256(data.fee.gas_per_pubdata_limit) + .as_bytes() + .into(), 
+ ), + initiator_address: Some(data.initiator_address.as_bytes().into()), + signature: Some(data.signature.clone()), + transaction_type: Some(data.transaction_type as u32), + input: data.input.as_ref().map(|input_data| proto::InputData { + data: Some(input_data.data.clone()), + hash: Some(input_data.hash.as_bytes().into()), + }), + paymaster_params: Some(proto::PaymasterParams { + paymaster_input: Some(data.paymaster_params.paymaster_input.clone()), + paymaster_address: Some(data.paymaster_params.paymaster.as_bytes().into()), + }), + }) + } + ExecuteTransactionCommon::ProtocolUpgrade(data) => { + proto::transaction::CommonData::ProtocolUpgrade( + proto::ProtocolUpgradeTxCommonData { + sender_address: Some(data.sender.as_bytes().into()), + upgrade_id: Some(data.upgrade_id as u32), + max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), + gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), + gas_per_pubdata_limit: Some( + u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), + ), + eth_hash: Some(H256::default().as_bytes().into()), + eth_block: Some(data.eth_block), + canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), + to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), + refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), + }, + ) + } + }; + let execute = proto::Execute { + contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()), + calldata: Some(this.execute.calldata.clone()), + value: Some(u256_to_h256(this.execute.value).as_bytes().into()), + factory_deps: this.execute.factory_deps.clone(), + }; + Self { + common_data: Some(common_data), + execute: Some(execute), + raw_bytes: this.raw_bytes.as_ref().map(|inner| inner.0.clone()), + } + } +} + +impl ProtoRepr for proto::AttesterCommittee { + type Type = attester::Committee; + + fn read(&self) -> anyhow::Result { + let members: Vec<_> = self + .members + .iter() + .enumerate() + .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) + .collect::>() + .context("members")?; + Self::Type::new(members) + } + + fn build(this: &Self::Type) -> Self { + Self { + members: this.iter().map(|x| x.build()).collect(), + } + } +} + +impl proto::L1BatchCommitDataGeneratorMode { + pub(crate) fn new(n: &L1BatchCommitmentMode) -> Self { + match n { + L1BatchCommitmentMode::Rollup => Self::Rollup, + L1BatchCommitmentMode::Validium => Self::Validium, + } + } + + pub(crate) fn parse(&self) -> L1BatchCommitmentMode { + match self { + Self::Rollup => L1BatchCommitmentMode::Rollup, + Self::Validium => L1BatchCommitmentMode::Validium, + } + } +} diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 2a2df0adb45..96efc634835 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -1,31 +1,22 @@ -pub mod proto; - -#[cfg(test)] -mod testonly; -#[cfg(test)] -mod tests; - use std::collections::BTreeMap; -use anyhow::{anyhow, Context as _}; use zksync_concurrency::net; use zksync_consensus_roles::{attester, node, validator}; -use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; use zksync_types::{ - abi, - commitment::{L1BatchCommitmentMode, PubdataParams}, - ethabi, - fee::Fee, - l1::{OpProcessingType, PriorityQueueType}, - l2::TransactionType, - protocol_upgrade::ProtocolUpgradeTxCommonData, - transaction_request::PaymasterParams, - Address, Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData, - L2TxCommonData, Nonce, PriorityOpId, 
ProtocolVersionId, Transaction, H256, + commitment::PubdataParams, ethabi, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; -use crate::models::{parse_h160, parse_h256}; +mod conv; +pub mod proto; +#[cfg(test)] +mod testonly; +#[cfg(test)] +mod tests; + +#[derive(Debug, PartialEq, Clone)] +pub struct BlockMetadata { + pub payload_hash: validator::PayloadHash, +} /// Global config of the consensus. #[derive(Debug, PartialEq, Clone)] @@ -35,57 +26,6 @@ pub struct GlobalConfig { pub seed_peers: BTreeMap, } -impl ProtoRepr for proto::NodeAddr { - type Type = (node::PublicKey, net::Host); - fn read(&self) -> anyhow::Result { - Ok(( - read_required(&self.key).context("key")?, - net::Host(required(&self.addr).context("addr")?.clone()), - )) - } - fn build(this: &Self::Type) -> Self { - Self { - key: Some(this.0.build()), - addr: Some(this.1 .0.clone()), - } - } -} - -impl ProtoFmt for GlobalConfig { - type Proto = proto::GlobalConfig; - - fn read(r: &Self::Proto) -> anyhow::Result { - Ok(Self { - genesis: read_required(&r.genesis).context("genesis")?, - registry_address: r - .registry_address - .as_ref() - .map(|a| parse_h160(a)) - .transpose() - .context("registry_address")?, - seed_peers: r - .seed_peers - .iter() - .enumerate() - .map(|(i, e)| e.read().context(i)) - .collect::>() - .context("seed_peers")?, - }) - } - - fn build(&self) -> Self::Proto { - Self::Proto { - genesis: Some(self.genesis.build()), - registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), - seed_peers: self - .seed_peers - .iter() - .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone()))) - .collect(), - } - } -} - /// Global attestation status served by /// `attestationStatus` RPC. #[derive(Debug, PartialEq, Clone)] @@ -94,42 +34,6 @@ pub struct AttestationStatus { pub next_batch_to_attest: attester::BatchNumber, } -impl ProtoFmt for AttestationStatus { - type Proto = proto::AttestationStatus; - - fn read(r: &Self::Proto) -> anyhow::Result { - Ok(Self { - genesis: read_required(&r.genesis).context("genesis")?, - next_batch_to_attest: attester::BatchNumber( - *required(&r.next_batch_to_attest).context("next_batch_to_attest")?, - ), - }) - } - - fn build(&self) -> Self::Proto { - Self::Proto { - genesis: Some(self.genesis.build()), - next_batch_to_attest: Some(self.next_batch_to_attest.0), - } - } -} - -impl proto::L1BatchCommitDataGeneratorMode { - pub(crate) fn new(n: &L1BatchCommitmentMode) -> Self { - match n { - L1BatchCommitmentMode::Rollup => Self::Rollup, - L1BatchCommitmentMode::Validium => Self::Validium, - } - } - - pub(crate) fn parse(&self) -> L1BatchCommitmentMode { - match self { - Self::Rollup => L1BatchCommitmentMode::Rollup, - Self::Validium => L1BatchCommitmentMode::Validium, - } - } -} - /// L2 block (= miniblock) payload. 
#[derive(Debug, PartialEq)] pub struct Payload { @@ -142,116 +46,9 @@ pub struct Payload { pub fair_pubdata_price: Option, pub virtual_blocks: u32, pub operator_address: Address, - pub pubdata_params: Option, pub transactions: Vec, pub last_in_batch: bool, -} - -impl ProtoFmt for Payload { - type Proto = proto::Payload; - - fn read(r: &Self::Proto) -> anyhow::Result { - let protocol_version = required(&r.protocol_version) - .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) - .context("protocol_version")?; - let mut transactions = vec![]; - - match protocol_version { - v if v >= ProtocolVersionId::Version25 => { - anyhow::ensure!( - r.transactions.is_empty(), - "transactions should be empty in protocol_version {v}" - ); - for (i, tx) in r.transactions_v25.iter().enumerate() { - transactions.push( - tx.read() - .with_context(|| format!("transactions_v25[{i}]"))?, - ); - } - } - v => { - anyhow::ensure!( - r.transactions_v25.is_empty(), - "transactions_v25 should be empty in protocol_version {v}" - ); - for (i, tx) in r.transactions.iter().enumerate() { - transactions.push(tx.read().with_context(|| format!("transactions[{i}]"))?) - } - } - } - - let pubdata_params = if let Some(pubdata_params) = &r.pubdata_params { - Some(PubdataParams { - l2_da_validator_address: required(&pubdata_params.l2_da_validator_address) - .and_then(|a| parse_h160(a)) - .context("operator_address")?, - pubdata_type: required(&pubdata_params.pubdata_type) - .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?)) - .context("l1_batch_commit_data_generator_mode")? - .parse(), - }) - } else { - None - }; - - Ok(Self { - protocol_version, - hash: required(&r.hash) - .and_then(|h| parse_h256(h)) - .context("hash")?, - l1_batch_number: L1BatchNumber( - *required(&r.l1_batch_number).context("l1_batch_number")?, - ), - timestamp: *required(&r.timestamp).context("timestamp")?, - l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?, - l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?, - fair_pubdata_price: r.fair_pubdata_price, - virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?, - operator_address: required(&r.operator_address) - .and_then(|a| parse_h160(a)) - .context("operator_address")?, - transactions, - last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, - pubdata_params, - }) - } - - fn build(&self) -> Self::Proto { - let mut x = Self::Proto { - protocol_version: Some((self.protocol_version as u16).into()), - hash: Some(self.hash.as_bytes().into()), - l1_batch_number: Some(self.l1_batch_number.0), - timestamp: Some(self.timestamp), - l1_gas_price: Some(self.l1_gas_price), - l2_fair_gas_price: Some(self.l2_fair_gas_price), - fair_pubdata_price: self.fair_pubdata_price, - virtual_blocks: Some(self.virtual_blocks), - operator_address: Some(self.operator_address.as_bytes().into()), - // Transactions are stored in execution order, therefore order is deterministic. 
- transactions: vec![], - transactions_v25: vec![], - last_in_batch: Some(self.last_in_batch), - pubdata_params: self - .pubdata_params - .map(|pubdata_params| proto::PubdataParams { - l2_da_validator_address: Some( - pubdata_params.l2_da_validator_address.as_bytes().into(), - ), - pubdata_type: Some(proto::L1BatchCommitDataGeneratorMode::new( - &pubdata_params.pubdata_type, - ) as i32), - }), - }; - match self.protocol_version { - v if v >= ProtocolVersionId::Version25 => { - x.transactions_v25 = self.transactions.iter().map(ProtoRepr::build).collect(); - } - _ => { - x.transactions = self.transactions.iter().map(ProtoRepr::build).collect(); - } - } - x - } + pub pubdata_params: PubdataParams, } impl Payload { @@ -263,337 +60,3 @@ impl Payload { validator::Payload(zksync_protobuf::encode(self)) } } - -impl ProtoRepr for proto::TransactionV25 { - type Type = Transaction; - - fn read(&self) -> anyhow::Result { - use proto::transaction_v25::T; - let tx = match required(&self.t)? { - T::L1(l1) => abi::Transaction::L1 { - tx: required(&l1.rlp) - .and_then(|x| { - let tokens = ethabi::decode(&[abi::L2CanonicalTransaction::schema()], x) - .context("ethabi::decode()")?; - // Unwrap is safe because `ethabi::decode` does the verification. - let tx = - abi::L2CanonicalTransaction::decode(tokens.into_iter().next().unwrap()) - .context("L2CanonicalTransaction::decode()")?; - Ok(tx) - }) - .context("rlp")? - .into(), - factory_deps: l1.factory_deps.clone(), - eth_block: 0, - }, - T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), - }; - tx.try_into() - } - - fn build(tx: &Self::Type) -> Self { - let tx = abi::Transaction::try_from(tx.clone()).unwrap(); - use proto::transaction_v25::T; - Self { - t: Some(match tx { - abi::Transaction::L1 { - tx, factory_deps, .. - } => T::L1(proto::L1Transaction { - rlp: Some(ethabi::encode(&[tx.encode()])), - factory_deps, - }), - abi::Transaction::L2(tx) => T::L2(proto::L2Transaction { rlp: Some(tx) }), - }), - } - } -} - -impl ProtoRepr for proto::Transaction { - type Type = Transaction; - - fn read(&self) -> anyhow::Result { - let common_data = required(&self.common_data).context("common_data")?; - let execute = required(&self.execute).context("execute")?; - Ok(Self::Type { - common_data: match common_data { - proto::transaction::CommonData::L1(common_data) => { - anyhow::ensure!( - *required(&common_data.deadline_block) - .context("common_data.deadline_block")? - == 0 - ); - anyhow::ensure!( - required(&common_data.eth_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.eth_hash")? 
- == H256::default() - ); - ExecuteTransactionCommon::L1(L1TxCommonData { - sender: required(&common_data.sender_address) - .and_then(|x| parse_h160(x)) - .context("common_data.sender_address")?, - serial_id: required(&common_data.serial_id) - .map(|x| PriorityOpId(*x)) - .context("common_data.serial_id")?, - layer_2_tip_fee: required(&common_data.layer_2_tip_fee) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.layer_2_tip_fee")?, - full_fee: required(&common_data.full_fee) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.full_fee")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - op_processing_type: required(&common_data.op_processing_type) - .and_then(|x| { - OpProcessingType::try_from(u8::try_from(*x)?) - .map_err(|_| anyhow!("u8::try_from")) - }) - .context("common_data.op_processing_type")?, - priority_queue_type: required(&common_data.priority_queue_type) - .and_then(|x| { - PriorityQueueType::try_from(u8::try_from(*x)?) - .map_err(|_| anyhow!("u8::try_from")) - }) - .context("common_data.priority_queue_type")?, - eth_block: *required(&common_data.eth_block) - .context("common_data.eth_block")?, - canonical_tx_hash: required(&common_data.canonical_tx_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.canonical_tx_hash")?, - to_mint: required(&common_data.to_mint) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.to_mint")?, - refund_recipient: required(&common_data.refund_recipient_address) - .and_then(|x| parse_h160(x)) - .context("common_data.refund_recipient_address")?, - }) - } - proto::transaction::CommonData::L2(common_data) => { - ExecuteTransactionCommon::L2(L2TxCommonData { - nonce: required(&common_data.nonce) - .map(|x| Nonce(*x)) - .context("common_data.nonce")?, - fee: Fee { - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - max_priority_fee_per_gas: required( - &common_data.max_priority_fee_per_gas, - ) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_priority_fee_per_gas")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - }, - initiator_address: required(&common_data.initiator_address) - .and_then(|x| parse_h160(x)) - .context("common_data.initiator_address")?, - signature: required(&common_data.signature) - .context("common_data.signature")? - .clone(), - transaction_type: required(&common_data.transaction_type) - .and_then(|x| Ok(TransactionType::try_from(*x)?)) - .context("common_data.transaction_type")?, - input: { - match &common_data.input { - None => None, - Some(input) => Some(InputData { - hash: required(&input.hash) - .and_then(|x| parse_h256(x)) - .context("common_data.input.hash")?, - data: required(&input.data) - .context("common_data.input.data")? 
- .clone(), - }), - } - }, - paymaster_params: { - let params = required(&common_data.paymaster_params)?; - PaymasterParams { - paymaster: required(¶ms.paymaster_address) - .and_then(|x| parse_h160(x)) - .context("common_data.paymaster_params.paymaster_address")?, - paymaster_input: required(¶ms.paymaster_input) - .context("common_data.paymaster_params.paymaster_input")? - .clone(), - } - }, - }) - } - proto::transaction::CommonData::ProtocolUpgrade(common_data) => { - ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: required(&common_data.sender_address) - .and_then(|x| parse_h160(x)) - .context("common_data.sender_address")?, - upgrade_id: required(&common_data.upgrade_id) - .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) - .context("common_data.upgrade_id")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - eth_block: *required(&common_data.eth_block) - .context("common_data.eth_block")?, - canonical_tx_hash: required(&common_data.canonical_tx_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.canonical_tx_hash")?, - to_mint: required(&common_data.to_mint) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.to_mint")?, - refund_recipient: required(&common_data.refund_recipient_address) - .and_then(|x| parse_h160(x)) - .context("common_data.refund_recipient_address")?, - }) - } - }, - execute: Execute { - contract_address: execute - .contract_address - .as_ref() - .and_then(|x| parse_h160(x).ok()), - calldata: required(&execute.calldata).context("calldata")?.clone(), - value: required(&execute.value) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("execute.value")?, - factory_deps: execute.factory_deps.clone(), - }, - received_timestamp_ms: 0, // This timestamp is local to the node - raw_bytes: self.raw_bytes.as_ref().map(|x| x.clone().into()), - }) - } - - fn build(this: &Self::Type) -> Self { - let common_data = match &this.common_data { - ExecuteTransactionCommon::L1(data) => { - proto::transaction::CommonData::L1(proto::L1TxCommonData { - sender_address: Some(data.sender.as_bytes().into()), - serial_id: Some(data.serial_id.0), - deadline_block: Some(0), - layer_2_tip_fee: Some(u256_to_h256(data.layer_2_tip_fee).as_bytes().into()), - full_fee: Some(u256_to_h256(data.full_fee).as_bytes().into()), - max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), - gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), - gas_per_pubdata_limit: Some( - u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), - ), - op_processing_type: Some(data.op_processing_type as u32), - priority_queue_type: Some(data.priority_queue_type as u32), - eth_hash: Some(H256::default().as_bytes().into()), - eth_block: Some(data.eth_block), - canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), - to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), - refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), - }) - } - ExecuteTransactionCommon::L2(data) => { - proto::transaction::CommonData::L2(proto::L2TxCommonData { - nonce: 
Some(data.nonce.0), - gas_limit: Some(u256_to_h256(data.fee.gas_limit).as_bytes().into()), - max_fee_per_gas: Some(u256_to_h256(data.fee.max_fee_per_gas).as_bytes().into()), - max_priority_fee_per_gas: Some( - u256_to_h256(data.fee.max_priority_fee_per_gas) - .as_bytes() - .into(), - ), - gas_per_pubdata_limit: Some( - u256_to_h256(data.fee.gas_per_pubdata_limit) - .as_bytes() - .into(), - ), - initiator_address: Some(data.initiator_address.as_bytes().into()), - signature: Some(data.signature.clone()), - transaction_type: Some(data.transaction_type as u32), - input: data.input.as_ref().map(|input_data| proto::InputData { - data: Some(input_data.data.clone()), - hash: Some(input_data.hash.as_bytes().into()), - }), - paymaster_params: Some(proto::PaymasterParams { - paymaster_input: Some(data.paymaster_params.paymaster_input.clone()), - paymaster_address: Some(data.paymaster_params.paymaster.as_bytes().into()), - }), - }) - } - ExecuteTransactionCommon::ProtocolUpgrade(data) => { - proto::transaction::CommonData::ProtocolUpgrade( - proto::ProtocolUpgradeTxCommonData { - sender_address: Some(data.sender.as_bytes().into()), - upgrade_id: Some(data.upgrade_id as u32), - max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), - gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), - gas_per_pubdata_limit: Some( - u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), - ), - eth_hash: Some(H256::default().as_bytes().into()), - eth_block: Some(data.eth_block), - canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), - to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), - refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), - }, - ) - } - }; - let execute = proto::Execute { - contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()), - calldata: Some(this.execute.calldata.clone()), - value: Some(u256_to_h256(this.execute.value).as_bytes().into()), - factory_deps: this.execute.factory_deps.clone(), - }; - Self { - common_data: Some(common_data), - execute: Some(execute), - raw_bytes: this.raw_bytes.as_ref().map(|inner| inner.0.clone()), - } - } -} - -impl ProtoRepr for proto::AttesterCommittee { - type Type = attester::Committee; - - fn read(&self) -> anyhow::Result { - let members: Vec<_> = self - .members - .iter() - .enumerate() - .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) - .collect::>() - .context("members")?; - Self::Type::new(members) - } - - fn build(this: &Self::Type) -> Self { - Self { - members: this.iter().map(|x| x.build()).collect(), - } - } -} diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index 6083ad02910..49a69e8a36e 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -6,9 +6,8 @@ import "zksync/roles/validator.proto"; import "zksync/roles/attester.proto"; import "zksync/roles/node.proto"; -enum L1BatchCommitDataGeneratorMode { - Rollup = 0; - Validium = 1; +message BlockMetadata { + optional roles.validator.PayloadHash payload_hash = 1; // required } message Payload { @@ -31,8 +30,8 @@ message Payload { } message PubdataParams { - optional bytes l2_da_validator_address = 1; // required; H160 - optional L1BatchCommitDataGeneratorMode pubdata_type = 2; // optional, default to rollup + optional bytes l2_da_validator_address = 1; // required; H160 + optional L1BatchCommitDataGeneratorMode pubdata_type = 2; // required } message L1Transaction { @@ -149,3 +148,8 @@ 
message AttestationStatus { optional roles.validator.GenesisHash genesis = 1; // required optional uint64 next_batch_to_attest = 2; // required } + +enum L1BatchCommitDataGeneratorMode { + Rollup = 0; + Validium = 1; +} diff --git a/core/lib/dal/src/consensus/testonly.rs b/core/lib/dal/src/consensus/testonly.rs index 904a4c563d2..13086323b17 100644 --- a/core/lib/dal/src/consensus/testonly.rs +++ b/core/lib/dal/src/consensus/testonly.rs @@ -1,11 +1,17 @@ -use rand::{ - distributions::{Distribution, Standard}, - Rng, -}; +use rand::{distributions::Distribution, Rng}; +use zksync_consensus_utils::EncodeDist; -use super::AttestationStatus; +use super::*; -impl Distribution for Standard { +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> BlockMetadata { + BlockMetadata { + payload_hash: rng.gen(), + } + } +} + +impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> AttestationStatus { AttestationStatus { genesis: rng.gen(), @@ -13,3 +19,16 @@ impl Distribution for Standard { } } } + +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> GlobalConfig { + GlobalConfig { + genesis: rng.gen(), + registry_address: Some(rng.gen()), + seed_peers: self + .sample_range(rng) + .map(|_| (rng.gen(), self.sample(rng))) + .collect(), + } + } +} diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index e50ff5b1cae..df6ee24bfa9 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -1,10 +1,10 @@ use std::fmt::Debug; use rand::Rng; -use zksync_concurrency::ctx; +use zksync_concurrency::{ctx, testonly::abort_on_panic}; use zksync_protobuf::{ repr::{decode, encode}, - testonly::{test_encode, test_encode_random}, + testonly::{test_encode, test_encode_all_formats, FmtConv}, ProtoRepr, }; use zksync_test_account::Account; @@ -14,7 +14,7 @@ use zksync_types::{ Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction, }; -use super::{proto, AttestationStatus, Payload}; +use super::*; use crate::tests::mock_protocol_upgrade_transaction; fn execute(rng: &mut impl Rng) -> Execute { @@ -53,22 +53,29 @@ fn payload(rng: &mut impl Rng, protocol_version: ProtocolVersionId) -> Payload { }) .collect(), last_in_batch: rng.gen(), - pubdata_params: Some(PubdataParams { - pubdata_type: match rng.gen_range(0..2) { - 0 => L1BatchCommitmentMode::Rollup, - _ => L1BatchCommitmentMode::Validium, - }, - l2_da_validator_address: rng.gen(), - }), + pubdata_params: if protocol_version.is_pre_gateway() { + PubdataParams::default() + } else { + PubdataParams { + pubdata_type: match rng.gen_range(0..2) { + 0 => L1BatchCommitmentMode::Rollup, + _ => L1BatchCommitmentMode::Validium, + }, + l2_da_validator_address: rng.gen(), + } + }, } } /// Tests struct <-> proto struct conversions. #[test] fn test_encoding() { + abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - test_encode_random::(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); encode_decode::(l1_transaction(rng)); encode_decode::(l2_transaction(rng)); encode_decode::(l1_transaction(rng)); @@ -76,10 +83,15 @@ fn test_encoding() { encode_decode::( mock_protocol_upgrade_transaction().into(), ); - let p = payload(rng, ProtocolVersionId::Version24); - test_encode(rng, &p); - let p = payload(rng, ProtocolVersionId::Version25); - test_encode(rng, &p); + // Test encoding in the current and all the future versions. 
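+    // The open-ended range starts at the newest known protocol version and counts upward;
+    // `ProtocolVersionId::try_from` fails on the first value with no matching variant and the
+    // loop breaks, so every defined version from `latest()` onward gets a round-trip check.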
+ for v in ProtocolVersionId::latest() as u16.. { + let Ok(v) = ProtocolVersionId::try_from(v) else { + break; + }; + tracing::info!("version {v}"); + let p = payload(rng, v); + test_encode(rng, &p); + } } fn encode_decode(msg: P::Type) diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal/mod.rs similarity index 74% rename from core/lib/dal/src/consensus_dal.rs rename to core/lib/dal/src/consensus_dal/mod.rs index dd976f22086..a091421d857 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal/mod.rs @@ -1,16 +1,63 @@ use anyhow::Context as _; +use zksync_consensus_crypto::keccak256::Keccak256; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BlockStoreState, ReplicaState}; +use zksync_consensus_storage::{BlockStoreState, Last, ReplicaState}; use zksync_db_connection::{ connection::Connection, error::{DalError, DalResult, SqlxContext}, instrument::{InstrumentExt, Instrumented}, }; -use zksync_types::L2BlockNumber; +use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; +use zksync_types::{L1BatchNumber, L2BlockNumber}; -pub use crate::consensus::{proto, AttestationStatus, GlobalConfig, Payload}; +pub use crate::consensus::{proto, AttestationStatus, BlockMetadata, GlobalConfig, Payload}; use crate::{Core, CoreDal}; +#[cfg(test)] +mod tests; + +/// Hash of the batch. +pub fn batch_hash(info: &StoredBatchInfo) -> attester::BatchHash { + attester::BatchHash(Keccak256::from_bytes(info.hash().0)) +} + +/// Verifies that the transition from `old` to `new` is admissible. +pub fn verify_config_transition(old: &GlobalConfig, new: &GlobalConfig) -> anyhow::Result<()> { + anyhow::ensure!( + old.genesis.chain_id == new.genesis.chain_id, + "changing chain_id is not allowed: old = {:?}, new = {:?}", + old.genesis.chain_id, + new.genesis.chain_id, + ); + // Note that it may happen that the fork number didn't change, + // in case the binary was updated to support more fields in genesis struct. + // In such a case, the old binary was not able to connect to the consensus network, + // because of the genesis hash mismatch. + // TODO: Perhaps it would be better to deny unknown fields in the genesis instead. + // It would require embedding the genesis either as a json string or protobuf bytes within + // the global config, so that the global config can be parsed with + // `deny_unknown_fields:false` while genesis would be parsed with + // `deny_unknown_fields:true`. + anyhow::ensure!( + old.genesis.fork_number <= new.genesis.fork_number, + "transition to a past fork is not allowed: old = {:?}, new = {:?}", + old.genesis.fork_number, + new.genesis.fork_number, + ); + new.genesis.verify().context("genesis.verify()")?; + // This is a temporary hack until the `consensus_genesis()` RPC is disabled. + if new + == (&GlobalConfig { + genesis: old.genesis.clone(), + registry_address: None, + seed_peers: [].into(), + }) + { + anyhow::bail!("new config is equal to truncated old config, which means that it was sourced from the wrong endpoint"); + } + Ok(()) +} + /// Storage access methods for `zksync_core::consensus` module. 
#[derive(Debug)] pub struct ConsensusDal<'a, 'c> { @@ -22,8 +69,8 @@ pub struct ConsensusDal<'a, 'c> { pub enum InsertCertificateError { #[error("corresponding payload is missing")] MissingPayload, - #[error("certificate doesn't match the payload")] - PayloadMismatch, + #[error("certificate doesn't match the payload, payload = {0:?}")] + PayloadMismatch(Payload), #[error(transparent)] Dal(#[from] DalError), #[error(transparent)] @@ -85,6 +132,8 @@ impl ConsensusDal<'_, '_> { if got == want { return Ok(()); } + verify_config_transition(got, want)?; + // If genesis didn't change, just update the config. if got.genesis == want.genesis { let s = zksync_protobuf::serde::Serialize; @@ -103,30 +152,6 @@ impl ConsensusDal<'_, '_> { txn.commit().await?; return Ok(()); } - - // Verify the genesis change. - anyhow::ensure!( - got.genesis.chain_id == want.genesis.chain_id, - "changing chain_id is not allowed: old = {:?}, new = {:?}", - got.genesis.chain_id, - want.genesis.chain_id, - ); - // Note that it may happen that the fork number didn't change, - // in case the binary was updated to support more fields in genesis struct. - // In such a case, the old binary was not able to connect to the consensus network, - // because of the genesis hash mismatch. - // TODO: Perhaps it would be better to deny unknown fields in the genesis instead. - // It would require embedding the genesis either as a json string or protobuf bytes within - // the global config, so that the global config can be parsed with - // `deny_unknown_fields:false` while genesis would be parsed with - // `deny_unknown_fields:true`. - anyhow::ensure!( - got.genesis.fork_number <= want.genesis.fork_number, - "transition to a past fork is not allowed: old = {:?}, new = {:?}", - got.genesis.fork_number, - want.genesis.fork_number, - ); - want.genesis.verify().context("genesis.verify()")?; } // Reset the consensus state. @@ -305,47 +330,63 @@ impl ConsensusDal<'_, '_> { Ok(next) } - /// Fetches the last consensus certificate. + /// Fetches the block store state. + /// The blocks that are available to consensus are either pre-genesis or + /// have a consensus certificate. /// Currently, certificates are NOT generated synchronously with L2 blocks, - /// so it might NOT be the certificate for the last L2 block. - pub async fn block_certificates_range(&mut self) -> anyhow::Result { - // It cannot be older than genesis first block. - let mut start = self + /// so the `BlockStoreState.last` might be different than the last block in storage. + pub async fn block_store_state(&mut self) -> anyhow::Result { + let first = self.first_block().await.context("first_block()")?; + let cfg = self .global_config() - .await? - .context("genesis()")? - .genesis - .first_block; - start = start.max(self.first_block().await.context("first_block()")?); - let row = sqlx::query!( + .await + .context("global_config()")? + .context("global config is missing")?; + + // If there is a cert in storage, then the block range visible to consensus + // is [first block, block of last cert]. + if let Some(row) = sqlx::query!( r#" SELECT certificate FROM miniblocks_consensus - WHERE - number >= $1 ORDER BY number DESC LIMIT 1 "#, - i64::try_from(start.0)?, ) .instrument("block_certificate_range") .report_latency() .fetch_optional(self.storage) - .await?; - Ok(BlockStoreState { - first: start, - last: row - .map(|row| { + .await? 
+ { + return Ok(BlockStoreState { + first, + last: Some(Last::Final( zksync_protobuf::serde::Deserialize { deny_unknown_fields: true, } - .proto_fmt(row.certificate) - }) - .transpose()?, + .proto_fmt(row.certificate)?, + )), + }); + } + + // Otherwise it is [first block, min(genesis.first_block-1,last block)]. + let next = self + .next_block() + .await + .context("next_block()")? + .min(cfg.genesis.first_block); + Ok(BlockStoreState { + first, + // unwrap is ok, because `next > first >= 0`. + last: if next > first { + Some(Last::PreGenesis(next.prev().unwrap())) + } else { + None + }, }) } @@ -461,6 +502,19 @@ impl ConsensusDal<'_, '_> { .next()) } + /// Fetches L2 block metadata for the given block number. + pub async fn block_metadata( + &mut self, + n: validator::BlockNumber, + ) -> anyhow::Result> { + let Some(b) = self.block_payload(n).await.context("block_payload()")? else { + return Ok(None); + }; + Ok(Some(BlockMetadata { + payload_hash: b.encode().hash(), + })) + } + /// Inserts a certificate for the L2 block `cert.header().number`. /// Fails if certificate doesn't match the stored block. pub async fn insert_block_certificate( @@ -474,7 +528,7 @@ impl ConsensusDal<'_, '_> { .await? .ok_or(E::MissingPayload)?; if header.payload != want_payload.encode().hash() { - return Err(E::PayloadMismatch); + return Err(E::PayloadMismatch(want_payload)); } sqlx::query!( r#" @@ -558,11 +612,25 @@ impl ConsensusDal<'_, '_> { )) } + /// Fetches the L1 batch info for the given number. + pub async fn batch_info( + &mut self, + number: attester::BatchNumber, + ) -> anyhow::Result> { + let n = L1BatchNumber(number.0.try_into().context("overflow")?); + Ok(self + .storage + .blocks_dal() + .get_l1_batch_metadata(n) + .await + .context("get_l1_batch_metadata()")? + .map(|x| StoredBatchInfo::from(&x))) + } + /// Inserts a certificate for the L1 batch. /// Noop if a certificate for the same L1 batch is already present. /// Verification against previously stored attester committee is performed. - /// Batch hash is not verified - it cannot be performed due to circular dependency on - /// `zksync_l1_contract_interface`. + /// Batch hash verification is performed. pub async fn insert_batch_certificate( &mut self, cert: &attester::BatchQC, @@ -577,6 +645,14 @@ impl ConsensusDal<'_, '_> { .await .context("attester_committee()")? .context("attester committee is missing")?; + let hash = batch_hash( + &self + .batch_info(cert.message.number) + .await + .context("batch()")? 
+ .context("batch is missing")?, + ); + anyhow::ensure!(cert.message.hash == hash, "hash mismatch"); cert.verify(cfg.genesis.hash(), &committee) .context("cert.verify()")?; sqlx::query!( @@ -645,6 +721,8 @@ impl ConsensusDal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ), ( SELECT @@ -711,158 +789,3 @@ impl ConsensusDal<'_, '_> { })) } } - -#[cfg(test)] -mod tests { - use rand::Rng as _; - use zksync_consensus_roles::{attester, validator}; - use zksync_consensus_storage::ReplicaState; - use zksync_types::ProtocolVersion; - - use super::GlobalConfig; - use crate::{ - tests::{create_l1_batch_header, create_l2_block_header}, - ConnectionPool, Core, CoreDal, - }; - - #[tokio::test] - async fn replica_state_read_write() { - let rng = &mut rand::thread_rng(); - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); - for n in 0..3 { - let setup = validator::testonly::Setup::new(rng, 3); - let mut genesis = (*setup.genesis).clone(); - genesis.fork_number = validator::ForkNumber(n); - let cfg = GlobalConfig { - genesis: genesis.with_hash(), - registry_address: Some(rng.gen()), - seed_peers: [].into(), // TODO: rng.gen() for Host - }; - conn.consensus_dal() - .try_update_global_config(&cfg) - .await - .unwrap(); - assert_eq!( - cfg, - conn.consensus_dal().global_config().await.unwrap().unwrap() - ); - assert_eq!( - ReplicaState::default(), - conn.consensus_dal().replica_state().await.unwrap() - ); - for _ in 0..5 { - let want: ReplicaState = rng.gen(); - conn.consensus_dal().set_replica_state(&want).await.unwrap(); - assert_eq!( - cfg, - conn.consensus_dal().global_config().await.unwrap().unwrap() - ); - assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); - } - } - } - - #[tokio::test] - async fn test_batch_certificate() { - let rng = &mut rand::thread_rng(); - let setup = validator::testonly::Setup::new(rng, 3); - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - let cfg = GlobalConfig { - genesis: setup.genesis.clone(), - registry_address: Some(rng.gen()), - seed_peers: [].into(), - }; - conn.consensus_dal() - .try_update_global_config(&cfg) - .await - .unwrap(); - - let mut make_cert = |number: attester::BatchNumber| { - let m = attester::Batch { - genesis: setup.genesis.hash(), - hash: rng.gen(), - number, - }; - let mut sigs = attester::MultiSig::default(); - for k in &setup.attester_keys { - sigs.add(k.public(), k.sign_msg(m.clone()).sig); - } - attester::BatchQC { - message: m, - signatures: sigs, - } - }; - - // Required for inserting l2 blocks - conn.protocol_versions_dal() - .save_protocol_version_with_tx(&ProtocolVersion::default()) - .await - .unwrap(); - - // Insert some mock L2 blocks and L1 batches - let mut block_number = 0; - let mut batch_number = 0; - for _ in 0..3 { - for _ in 0..3 { - block_number += 1; - let l2_block = create_l2_block_header(block_number); - conn.blocks_dal().insert_l2_block(&l2_block).await.unwrap(); - } - batch_number += 1; - let l1_batch = create_l1_batch_header(batch_number); - conn.blocks_dal() - .insert_mock_l1_batch(&l1_batch) - .await - .unwrap(); - conn.blocks_dal() - .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) - .await - .unwrap(); - } - - let n = attester::BatchNumber(batch_number.into()); - - // Insert a batch certificate for the last L1 batch. 
- let want = make_cert(n); - conn.consensus_dal() - .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) - .await - .unwrap(); - conn.consensus_dal() - .insert_batch_certificate(&want) - .await - .unwrap(); - - // Reinserting a cert should fail. - assert!(conn - .consensus_dal() - .insert_batch_certificate(&make_cert(n)) - .await - .is_err()); - - // Retrieve the latest certificate. - let got_n = conn - .consensus_dal() - .last_batch_certificate_number() - .await - .unwrap() - .unwrap(); - let got = conn - .consensus_dal() - .batch_certificate(got_n) - .await - .unwrap() - .unwrap(); - assert_eq!(got, want); - - // Try insert batch certificate for non-existing batch - assert!(conn - .consensus_dal() - .insert_batch_certificate(&make_cert(n.next())) - .await - .is_err()); - } -} diff --git a/core/lib/dal/src/consensus_dal/tests.rs b/core/lib/dal/src/consensus_dal/tests.rs new file mode 100644 index 00000000000..694abc8508b --- /dev/null +++ b/core/lib/dal/src/consensus_dal/tests.rs @@ -0,0 +1,189 @@ +use rand::Rng as _; +use zksync_consensus_roles::{attester, validator}; +use zksync_consensus_storage::ReplicaState; +use zksync_types::{ + block::L1BatchTreeData, + commitment::{L1BatchCommitmentArtifacts, L1BatchCommitmentHash}, + ProtocolVersion, +}; + +use super::*; +use crate::{ + tests::{create_l1_batch_header, create_l2_block_header}, + ConnectionPool, Core, CoreDal, +}; + +#[tokio::test] +async fn replica_state_read_write() { + let rng = &mut rand::thread_rng(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); + for n in 0..3 { + let setup = validator::testonly::Setup::new(rng, 3); + let mut genesis = (*setup.genesis).clone(); + genesis.fork_number = validator::ForkNumber(n); + let cfg = GlobalConfig { + genesis: genesis.with_hash(), + registry_address: Some(rng.gen()), + seed_peers: [].into(), // TODO: rng.gen() for Host + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); + assert_eq!( + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() + ); + assert_eq!( + ReplicaState::default(), + conn.consensus_dal().replica_state().await.unwrap() + ); + for _ in 0..5 { + let want: ReplicaState = rng.gen(); + conn.consensus_dal().set_replica_state(&want).await.unwrap(); + assert_eq!( + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() + ); + assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); + } + } +} + +#[tokio::test] +async fn test_batch_certificate() { + let rng = &mut rand::thread_rng(); + let setup = validator::testonly::Setup::new(rng, 3); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + let cfg = GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: Some(rng.gen()), + seed_peers: [].into(), + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); + + let make_cert = |number: attester::BatchNumber, hash: attester::BatchHash| { + let m = attester::Batch { + genesis: setup.genesis.hash(), + hash, + number, + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } + attester::BatchQC { + message: m, + signatures: sigs, + } + }; + + // Required for inserting l2 blocks + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + // 
Insert some mock L2 blocks and L1 batches + let mut block_number = 0; + let mut batch_number = 0; + for _ in 0..3 { + for _ in 0..3 { + block_number += 1; + let l2_block = create_l2_block_header(block_number); + conn.blocks_dal().insert_l2_block(&l2_block).await.unwrap(); + } + batch_number += 1; + let l1_batch = create_l1_batch_header(batch_number); + conn.blocks_dal() + .insert_mock_l1_batch(&l1_batch) + .await + .unwrap(); + conn.blocks_dal() + .save_l1_batch_tree_data( + l1_batch.number, + &L1BatchTreeData { + hash: rng.gen(), + rollup_last_leaf_index: rng.gen(), + }, + ) + .await + .unwrap(); + conn.blocks_dal() + .save_l1_batch_commitment_artifacts( + l1_batch.number, + &L1BatchCommitmentArtifacts { + commitment_hash: L1BatchCommitmentHash { + pass_through_data: rng.gen(), + aux_output: rng.gen(), + meta_parameters: rng.gen(), + commitment: rng.gen(), + }, + l2_l1_merkle_root: rng.gen(), + compressed_state_diffs: None, + compressed_initial_writes: None, + compressed_repeated_writes: None, + zkporter_is_available: false, + aux_commitments: None, + aggregation_root: rng.gen(), + local_root: rng.gen(), + state_diff_hash: rng.gen(), + }, + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) + .await + .unwrap(); + } + + let n = attester::BatchNumber(batch_number.into()); + + // Insert a batch certificate for the last L1 batch. + let hash = batch_hash(&conn.consensus_dal().batch_info(n).await.unwrap().unwrap()); + let want = make_cert(n, hash); + conn.consensus_dal() + .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) + .await + .unwrap(); + conn.consensus_dal() + .insert_batch_certificate(&want) + .await + .unwrap(); + + // Reinserting a cert should fail. + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n, hash)) + .await + .is_err()); + + // Retrieve the latest certificate. + let got_n = conn + .consensus_dal() + .last_batch_certificate_number() + .await + .unwrap() + .unwrap(); + let got = conn + .consensus_dal() + .batch_certificate(got_n) + .await + .unwrap() + .unwrap(); + assert_eq!(got, want); + + // Try insert batch certificate for non-existing batch + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n.next(), rng.gen())) + .await + .is_err()); +} diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs index 36dfaa1a466..857e2973ae3 100644 --- a/core/lib/dal/src/factory_deps_dal.rs +++ b/core/lib/dal/src/factory_deps_dal.rs @@ -94,6 +94,7 @@ impl FactoryDepsDal<'_, '_> { &mut self, bootloader_hash: H256, default_aa_hash: H256, + evm_emulator_hash: Option, ) -> anyhow::Result { let bootloader_bytecode = self .get_sealed_factory_dep(bootloader_hash) @@ -115,9 +116,26 @@ impl FactoryDepsDal<'_, '_> { code: bytes_to_be_words(default_aa_bytecode), hash: default_aa_hash, }; + + let evm_emulator_code = if let Some(evm_emulator_hash) = evm_emulator_hash { + let evm_emulator_bytecode = self + .get_sealed_factory_dep(evm_emulator_hash) + .await + .context("failed loading EVM emulator code")? 
+ .with_context(|| format!("EVM emulator code with hash {evm_emulator_hash:?} should be present in the database"))?; + + Some(SystemContractCode { + code: bytes_to_be_words(evm_emulator_bytecode), + hash: evm_emulator_hash, + }) + } else { + None + }; + Ok(BaseSystemContracts { bootloader: bootloader_code, default_aa: default_aa_code, + evm_emulator: evm_emulator_code, }) } diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index a57ebd9e48a..20b428adec4 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -23,8 +23,7 @@ use crate::{ snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal, sync_dal::SyncDal, system_dal::SystemDal, tee_proof_generation_dal::TeeProofGenerationDal, - tee_verifier_input_producer_dal::TeeVerifierInputProducerDal, tokens_dal::TokensDal, - tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, + tokens_dal::TokensDal, tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, transactions_web3_dal::TransactionsWeb3Dal, vm_runner_dal::VmRunnerDal, }; @@ -56,7 +55,6 @@ pub mod storage_web3_dal; pub mod sync_dal; pub mod system_dal; pub mod tee_proof_generation_dal; -pub mod tee_verifier_input_producer_dal; pub mod tokens_dal; pub mod tokens_web3_dal; pub mod transactions_dal; @@ -81,8 +79,6 @@ where fn transactions_web3_dal(&mut self) -> TransactionsWeb3Dal<'_, 'a>; - fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a>; - fn blocks_dal(&mut self) -> BlocksDal<'_, 'a>; fn blocks_web3_dal(&mut self) -> BlocksWeb3Dal<'_, 'a>; @@ -155,10 +151,6 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { TransactionsWeb3Dal { storage: self } } - fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a> { - TeeVerifierInputProducerDal { storage: self } - } - fn blocks_dal(&mut self) -> BlocksDal<'_, 'a> { BlocksDal { storage: self } } diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index 479649f8509..12e41ac780a 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -1,7 +1,6 @@ pub mod storage_block; -use anyhow::Context as _; use zksync_db_connection::error::SqlxContext; -use zksync_types::{ProtocolVersionId, H160, H256}; +use zksync_types::ProtocolVersionId; mod call; pub mod storage_base_token_ratio; @@ -19,18 +18,6 @@ pub mod storage_verification_request; #[cfg(test)] mod tests; -pub(crate) fn parse_h256(bytes: &[u8]) -> anyhow::Result { - Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) -} - -fn parse_h256_opt(bytes: Option<&[u8]>) -> anyhow::Result { - parse_h256(bytes.context("missing data")?) -} - -pub(crate) fn parse_h160(bytes: &[u8]) -> anyhow::Result { - Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) -} - pub(crate) fn parse_protocol_version(raw: i32) -> sqlx::Result { u16::try_from(raw) .decode_column("protocol_version")? 
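The byte-parsing helpers removed just above are not dropped from the workspace: later in this patch, `storage_sync.rs` switches to importing `parse_h160`, `parse_h256` and `parse_h256_opt` from `zksync_types`. A minimal, self-contained sketch of what these helpers do, with the concrete return types (`anyhow::Result<H256>` / `anyhow::Result<H160>`) and `pub` visibility assumed from the bodies shown above:

    use anyhow::Context as _;
    use zksync_types::{H160, H256};

    /// Parses a 32-byte slice into an `H256` value.
    pub fn parse_h256(bytes: &[u8]) -> anyhow::Result<H256> {
        Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into())
    }

    /// Like `parse_h256`, but for a nullable column.
    pub fn parse_h256_opt(bytes: Option<&[u8]>) -> anyhow::Result<H256> {
        parse_h256(bytes.context("missing data")?)
    }

    /// Parses a 20-byte slice into an `H160` address.
    pub fn parse_h160(bytes: &[u8]) -> anyhow::Result<H160> {
        Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into())
    }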
diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 3d9264ddd9e..159ed71cc3e 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -6,7 +6,7 @@ use thiserror::Error; use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ api, - block::{L1BatchHeader, L2BlockHeader}, + block::{L1BatchHeader, L2BlockHeader, UnsealedL1BatchHeader}, commitment::{L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, PubdataParams}, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput}, l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, @@ -44,6 +44,7 @@ pub(crate) struct StorageL1BatchHeader { pub used_contract_hashes: serde_json::Value, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub protocol_version: Option, // `system_logs` are introduced as part of boojum and will be absent in all batches generated prior to boojum. @@ -52,6 +53,7 @@ pub(crate) struct StorageL1BatchHeader { // will be exactly 7 (or 8 in the event of a protocol upgrade) system logs. pub system_logs: Vec>, pub pubdata_input: Option>, + pub fee_address: Vec, } impl StorageL1BatchHeader { @@ -82,12 +84,14 @@ impl StorageL1BatchHeader { base_system_contracts_hashes: convert_base_system_contracts_hashes( self.bootloader_code_hash, self.default_aa_code_hash, + self.evm_emulator_code_hash, ), system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: self .protocol_version .map(|v| (v as u16).try_into().unwrap()), pubdata_input: self.pubdata_input, + fee_address: Address::from_slice(&self.fee_address), } } } @@ -103,6 +107,7 @@ fn convert_l2_to_l1_logs(raw_logs: Vec>) -> Vec { fn convert_base_system_contracts_hashes( bootloader_code_hash: Option>, default_aa_code_hash: Option>, + evm_emulator_code_hash: Option>, ) -> BaseSystemContractsHashes { BaseSystemContractsHashes { bootloader: bootloader_code_hash @@ -111,6 +116,7 @@ fn convert_base_system_contracts_hashes( default_aa: default_aa_code_hash .map(|hash| H256::from_slice(&hash)) .expect("should not be none"), + evm_emulator: evm_emulator_code_hash.map(|hash| H256::from_slice(&hash)), } } @@ -134,15 +140,13 @@ pub(crate) struct StorageL1Batch { pub zkporter_is_available: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub l2_to_l1_messages: Vec>, pub l2_l1_merkle_root: Option>, pub compressed_initial_writes: Option>, pub compressed_repeated_writes: Option>, - pub aggregation_root: Option>, - pub local_root: Option>, - pub used_contract_hashes: serde_json::Value, pub system_logs: Vec>, pub compressed_state_diffs: Option>, @@ -150,6 +154,9 @@ pub(crate) struct StorageL1Batch { pub events_queue_commitment: Option>, pub bootloader_initial_content_commitment: Option>, pub pubdata_input: Option>, + pub fee_address: Vec, + pub aggregation_root: Option>, + pub local_root: Option>, pub state_diff_hash: Option>, pub inclusion_data: Option>, } @@ -182,12 +189,14 @@ impl StorageL1Batch { base_system_contracts_hashes: convert_base_system_contracts_hashes( self.bootloader_code_hash, self.default_aa_code_hash, + self.evm_emulator_code_hash, ), system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: self .protocol_version .map(|v| (v as u16).try_into().unwrap()), pubdata_input: self.pubdata_input, + fee_address: Address::from_slice(&self.fee_address), } } 
} @@ -245,6 +254,10 @@ impl TryFrom for L1BatchMetadata { .default_aa_code_hash .ok_or(L1BatchMetadataError::Incomplete("default_aa_code_hash"))?, ), + evm_emulator_code_hash: batch + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), protocol_version: batch .protocol_version .map(|v| (v as u16).try_into().unwrap()), @@ -262,6 +275,38 @@ impl TryFrom for L1BatchMetadata { } } +/// Partial projection of the columns corresponding to an unsealed [`L1BatchHeader`]. +#[derive(Debug, Clone)] +pub(crate) struct UnsealedStorageL1Batch { + pub number: i64, + pub timestamp: i64, + pub protocol_version: Option, + pub fee_address: Vec, + pub l1_gas_price: i64, + pub l2_fair_gas_price: i64, + pub fair_pubdata_price: Option, +} + +impl From for UnsealedL1BatchHeader { + fn from(batch: UnsealedStorageL1Batch) -> Self { + let protocol_version: Option = batch + .protocol_version + .map(|v| (v as u16).try_into().unwrap()); + Self { + number: L1BatchNumber(batch.number as u32), + timestamp: batch.timestamp as u64, + protocol_version, + fee_address: Address::from_slice(&batch.fee_address), + fee_input: BatchFeeInput::for_protocol_version( + protocol_version.unwrap_or_else(ProtocolVersionId::last_potentially_undefined), + batch.l2_fair_gas_price as u64, + batch.fair_pubdata_price.map(|p| p as u64), + batch.l1_gas_price as u64, + ), + } + } +} + #[derive(Debug, Clone, sqlx::FromRow)] pub(crate) struct StorageBlockDetails { pub number: i64, @@ -284,6 +329,7 @@ pub(crate) struct StorageBlockDetails { pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub fee_account_address: Vec, pub protocol_version: Option, } @@ -329,6 +375,7 @@ impl From for api::BlockDetails { base_system_contracts_hashes: convert_base_system_contracts_hashes( details.bootloader_code_hash, details.default_aa_code_hash, + details.evm_emulator_code_hash, ), }; api::BlockDetails { @@ -361,6 +408,7 @@ pub(crate) struct StorageL1BatchDetails { pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, } impl From for api::L1BatchDetails { @@ -404,6 +452,7 @@ impl From for api::L1BatchDetails { base_system_contracts_hashes: convert_base_system_contracts_hashes( details.bootloader_code_hash, details.default_aa_code_hash, + details.evm_emulator_code_hash, ), }; api::L1BatchDetails { @@ -427,8 +476,7 @@ pub(crate) struct StorageL2BlockHeader { // L2 gas price assumed in the corresponding batch pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, - pub l2_da_validator_address: Vec, - pub pubdata_type: String, + pub evm_emulator_code_hash: Option>, pub protocol_version: Option, pub fair_pubdata_price: Option, @@ -445,6 +493,8 @@ pub(crate) struct StorageL2BlockHeader { /// This value should bound the maximal amount of gas that can be spent by transactions in the miniblock. 
pub gas_limit: Option, pub logs_bloom: Option>, + pub l2_da_validator_address: Vec, + pub pubdata_type: String, } impl From for L2BlockHeader { @@ -482,11 +532,8 @@ impl From for L2BlockHeader { base_system_contracts_hashes: convert_base_system_contracts_hashes( row.bootloader_code_hash, row.default_aa_code_hash, + row.evm_emulator_code_hash, ), - pubdata_params: PubdataParams { - l2_da_validator_address: Address::from_slice(&row.l2_da_validator_address), - pubdata_type: L1BatchCommitmentMode::from_str(&row.pubdata_type).unwrap(), - }, gas_per_pubdata_limit: row.gas_per_pubdata_limit as u64, protocol_version, virtual_blocks: row.virtual_blocks as u32, @@ -495,6 +542,10 @@ impl From for L2BlockHeader { .logs_bloom .map(|b| Bloom::from_slice(&b)) .unwrap_or_default(), + pubdata_params: PubdataParams { + l2_da_validator_address: Address::from_slice(&row.l2_da_validator_address), + pubdata_type: L1BatchCommitmentMode::from_str(&row.pubdata_type).unwrap(), + }, } } } diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs index e53bf7b9d0a..a833236a7b6 100644 --- a/core/lib/dal/src/models/storage_protocol_version.rs +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -16,6 +16,7 @@ pub struct StorageProtocolVersion { pub snark_wrapper_vk_hash: Vec, pub bootloader_code_hash: Vec, pub default_account_code_hash: Vec, + pub evm_emulator_code_hash: Option>, } pub(crate) fn protocol_version_from_storage( @@ -34,6 +35,10 @@ pub(crate) fn protocol_version_from_storage( base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: H256::from_slice(&storage_version.bootloader_code_hash), default_aa: H256::from_slice(&storage_version.default_account_code_hash), + evm_emulator: storage_version + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), }, tx, } @@ -45,6 +50,7 @@ pub struct StorageApiProtocolVersion { pub timestamp: i64, pub bootloader_code_hash: Vec, pub default_account_code_hash: Vec, + pub evm_emulator_code_hash: Option>, pub upgrade_tx_hash: Option>, } @@ -60,6 +66,10 @@ impl From for api::ProtocolVersion { storage_protocol_version.timestamp as u64, H256::from_slice(&storage_protocol_version.bootloader_code_hash), H256::from_slice(&storage_protocol_version.default_account_code_hash), + storage_protocol_version + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), l2_system_upgrade_tx_hash, ) } diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 7bb3c228748..3f80f52c56e 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -5,13 +5,11 @@ use zksync_db_connection::error::SqlxContext; use zksync_types::{ api::en, commitment::{L1BatchCommitmentMode, PubdataParams}, - Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256, + parse_h160, parse_h256, parse_h256_opt, Address, L1BatchNumber, L2BlockNumber, + ProtocolVersionId, Transaction, H256, }; -use crate::{ - consensus_dal::Payload, - models::{parse_h160, parse_h256, parse_h256_opt, parse_protocol_version}, -}; +use crate::{consensus_dal::Payload, models::parse_protocol_version}; #[derive(Debug, Clone, sqlx::FromRow)] pub(crate) struct StorageSyncBlock { @@ -26,6 +24,7 @@ pub(crate) struct StorageSyncBlock { pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub fee_account_address: Vec, pub protocol_version: i32, pub virtual_blocks: i64, 
@@ -82,12 +81,12 @@ impl TryFrom for SyncBlock { .decode_column("bootloader_code_hash")?, default_aa: parse_h256_opt(block.default_aa_code_hash.as_deref()) .decode_column("default_aa_code_hash")?, - }, - pubdata_params: PubdataParams { - pubdata_type: L1BatchCommitmentMode::from_str(&block.pubdata_type) - .expect("Invalid pubdata type"), - l2_da_validator_address: parse_h160(&block.l2_da_validator_address) - .decode_column("l2_da_validator_address")?, + evm_emulator: block + .evm_emulator_code_hash + .as_deref() + .map(parse_h256) + .transpose() + .decode_column("evm_emulator_code_hash")?, }, fee_account_address: parse_h160(&block.fee_account_address) .decode_column("fee_account_address")?, @@ -97,6 +96,12 @@ impl TryFrom for SyncBlock { .decode_column("virtual_blocks")?, hash: parse_h256(&block.hash).decode_column("hash")?, protocol_version: parse_protocol_version(block.protocol_version)?, + pubdata_params: PubdataParams { + pubdata_type: L1BatchCommitmentMode::from_str(&block.pubdata_type) + .decode_column("Invalid pubdata type")?, + l2_da_validator_address: parse_h160(&block.l2_da_validator_address) + .decode_column("l2_da_validator_address")?, + }, }) } } @@ -134,7 +139,7 @@ impl SyncBlock { operator_address: self.fee_account_address, transactions, last_in_batch: self.last_in_batch, - pubdata_params: Some(self.pubdata_params), + pubdata_params: self.pubdata_params, } } } diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index bb219ee1d61..78daaebb335 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -352,6 +352,16 @@ impl From for TransactionReceipt { .index_in_block .map_or_else(Default::default, U64::from); + // For better compatibility with various clients, we never return `None` recipient address. + let to = storage_receipt + .transfer_to + .or(storage_receipt.execute_contract_address) + .and_then(|addr| { + serde_json::from_value::>(addr) + .expect("invalid address value in the database") + }) + .unwrap_or_else(Address::zero); + let block_hash = H256::from_slice(&storage_receipt.block_hash); TransactionReceipt { transaction_hash: H256::from_slice(&storage_receipt.tx_hash), @@ -361,15 +371,7 @@ impl From for TransactionReceipt { l1_batch_tx_index: storage_receipt.l1_batch_tx_index.map(U64::from), l1_batch_number: storage_receipt.l1_batch_number.map(U64::from), from: H160::from_slice(&storage_receipt.initiator_address), - to: storage_receipt - .transfer_to - .or(storage_receipt.execute_contract_address) - .map(|addr| { - serde_json::from_value::
(addr) - .expect("invalid address value in the database") - }) - // For better compatibility with various clients, we never return null. - .or_else(|| Some(Address::default())), + to: Some(to), cumulative_gas_used: Default::default(), // TODO: Should be actually calculated (SMA-1183). gas_used: { let refunded_gas: U256 = storage_receipt.refunded_gas.into(); @@ -508,6 +510,10 @@ impl StorageApiTransaction { .signature .and_then(|signature| PackedEthSignature::deserialize_packed(&signature).ok()); + let to = serde_json::from_value(self.execute_contract_address) + .ok() + .unwrap_or_default(); + // For legacy and EIP-2930 transactions it is gas price willing to be paid by the sender in wei. // For other transactions it should be the effective gas price if transaction is included in block, // otherwise this value should be set equal to the max fee per gas. @@ -528,7 +534,7 @@ impl StorageApiTransaction { block_number: self.block_number.map(|number| U64::from(number as u64)), transaction_index: self.index_in_block.map(|idx| U64::from(idx as u64)), from: Some(Address::from_slice(&self.initiator_address)), - to: Some(serde_json::from_value(self.execute_contract_address).unwrap()), + to, value: bigdecimal_to_u256(self.value), gas_price: Some(bigdecimal_to_u256(gas_price)), gas: bigdecimal_to_u256(self.gas_limit.unwrap_or_else(BigDecimal::zero)), diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 72ae811ce76..fcc756e3006 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -45,17 +45,22 @@ impl ProtocolVersionsDal<'_, '_> { timestamp, bootloader_code_hash, default_account_code_hash, + evm_emulator_code_hash, upgrade_tx_hash, created_at ) VALUES - ($1, $2, $3, $4, $5, NOW()) + ($1, $2, $3, $4, $5, $6, NOW()) ON CONFLICT DO NOTHING "#, version.minor as i32, timestamp as i64, base_system_contracts_hashes.bootloader.as_bytes(), base_system_contracts_hashes.default_aa.as_bytes(), + base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), tx_hash.as_ref().map(H256::as_bytes), ) .instrument("save_protocol_version#minor") @@ -185,6 +190,43 @@ impl ProtocolVersionsDal<'_, '_> { ProtocolVersionId::try_from(row.id as u16).map_err(|err| sqlx::Error::Decode(err.into())) } + /// Returns base system contracts' hashes. Prefer `load_base_system_contracts_by_version_id` if + /// you also want to load the contracts themselves AND expect the contracts to be in the DB + /// already. 
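+    /// Returns `None` if the requested protocol version is not present in `protocol_versions`.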
+ pub async fn get_base_system_contract_hashes_by_version_id( + &mut self, + version_id: u16, + ) -> anyhow::Result> { + let row = sqlx::query!( + r#" + SELECT + bootloader_code_hash, + default_account_code_hash, + evm_emulator_code_hash + FROM + protocol_versions + WHERE + id = $1 + "#, + i32::from(version_id) + ) + .instrument("get_base_system_contract_hashes_by_version_id") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) + .await + .context("cannot fetch system contract hashes")?; + + Ok(if let Some(row) = row { + Some(BaseSystemContractsHashes { + bootloader: H256::from_slice(&row.bootloader_code_hash), + default_aa: H256::from_slice(&row.default_account_code_hash), + evm_emulator: row.evm_emulator_code_hash.as_deref().map(H256::from_slice), + }) + } else { + None + }) + } + pub async fn load_base_system_contracts_by_version_id( &mut self, version_id: u16, @@ -193,7 +235,8 @@ impl ProtocolVersionsDal<'_, '_> { r#" SELECT bootloader_code_hash, - default_account_code_hash + default_account_code_hash, + evm_emulator_code_hash FROM protocol_versions WHERE @@ -201,7 +244,9 @@ impl ProtocolVersionsDal<'_, '_> { "#, i32::from(version_id) ) - .fetch_optional(self.storage.conn()) + .instrument("load_base_system_contracts_by_version_id") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) .await .context("cannot fetch system contract hashes")?; @@ -212,6 +257,7 @@ impl ProtocolVersionsDal<'_, '_> { .get_base_system_contracts( H256::from_slice(&row.bootloader_code_hash), H256::from_slice(&row.default_account_code_hash), + row.evm_emulator_code_hash.as_deref().map(H256::from_slice), ) .await?; Some(contracts) @@ -232,6 +278,7 @@ impl ProtocolVersionsDal<'_, '_> { protocol_versions.timestamp, protocol_versions.bootloader_code_hash, protocol_versions.default_account_code_hash, + protocol_versions.evm_emulator_code_hash, protocol_patches.patch, protocol_patches.snark_wrapper_vk_hash FROM @@ -373,6 +420,8 @@ impl ProtocolVersionsDal<'_, '_> { protocol_version FROM l1_batches + WHERE + is_sealed ORDER BY number DESC LIMIT diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs index a3a7a162c3d..adc3957f872 100644 --- a/core/lib/dal/src/protocol_versions_web3_dal.rs +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ -21,6 +21,7 @@ impl ProtocolVersionsWeb3Dal<'_, '_> { timestamp, bootloader_code_hash, default_account_code_hash, + evm_emulator_code_hash, upgrade_tx_hash FROM protocol_versions diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index f3a20ac39fa..10d2cfe6152 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -15,6 +15,13 @@ use zksync_utils::h256_to_u256; use crate::{models::storage_block::ResolvedL1BatchForL2Block, Core, CoreDal}; +/// Raw bytecode information returned by [`StorageWeb3Dal::get_contract_code_unchecked()`]. 
+#[derive(Debug)] +pub struct RawBytecode { + pub bytecode_hash: H256, + pub bytecode: Vec, +} + #[derive(Debug)] pub struct StorageWeb3Dal<'a, 'c> { pub(crate) storage: &'a mut Connection<'c, Core>, @@ -178,6 +185,8 @@ impl StorageWeb3Dal<'_, '_> { MAX(number) + 1 FROM l1_batches + WHERE + is_sealed ), ( SELECT @@ -232,16 +241,17 @@ impl StorageWeb3Dal<'_, '_> { &mut self, address: Address, block_number: L2BlockNumber, - ) -> DalResult>> { + ) -> DalResult> { let hashed_key = get_code_key(&address).hashed_key(); let row = sqlx::query!( r#" SELECT + bytecode_hash, bytecode FROM ( SELECT - * + value FROM storage_logs WHERE @@ -252,7 +262,7 @@ impl StorageWeb3Dal<'_, '_> { storage_logs.operation_number DESC LIMIT 1 - ) t + ) deploy_log JOIN factory_deps ON value = factory_deps.bytecode_hash WHERE value != $3 @@ -266,7 +276,11 @@ impl StorageWeb3Dal<'_, '_> { .with_arg("block_number", &block_number) .fetch_optional(self.storage) .await?; - Ok(row.map(|row| row.bytecode)) + + Ok(row.map(|row| RawBytecode { + bytecode_hash: H256::from_slice(&row.bytecode_hash), + bytecode: row.bytecode, + })) } /// Given bytecode hash, returns bytecode and L2 block number at which it was inserted. diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index c4043b6641e..55e6543c028 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -35,6 +35,8 @@ impl SyncDal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ), ( SELECT @@ -50,6 +52,7 @@ impl SyncDal<'_, '_> { miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + miniblocks.evm_emulator_code_hash, miniblocks.virtual_blocks, miniblocks.hash, miniblocks.protocol_version AS "protocol_version!", diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index db56b9d0e3e..755d0276910 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -10,10 +10,7 @@ use zksync_db_connection::{ }; use zksync_types::{tee_types::TeeType, L1BatchNumber}; -use crate::{ - models::storage_tee_proof::StorageTeeProof, - tee_verifier_input_producer_dal::TeeVerifierInputProducerJobStatus, Core, -}; +use crate::{models::storage_tee_proof::StorageTeeProof, Core}; #[derive(Debug)] pub struct TeeProofGenerationDal<'a, 'c> { @@ -35,65 +32,76 @@ impl TeeProofGenerationDal<'_, '_> { &mut self, tee_type: TeeType, processing_timeout: Duration, - min_batch_number: Option, + min_batch_number: L1BatchNumber, ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); - let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); - let query = sqlx::query!( + let min_batch_number = i64::from(min_batch_number.0); + sqlx::query!( r#" - UPDATE tee_proof_generation_details - SET - status = $1, - updated_at = NOW(), - prover_taken_at = NOW() - WHERE - tee_type = $2 - AND l1_batch_number = ( - SELECT - proofs.l1_batch_number - FROM - tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number - WHERE - inputs.status = $3 - AND ( - proofs.status = $4 + WITH upsert AS ( + SELECT + p.l1_batch_number + FROM + proof_generation_details p + LEFT JOIN + tee_proof_generation_details tee + ON + p.l1_batch_number = tee.l1_batch_number + AND tee.tee_type = $1 + WHERE + ( + p.l1_batch_number >= $5 + AND p.vm_run_data_blob_url IS NOT NULL + AND p.proof_gen_data_blob_url IS NOT NULL + ) + 
AND ( + tee.l1_batch_number IS NULL + OR ( + tee.status = $3 OR ( - proofs.status = $1 - AND proofs.prover_taken_at < NOW() - $5::INTERVAL + tee.status = $2 + AND tee.prover_taken_at < NOW() - $4::INTERVAL ) ) - AND proofs.l1_batch_number >= $6 - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) + ) + FETCH FIRST ROW ONLY + ) + + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at + ) + SELECT + l1_batch_number, + $1, + $2, + NOW(), + NOW(), + NOW() + FROM + upsert + ON CONFLICT (l1_batch_number, tee_type) DO + UPDATE + SET + status = $2, + updated_at = NOW(), + prover_taken_at = NOW() RETURNING - tee_proof_generation_details.l1_batch_number + l1_batch_number "#, - TeeProofGenerationJobStatus::PickedByProver.to_string(), tee_type.to_string(), - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, + TeeProofGenerationJobStatus::PickedByProver.to_string(), TeeProofGenerationJobStatus::Unpicked.to_string(), processing_timeout, min_batch_number - ); - - let batch_number = Instrumented::new("lock_batch_for_proving") - .with_arg("tee_type", &tee_type) - .with_arg("processing_timeout", &processing_timeout) - .with_arg("l1_batch_number", &min_batch_number) - .with(query) - .fetch_optional(self.storage) - .await? - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - - Ok(batch_number) + ) + .instrument("lock_batch_for_proving") + .with_arg("tee_type", &tee_type) + .with_arg("processing_timeout", &processing_timeout) + .with_arg("l1_batch_number", &min_batch_number) + .fetch_optional(self.storage) + .await + .map(|record| record.map(|record| L1BatchNumber(record.l1_batch_number as u32))) } pub async fn unlock_batch( @@ -176,38 +184,6 @@ impl TeeProofGenerationDal<'_, '_> { Ok(()) } - pub async fn insert_tee_proof_generation_job( - &mut self, - batch_number: L1BatchNumber, - tee_type: TeeType, - ) -> DalResult<()> { - let batch_number = i64::from(batch_number.0); - let query = sqlx::query!( - r#" - INSERT INTO - tee_proof_generation_details ( - l1_batch_number, tee_type, status, created_at, updated_at - ) - VALUES - ($1, $2, $3, NOW(), NOW()) - ON CONFLICT (l1_batch_number, tee_type) DO NOTHING - "#, - batch_number, - tee_type.to_string(), - TeeProofGenerationJobStatus::Unpicked.to_string(), - ); - let instrumentation = Instrumented::new("insert_tee_proof_generation_job") - .with_arg("l1_batch_number", &batch_number) - .with_arg("tee_type", &tee_type); - instrumentation - .clone() - .with(query) - .execute(self.storage) - .await?; - - Ok(()) - } - pub async fn save_attestation(&mut self, pubkey: &[u8], attestation: &[u8]) -> DalResult<()> { let query = sqlx::query!( r#" @@ -271,6 +247,40 @@ impl TeeProofGenerationDal<'_, '_> { Ok(proofs) } + /// For testing purposes only. 
+ pub async fn insert_tee_proof_generation_job( + &mut self, + batch_number: L1BatchNumber, + tee_type: TeeType, + ) -> DalResult<()> { + let batch_number = i64::from(batch_number.0); + let query = sqlx::query!( + r#" + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at + ) + VALUES + ($1, $2, $3, NOW(), NOW()) + ON CONFLICT (l1_batch_number, tee_type) DO NOTHING + "#, + batch_number, + tee_type.to_string(), + TeeProofGenerationJobStatus::Unpicked.to_string(), + ); + let instrumentation = Instrumented::new("insert_tee_proof_generation_job") + .with_arg("l1_batch_number", &batch_number) + .with_arg("tee_type", &tee_type); + instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + + Ok(()) + } + + /// For testing purposes only. pub async fn get_oldest_unpicked_batch(&mut self) -> DalResult> { let query = sqlx::query!( r#" @@ -278,18 +288,13 @@ impl TeeProofGenerationDal<'_, '_> { proofs.l1_batch_number FROM tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number WHERE - inputs.status = $1 - AND proofs.status = $2 + proofs.status = $1 ORDER BY proofs.l1_batch_number ASC LIMIT 1 "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, TeeProofGenerationJobStatus::Unpicked.to_string(), ); let batch_number = Instrumented::new("get_oldest_unpicked_batch") diff --git a/core/lib/dal/src/tee_verifier_input_producer_dal.rs b/core/lib/dal/src/tee_verifier_input_producer_dal.rs deleted file mode 100644 index dddb451a2d7..00000000000 --- a/core/lib/dal/src/tee_verifier_input_producer_dal.rs +++ /dev/null @@ -1,234 +0,0 @@ -use std::time::{Duration, Instant}; - -use sqlx::postgres::types::PgInterval; -use zksync_db_connection::{ - connection::Connection, - error::DalResult, - instrument::InstrumentExt, - utils::{duration_to_naive_time, pg_interval_from_duration}, -}; -use zksync_types::L1BatchNumber; - -use crate::Core; - -#[derive(Debug)] -pub struct TeeVerifierInputProducerDal<'a, 'c> { - pub(crate) storage: &'a mut Connection<'c, Core>, -} - -/// The amount of attempts to process a job before giving up. -pub const JOB_MAX_ATTEMPT: i16 = 5; - -/// Time to wait for job to be processed -const JOB_PROCESSING_TIMEOUT: PgInterval = pg_interval_from_duration(Duration::from_secs(10 * 60)); - -/// Status of a job that the producer will work on. - -#[derive(Debug, sqlx::Type)] -#[sqlx(type_name = "tee_verifier_input_producer_job_status")] -pub enum TeeVerifierInputProducerJobStatus { - /// When the job is queued. Metadata calculator creates the job and marks it as queued. - Queued, - /// The job is not going to be processed. This state is designed for manual operations on DB. - /// It is expected to be used if some jobs should be skipped like: - /// - testing purposes (want to check a specific L1 Batch, I can mark everything before it skipped) - /// - trim down costs on some environments (if I've done breaking changes, - /// makes no sense to wait for everything to be processed, I can just skip them and save resources) - ManuallySkipped, - /// Currently being processed by one of the jobs. Transitory state, will transition to either - /// [`TeeVerifierInputProducerStatus::Successful`] or [`TeeVerifierInputProducerStatus::Failed`]. - InProgress, - /// The final (happy case) state we expect all jobs to end up. After the run is complete, - /// the job uploaded it's inputs, it lands in successful. 
- Successful, - /// The job failed for reasons. It will be marked as such and the error persisted in DB. - /// If it failed less than MAX_ATTEMPTs, the job will be retried, - /// otherwise it will stay in this state as final state. - Failed, -} - -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn create_tee_verifier_input_producer_job( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult<()> { - sqlx::query!( - r#" - INSERT INTO - tee_verifier_input_producer_jobs ( - l1_batch_number, status, created_at, updated_at - ) - VALUES - ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING - "#, - i64::from(l1_batch_number.0), - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - ) - .instrument("create_tee_verifier_input_producer_job") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn get_next_tee_verifier_input_producer_job( - &mut self, - ) -> DalResult> { - let l1_batch_number = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - attempts = attempts + 1, - updated_at = NOW(), - processing_started_at = NOW() - WHERE - l1_batch_number = ( - SELECT - l1_batch_number - FROM - tee_verifier_input_producer_jobs - WHERE - status = $2 - OR ( - status = $1 - AND processing_started_at < NOW() - $4::INTERVAL - ) - OR ( - status = $3 - AND attempts < $5 - ) - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) - RETURNING - tee_verifier_input_producer_jobs.l1_batch_number - "#, - TeeVerifierInputProducerJobStatus::InProgress as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - &JOB_PROCESSING_TIMEOUT, - JOB_MAX_ATTEMPT, - ) - .instrument("get_next_tee_verifier_input_producer_job") - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| L1BatchNumber(job.l1_batch_number as u32)); - - Ok(l1_batch_number) - } - - pub async fn get_tee_verifier_input_producer_job_attempts( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - tee_verifier_input_producer_jobs - WHERE - l1_batch_number = $1 - "#, - i64::from(l1_batch_number.0), - ) - .instrument("get_tee_verifier_input_producer_job_attempts") - .with_arg("l1_batch_number", &l1_batch_number) - .fetch_optional(self.storage) - .await? 
- .map(|job| job.attempts as u32); - - Ok(attempts) - } - - pub async fn mark_job_as_successful( - &mut self, - l1_batch_number: L1BatchNumber, - started_at: Instant, - object_path: &str, - ) -> DalResult<()> { - sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - updated_at = NOW(), - time_taken = $3, - input_blob_url = $4 - WHERE - l1_batch_number = $2 - "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - i64::from(l1_batch_number.0), - duration_to_naive_time(started_at.elapsed()), - object_path, - ) - .instrument("mark_job_as_successful") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn mark_job_as_failed( - &mut self, - l1_batch_number: L1BatchNumber, - started_at: Instant, - error: String, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - updated_at = NOW(), - time_taken = $3, - error = $4 - WHERE - l1_batch_number = $2 - AND status != $5 - RETURNING - tee_verifier_input_producer_jobs.attempts - "#, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - i64::from(l1_batch_number.0), - duration_to_naive_time(started_at.elapsed()), - error, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - ) - .instrument("mark_job_as_failed") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| job.attempts as u32); - - Ok(attempts) - } -} - -/// These functions should only be used for tests. -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn delete_all_jobs(&mut self) -> DalResult<()> { - sqlx::query!( - r#" - DELETE FROM tee_verifier_input_producer_jobs - "# - ) - .instrument("delete_all_tee_verifier_jobs") - .execute(self.storage) - .await?; - Ok(()) - } -} diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index f628638eeb6..baa2ee58485 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -50,12 +50,13 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { batch_fee_input: BatchFeeInput::l1_pegged(100, 100), base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(protocol_version), - pubdata_params: PubdataParams::default(), virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: PubdataParams::default(), } } + pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { L1BatchHeader::new( L1BatchNumber(number), @@ -63,6 +64,7 @@ pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), + evm_emulator: Some(H256::repeat_byte(43)), }, ProtocolVersionId::latest(), ) diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index b571231bf9c..6a5d0d92b07 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -10,9 +10,10 @@ use zksync_db_connection::{ utils::pg_interval_from_duration, }; use zksync_types::{ - block::L2BlockExecutionData, l1::L1Tx, l2::L2Tx, protocol_upgrade::ProtocolUpgradeTx, Address, - ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, L2BlockNumber, PriorityOpId, - ProtocolVersionId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, + block::L2BlockExecutionData, 
debug_flat_call::CallTraceMeta, l1::L1Tx, l2::L2Tx, + protocol_upgrade::ProtocolUpgradeTx, Address, ExecuteTransactionCommon, L1BatchNumber, + L1BlockNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, Transaction, H256, + PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::u256_to_big_decimal; use zksync_vm_interface::{ @@ -2189,12 +2190,17 @@ impl TransactionsDal<'_, '_> { Ok(data) } - pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { + pub async fn get_call_trace( + &mut self, + tx_hash: H256, + ) -> DalResult> { let row = sqlx::query!( r#" SELECT protocol_version, - index_in_block + index_in_block, + miniblocks.number AS "miniblock_number!", + miniblocks.hash AS "miniblocks_hash!" FROM transactions INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number @@ -2235,7 +2241,12 @@ impl TransactionsDal<'_, '_> { .map(|call_trace| { ( parse_call_trace(&call_trace.call_trace, protocol_version), - row.index_in_block.unwrap_or_default() as usize, + CallTraceMeta { + index_in_block: row.index_in_block.unwrap_or_default() as usize, + tx_hash, + block_number: row.miniblock_number as u32, + block_hash: H256::from_slice(&row.miniblocks_hash), + }, ) })) } diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index dcf5f25f104..c2209bb9c93 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -607,6 +607,39 @@ mod tests { ); } + #[tokio::test] + async fn getting_evm_deployment_tx() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + let mut tx = mock_l2_transaction(); + tx.execute.contract_address = None; + let tx_hash = tx.hash(); + prepare_transactions(&mut conn, vec![tx.clone()]).await; + + let fetched_tx = conn + .transactions_dal() + .get_tx_by_hash(tx_hash) + .await + .unwrap() + .expect("no transaction"); + let mut fetched_tx = L2Tx::try_from(fetched_tx).unwrap(); + assert_eq!(fetched_tx.execute.contract_address, None); + fetched_tx.raw_bytes = tx.raw_bytes.clone(); + assert_eq!(fetched_tx, tx); + + let web3_tx = conn + .transactions_web3_dal() + .get_transaction_by_position(L2BlockNumber(1), 0, L2ChainId::from(270)) + .await; + let web3_tx = web3_tx.unwrap().expect("no transaction"); + assert_eq!(web3_tx.hash, tx_hash); + assert_eq!(web3_tx.to, None); + } + #[tokio::test] async fn getting_receipts() { let connection_pool = ConnectionPool::::test_pool().await; @@ -621,7 +654,7 @@ mod tests { let tx2 = mock_l2_transaction(); let tx2_hash = tx2.hash(); - prepare_transactions(&mut conn, vec![tx1.clone(), tx2.clone()]).await; + prepare_transactions(&mut conn, vec![tx1, tx2]).await; let mut receipts = conn .transactions_web3_dal() @@ -636,6 +669,31 @@ mod tests { assert_eq!(receipts[1].transaction_hash, tx2_hash); } + #[tokio::test] + async fn getting_receipt_for_evm_deployment_tx() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let mut tx = mock_l2_transaction(); + let tx_hash = tx.hash(); + tx.execute.contract_address = None; + prepare_transactions(&mut conn, vec![tx]).await; + + let receipts = conn + .transactions_web3_dal() + .get_transaction_receipts(&[tx_hash]) + .await + 
.unwrap(); + assert_eq!(receipts.len(), 1); + let receipt = receipts.into_iter().next().unwrap(); + assert_eq!(receipt.transaction_hash, tx_hash); + assert_eq!(receipt.to, Some(Address::zero())); + } + #[tokio::test] async fn getting_l2_block_transactions() { let connection_pool = ConnectionPool::::test_pool().await; diff --git a/core/lib/dal/src/vm_runner_dal.rs b/core/lib/dal/src/vm_runner_dal.rs index b12b0218680..df0d3e86b88 100644 --- a/core/lib/dal/src/vm_runner_dal.rs +++ b/core/lib/dal/src/vm_runner_dal.rs @@ -42,6 +42,8 @@ impl VmRunnerDal<'_, '_> { MAX(number) AS "last_batch" FROM l1_batches + WHERE + is_sealed ), processed_batches AS ( @@ -205,6 +207,8 @@ impl VmRunnerDal<'_, '_> { MAX(number) AS "last_batch" FROM l1_batches + WHERE + is_sealed ), processed_batches AS ( diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs index 980e238879b..0ea24ebf00d 100644 --- a/core/lib/env_config/src/api.rs +++ b/core/lib/env_config/src/api.rs @@ -76,6 +76,7 @@ mod tests { factory_deps_cache_size_mb: Some(128), initial_writes_cache_size_mb: Some(32), latest_values_cache_size_mb: Some(256), + latest_values_max_block_lag: Some(NonZeroU32::new(50).unwrap()), fee_history_limit: Some(100), max_batch_request_size: Some(200), max_response_body_size_mb: Some(10), @@ -137,6 +138,7 @@ mod tests { API_WEB3_JSON_RPC_FACTORY_DEPS_CACHE_SIZE_MB=128 API_WEB3_JSON_RPC_INITIAL_WRITES_CACHE_SIZE_MB=32 API_WEB3_JSON_RPC_LATEST_VALUES_CACHE_SIZE_MB=256 + API_WEB3_JSON_RPC_LATEST_VALUES_MAX_BLOCK_LAG=50 API_WEB3_JSON_RPC_FEE_HISTORY_LIMIT=100 API_WEB3_JSON_RPC_MAX_BATCH_REQUEST_SIZE=200 API_WEB3_JSON_RPC_WEBSOCKET_REQUESTS_PER_MINUTE_LIMIT=10 diff --git a/core/lib/env_config/src/chain.rs b/core/lib/env_config/src/chain.rs index a25c593bd88..a125f331496 100644 --- a/core/lib/env_config/src/chain.rs +++ b/core/lib/env_config/src/chain.rs @@ -102,6 +102,7 @@ mod tests { default_aa_hash: Some(hash( "0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066", )), + evm_emulator_hash: None, l1_batch_commit_data_generator_mode, max_circuits_per_batch: 24100, protective_reads_persistence_enabled: true, diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index a386adad1df..250cfe8f002 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -128,9 +128,9 @@ CONTRACTS_BASE_TOKEN_ADDR="0x0000000000000000000000000000000000000001" CONTRACTS_USER_FACING_BRIDGEHUB_PROXY_ADDR="0x35ea7f92f4c5f433efe15284e99c040110cf6297" CONTRACTS_USER_FACING_DIAMOND_PROXY_ADDR="0xF00B988a98Ca742e7958DeF9F7823b5908715f4a CONTRACTS_L2_NATIVE_TOKEN_VAULT_PROXY_ADDR="0xfc073319977e314f251eae6ae6be76b0b3baeecf" -CONTRACTS_L2_DA_VALIDATOR_ADDR="0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff" CONTRACTS_CHAIN_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff" CONTRACTS_SETTLEMENT_LAYER="0" +CONTRACTS_L2_DA_VALIDATOR_ADDR="0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff" "#; lock.set_env(config); diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs index 0fc3ad216f8..1043786fc1e 100644 --- a/core/lib/env_config/src/da_client.rs +++ b/core/lib/env_config/src/da_client.rs @@ -2,19 +2,34 @@ use std::env; use zksync_config::configs::{ da_client::{ - avail::AvailSecrets, DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, - OBJECT_STORE_CLIENT_CONFIG_NAME, + avail::{ + AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME, + }, + DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, 
OBJECT_STORE_CLIENT_CONFIG_NAME, }, secrets::DataAvailabilitySecrets, + AvailConfig, }; use crate::{envy_load, FromEnv}; impl FromEnv for DAClientConfig { fn from_env() -> anyhow::Result { - let client_tag = std::env::var("DA_CLIENT")?; + let client_tag = env::var("DA_CLIENT")?; let config = match client_tag.as_str() { - AVAIL_CLIENT_CONFIG_NAME => Self::Avail(envy_load("da_avail_config", "DA_")?), + AVAIL_CLIENT_CONFIG_NAME => Self::Avail(AvailConfig { + bridge_api_url: env::var("DA_BRIDGE_API_URL").ok().unwrap(), + timeout: env::var("DA_TIMEOUT")?.parse()?, + config: match env::var("DA_AVAIL_CLIENT_TYPE")?.as_str() { + AVAIL_FULL_CLIENT_NAME => { + AvailClientConfig::FullClient(envy_load("da_avail_full_client", "DA_")?) + } + AVAIL_GAS_RELAY_CLIENT_NAME => { + AvailClientConfig::GasRelay(envy_load("da_avail_gas_relay", "DA_")?) + } + _ => anyhow::bail!("Unknown Avail DA client type"), + }, + }), OBJECT_STORE_CLIENT_CONFIG_NAME => { Self::ObjectStore(envy_load("da_object_store", "DA_")?) } @@ -30,11 +45,21 @@ impl FromEnv for DataAvailabilitySecrets { let client_tag = std::env::var("DA_CLIENT")?; let secrets = match client_tag.as_str() { AVAIL_CLIENT_CONFIG_NAME => { - let seed_phrase = env::var("DA_SECRETS_SEED_PHRASE") - .ok() - .map(|s| s.parse()) - .transpose()?; - Self::Avail(AvailSecrets { seed_phrase }) + let seed_phrase: Option = + env::var("DA_SECRETS_SEED_PHRASE") + .ok() + .map(|s| s.parse().unwrap()); + let gas_relay_api_key: Option = + env::var("DA_SECRETS_GAS_RELAY_API_KEY") + .ok() + .map(|s| s.parse().unwrap()); + if seed_phrase.is_none() && gas_relay_api_key.is_none() { + anyhow::bail!("No secrets provided for Avail DA client"); + } + Self::Avail(AvailSecrets { + seed_phrase, + gas_relay_api_key, + }) } _ => anyhow::bail!("Unknown DA client name: {}", client_tag), }; @@ -47,7 +72,10 @@ impl FromEnv for DataAvailabilitySecrets { mod tests { use zksync_config::{ configs::{ - da_client::{DAClientConfig, DAClientConfig::ObjectStore}, + da_client::{ + avail::{AvailClientConfig, AvailDefaultConfig}, + DAClientConfig::{self, ObjectStore}, + }, object_store::ObjectStoreMode::GCS, }, AvailConfig, ObjectStoreConfig, @@ -91,14 +119,14 @@ mod tests { bridge_api_url: &str, app_id: u32, timeout: usize, - max_retries: usize, ) -> DAClientConfig { DAClientConfig::Avail(AvailConfig { - api_node_url: api_node_url.to_string(), bridge_api_url: bridge_api_url.to_string(), - app_id, timeout, - max_retries, + config: AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: api_node_url.to_string(), + app_id, + }), }) } @@ -107,11 +135,13 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" DA_CLIENT="Avail" - DA_API_NODE_URL="localhost:12345" + DA_AVAIL_CLIENT_TYPE="FullClient" + DA_BRIDGE_API_URL="localhost:54321" - DA_APP_ID="1" DA_TIMEOUT="2" - DA_MAX_RETRIES="3" + + DA_API_NODE_URL="localhost:12345" + DA_APP_ID="1" "#; lock.set_env(config); @@ -124,7 +154,6 @@ mod tests { "localhost:54321", "1".parse::().unwrap(), "2".parse::().unwrap(), - "3".parse::().unwrap(), ) ); } @@ -139,15 +168,18 @@ mod tests { lock.set_env(config); - let actual = match DataAvailabilitySecrets::from_env().unwrap() { - DataAvailabilitySecrets::Avail(avail) => avail.seed_phrase, + let (actual_seed, actual_key) = match DataAvailabilitySecrets::from_env().unwrap() { + DataAvailabilitySecrets::Avail(avail) => (avail.seed_phrase, avail.gas_relay_api_key), }; assert_eq!( - actual.unwrap(), - "bottom drive obey lake curtain smoke basket hold race lonely fit walk" - .parse() - .unwrap() + 
(actual_seed.unwrap(), actual_key), + ( + "bottom drive obey lake curtain smoke basket hold race lonely fit walk" + .parse() + .unwrap(), + None + ) ); } } diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs index 7e9c4cc16ec..00b937fd725 100644 --- a/core/lib/env_config/src/eth_sender.rs +++ b/core/lib/env_config/src/eth_sender.rs @@ -44,7 +44,8 @@ impl FromEnv for GasAdjusterConfig { #[cfg(test)] mod tests { - use zksync_config::configs::eth_sender::{ProofSendingMode, PubdataSendingMode}; + use zksync_basic_types::pubdata_da::PubdataSendingMode; + use zksync_config::configs::eth_sender::ProofSendingMode; use super::*; use crate::test_utils::{hash, EnvMutex}; @@ -76,6 +77,7 @@ mod tests { tx_aggregation_paused: false, ignore_db_nonce: None, priority_tree_start_index: None, + time_in_mempool_in_l1_blocks_cap: 2000, }), gas_adjuster: Some(GasAdjusterConfig { default_priority_fee_per_gas: 20000000000, @@ -136,6 +138,7 @@ mod tests { ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG="30" ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS="4000000" ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE="120000" + ETH_SENDER_SENDER_TIME_IN_MEMPOOL_IN_L1_BLOCKS_CAP="2000" ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000" ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000" ETH_SENDER_SENDER_PUBDATA_SENDING_MODE="Calldata" diff --git a/core/lib/env_config/src/genesis.rs b/core/lib/env_config/src/genesis.rs index bf30fd4cc33..55c79eceb50 100644 --- a/core/lib/env_config/src/genesis.rs +++ b/core/lib/env_config/src/genesis.rs @@ -68,6 +68,7 @@ impl FromEnv for GenesisConfig { genesis_commitment: contracts_config.genesis_batch_commitment, bootloader_hash: state_keeper.bootloader_hash, default_aa_hash: state_keeper.default_aa_hash, + evm_emulator_hash: state_keeper.evm_emulator_hash, // TODO(EVM-676): for now, the settlement layer is always the same as the L1 network l1_chain_id: L1ChainId(network_config.network.chain_id().0), sl_chain_id: Some(network_config.network.chain_id()), diff --git a/core/lib/env_config/src/proof_data_handler.rs b/core/lib/env_config/src/proof_data_handler.rs index f69aa1d6dc5..b5bfda4544e 100644 --- a/core/lib/env_config/src/proof_data_handler.rs +++ b/core/lib/env_config/src/proof_data_handler.rs @@ -4,12 +4,18 @@ use crate::{envy_load, FromEnv}; impl FromEnv for ProofDataHandlerConfig { fn from_env() -> anyhow::Result { - envy_load("proof_data_handler", "PROOF_DATA_HANDLER_") + Ok(Self { + tee_config: envy_load("proof_data_handler.tee", "PROOF_DATA_HANDLER_")?, + ..envy_load("proof_data_handler", "PROOF_DATA_HANDLER_")? 
+ }) } } #[cfg(test)] mod tests { + use zksync_basic_types::L1BatchNumber; + use zksync_config::configs::TeeConfig; + use super::*; use crate::test_utils::EnvMutex; @@ -19,7 +25,10 @@ mod tests { ProofDataHandlerConfig { http_port: 3320, proof_generation_timeout_in_secs: 18000, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(1337), + }, } } @@ -29,6 +38,7 @@ mod tests { PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS="18000" PROOF_DATA_HANDLER_HTTP_PORT="3320" PROOF_DATA_HANDLER_TEE_SUPPORT="true" + PROOF_DATA_HANDLER_FIRST_TEE_PROCESSED_BATCH="1337" "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 4048b471862..150bc8cbd54 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -368,6 +368,7 @@ where .into_iter() .take(chunk_size) .zip(fee_history.base_fee_per_blob_gas) + .take(chunk_size) { let fees = BaseFees { base_fee_per_gas: cast_to_u64(base, "base_fee_per_gas")?, @@ -424,16 +425,12 @@ where let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); let chunk_size = chunk_end - chunk_start + 1; - let fee_history = EthNamespaceClient::fee_history( - client, - U64::from(chunk_size), - zksync_types::api::BlockNumber::from(chunk_end), - None, - ) - .rpc_context("fee_history") - .with_arg("chunk_size", &chunk_size) - .with_arg("block", &chunk_end) - .await?; + let fee_history = client + .fee_history(U64::from(chunk_size).into(), chunk_end.into(), None) + .rpc_context("fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("block", &chunk_end) + .await?; if fee_history.inner.oldest_block != web3::BlockNumber::Number(chunk_start.into()) { let oldest_block = match fee_history.inner.oldest_block { diff --git a/core/lib/eth_client/src/types.rs b/core/lib/eth_client/src/types.rs index 59fb1cdeddc..dd332351afb 100644 --- a/core/lib/eth_client/src/types.rs +++ b/core/lib/eth_client/src/types.rs @@ -320,7 +320,7 @@ pub struct FailureInfo { #[cfg(test)] mod tests { - use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; + use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_types::{ eth_sender::{EthTxBlobSidecarV1, SidecarBlobV1}, web3, K256PrivateKey, EIP_4844_TX_TYPE, H256, U256, U64, @@ -384,10 +384,7 @@ mod tests { .as_ref(), )]), }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction.clone()); let hash = web3::keccak256(&raw_tx).into(); // Transaction generated with https://github.com/inphi/blob-utils with @@ -493,10 +490,7 @@ mod tests { blob_versioned_hashes: Some(vec![versioned_hash_1, versioned_hash_2]), }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction); let hash = web3::keccak256(&raw_tx).into(); // Transaction generated with https://github.com/inphi/blob-utils with diff --git a/core/lib/eth_signer/Cargo.toml b/core/lib/eth_signer/Cargo.toml index f760134e09b..92bb47824f3 100644 --- a/core/lib/eth_signer/Cargo.toml +++ b/core/lib/eth_signer/Cargo.toml @@ -11,10 +11,9 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_types.workspace = true +zksync_basic_types.workspace = true +zksync_crypto_primitives.workspace = true + +async-trait.workspace = true 
rlp.workspace = true thiserror.workspace = true -async-trait.workspace = true - -[dev-dependencies] -tokio = { workspace = true, features = ["full"] } diff --git a/core/lib/eth_signer/src/error.rs b/core/lib/eth_signer/src/error.rs deleted file mode 100644 index 8b137891791..00000000000 --- a/core/lib/eth_signer/src/error.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/core/lib/eth_signer/src/lib.rs b/core/lib/eth_signer/src/lib.rs index 3a92d47b062..8b6025eb15d 100644 --- a/core/lib/eth_signer/src/lib.rs +++ b/core/lib/eth_signer/src/lib.rs @@ -1,5 +1,6 @@ use async_trait::async_trait; -use zksync_types::{Address, EIP712TypedStructure, Eip712Domain, PackedEthSignature}; +use zksync_basic_types::Address; +use zksync_crypto_primitives::{EIP712TypedStructure, Eip712Domain, PackedEthSignature}; pub use crate::{pk_signer::PrivateKeySigner, raw_ethereum_tx::TransactionParameters}; diff --git a/core/lib/eth_signer/src/pk_signer.rs b/core/lib/eth_signer/src/pk_signer.rs index 47b0e110991..0f55425a0d5 100644 --- a/core/lib/eth_signer/src/pk_signer.rs +++ b/core/lib/eth_signer/src/pk_signer.rs @@ -1,5 +1,7 @@ -use zksync_types::{ - Address, EIP712TypedStructure, Eip712Domain, K256PrivateKey, PackedEthSignature, +use async_trait::async_trait; +use zksync_basic_types::Address; +use zksync_crypto_primitives::{ + EIP712TypedStructure, Eip712Domain, K256PrivateKey, PackedEthSignature, }; use crate::{ @@ -12,22 +14,20 @@ pub struct PrivateKeySigner { private_key: K256PrivateKey, } +// We define inherent methods duplicating `EthereumSigner` ones because they are sync and (other than `sign_typed_data`) infallible. impl PrivateKeySigner { pub fn new(private_key: K256PrivateKey) -> Self { Self { private_key } } -} -#[async_trait::async_trait] -impl EthereumSigner for PrivateKeySigner { - /// Get Ethereum address that matches the private key. - async fn get_address(&self) -> Result { - Ok(self.private_key.address()) + /// Gets an Ethereum address that matches this private key. + pub fn address(&self) -> Address { + self.private_key.address() } /// Signs typed struct using Ethereum private key by EIP-712 signature standard. /// Result of this function is the equivalent of RPC calling `eth_signTypedData`. - async fn sign_typed_data( + pub fn sign_typed_data( &self, domain: &Eip712Domain, typed_struct: &S, @@ -39,16 +39,11 @@ impl EthereumSigner for PrivateKeySigner { } /// Signs and returns the RLP-encoded transaction. 
- async fn sign_transaction( - &self, - raw_tx: TransactionParameters, - ) -> Result, SignerError> { + pub fn sign_transaction(&self, raw_tx: TransactionParameters) -> Vec { // According to the code in web3 // We should use `max_fee_per_gas` as `gas_price` if we use EIP1559 let gas_price = raw_tx.max_fee_per_gas; - let max_priority_fee_per_gas = raw_tx.max_priority_fee_per_gas; - let tx = Transaction { to: raw_tx.to, nonce: raw_tx.nonce, @@ -62,21 +57,42 @@ impl EthereumSigner for PrivateKeySigner { max_fee_per_blob_gas: raw_tx.max_fee_per_blob_gas, blob_versioned_hashes: raw_tx.blob_versioned_hashes, }; - let signed = tx.sign(&self.private_key, raw_tx.chain_id); - Ok(signed.raw_transaction.0) + signed.raw_transaction.0 + } +} + +#[async_trait] +impl EthereumSigner for PrivateKeySigner { + async fn get_address(&self) -> Result { + Ok(self.address()) + } + + async fn sign_typed_data( + &self, + domain: &Eip712Domain, + typed_struct: &S, + ) -> Result { + self.sign_typed_data(domain, typed_struct) + } + + async fn sign_transaction( + &self, + raw_tx: TransactionParameters, + ) -> Result, SignerError> { + Ok(self.sign_transaction(raw_tx)) } } #[cfg(test)] mod test { - use zksync_types::{K256PrivateKey, H160, H256, U256, U64}; + use zksync_basic_types::{H160, H256, U256, U64}; + use zksync_crypto_primitives::K256PrivateKey; - use super::PrivateKeySigner; - use crate::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; + use super::*; - #[tokio::test] - async fn test_generating_signed_raw_transaction() { + #[test] + fn test_generating_signed_raw_transaction() { let private_key = K256PrivateKey::from_bytes(H256::from([5; 32])).unwrap(); let signer = PrivateKeySigner::new(private_key); let raw_transaction = TransactionParameters { @@ -94,10 +110,7 @@ mod test { blob_versioned_hashes: None, max_fee_per_blob_gas: None, }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction); assert_ne!(raw_tx.len(), 1); // pre-calculated signature with right algorithm implementation let precalculated_raw_tx: Vec = vec![ diff --git a/core/lib/eth_signer/src/raw_ethereum_tx.rs b/core/lib/eth_signer/src/raw_ethereum_tx.rs index 9479b5bd9d7..bea64305b47 100644 --- a/core/lib/eth_signer/src/raw_ethereum_tx.rs +++ b/core/lib/eth_signer/src/raw_ethereum_tx.rs @@ -10,11 +10,11 @@ //! 
Link to @Deniallugo's PR to web3: https://github.com/tomusdrw/rust-web3/pull/630 use rlp::RlpStream; -use zksync_types::{ - ethabi::Address, +use zksync_basic_types::{ web3::{keccak256, AccessList, Signature, SignedTransaction}, - K256PrivateKey, H256, U256, U64, + Address, H256, U256, U64, }; +use zksync_crypto_primitives::K256PrivateKey; const LEGACY_TX_ID: u64 = 0; const ACCESSLISTS_TX_ID: u64 = 1; diff --git a/core/lib/external_price_api/Cargo.toml b/core/lib/external_price_api/Cargo.toml index 3eee675b4e6..1e849f60006 100644 --- a/core/lib/external_price_api/Cargo.toml +++ b/core/lib/external_price_api/Cargo.toml @@ -20,8 +20,12 @@ serde.workspace = true reqwest = { workspace = true, features = ["json"] } fraction.workspace = true rand.workspace = true +tracing.workspace = true zksync_config.workspace = true zksync_types.workspace = true tokio.workspace = true + +[dev-dependencies] httpmock.workspace = true +serde_json.workspace = true diff --git a/core/lib/external_price_api/src/cmc_api.rs b/core/lib/external_price_api/src/cmc_api.rs new file mode 100644 index 00000000000..05cb5e4d728 --- /dev/null +++ b/core/lib/external_price_api/src/cmc_api.rs @@ -0,0 +1,357 @@ +use std::{collections::HashMap, str::FromStr}; + +use async_trait::async_trait; +use chrono::Utc; +use serde::Deserialize; +use tokio::sync::RwLock; +use url::Url; +use zksync_config::configs::ExternalPriceApiClientConfig; +use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address}; + +use crate::{address_to_string, utils::get_fraction, PriceAPIClient}; + +const AUTH_HEADER: &str = "x-cmc_pro_api_key"; +const DEFAULT_API_URL: &str = "https://pro-api.coinmarketcap.com"; +const ALLOW_TOKENS_ONLY_ON_PLATFORM_ID: i32 = 1; // 1 = Ethereum +const REQUEST_QUOTE_IN_CURRENCY_ID: &str = "1027"; // 1027 = ETH + +#[derive(Debug)] +pub struct CmcPriceApiClient { + base_url: Url, + client: reqwest::Client, + cache_token_id_by_address: RwLock>, +} + +impl CmcPriceApiClient { + pub fn new(config: ExternalPriceApiClientConfig) -> Self { + let client = if let Some(api_key) = &config.api_key { + use reqwest::header::{HeaderMap, HeaderName, HeaderValue}; + + let default_headers = HeaderMap::from_iter([( + HeaderName::from_static(AUTH_HEADER), + HeaderValue::from_str(api_key).expect("Failed to create header value"), + )]); + + reqwest::Client::builder().default_headers(default_headers) + } else { + reqwest::Client::builder() + } + .timeout(config.client_timeout()) + .build() + .expect("Failed to build reqwest client"); + + let base_url = config.base_url.unwrap_or(DEFAULT_API_URL.to_string()); + let base_url = Url::parse(&base_url).expect("Failed to parse CoinMarketCap API URL"); + + Self { + base_url, + client, + cache_token_id_by_address: RwLock::default(), + } + } + + fn get(&self, path: &str) -> reqwest::RequestBuilder { + self.client + .get(self.base_url.join(path).expect("Failed to join URL path")) + } + + async fn get_token_id(&self, address: Address) -> anyhow::Result { + if let Some(x) = self.cache_token_id_by_address.read().await.get(&address) { + return Ok(*x); + } + + let response = self.get("/v1/cryptocurrency/map").send().await?; + let status = response.status(); + if !status.is_success() { + return Err(anyhow::anyhow!( + "Http error while fetching token id. 
Status: {status}, token: {address}, msg: {}", + response.text().await.unwrap_or_default(), + )); + } + + let parsed = response.json::().await?; + for token_info in parsed.data { + if let Some(platform) = token_info.platform { + if platform.id == ALLOW_TOKENS_ONLY_ON_PLATFORM_ID + && Address::from_str(&platform.token_address).is_ok_and(|a| a == address) + { + if token_info.is_active != 1 { + tracing::warn!( + "CoinMarketCap API reports token {} ({}) on platform {} ({}) is not active", + address_to_string(&address), + token_info.name, + platform.id, + platform.name, + ); + } + + self.cache_token_id_by_address + .write() + .await + .insert(address, token_info.id); + return Ok(token_info.id); + } + } + } + + Err(anyhow::anyhow!("Token ID not found for address {address}")) + } + + async fn get_token_price_by_address(&self, address: Address) -> anyhow::Result { + let id = self.get_token_id(address).await?; + self.get_token_price_by_id(id).await + } + + async fn get_token_price_by_id(&self, id: i32) -> anyhow::Result { + let response = self + .get("/v2/cryptocurrency/quotes/latest") + .query(&[("id", id)]) + .query(&[("convert_id", REQUEST_QUOTE_IN_CURRENCY_ID)]) + .send() + .await?; + + let status = response.status(); + if !status.is_success() { + return Err(anyhow::anyhow!( + "Http error while fetching token price. Status: {status}, token: {id}, msg: {}", + response.text().await.unwrap_or_default(), + )); + } + + response + .json::() + .await? + .data + .get(&id) + .and_then(|data| data.quote.get(REQUEST_QUOTE_IN_CURRENCY_ID)) + .map(|mq| mq.price) + .ok_or_else(|| anyhow::anyhow!("Price not found for token: {id}")) + } +} + +#[derive(Debug, Deserialize)] +struct V2CryptocurrencyQuotesLatestResponse { + data: HashMap, +} + +#[derive(Debug, Deserialize)] +struct CryptocurrencyQuoteObject { + quote: HashMap, +} + +#[derive(Debug, Deserialize)] +struct MarketQuote { + price: f64, +} + +#[derive(Debug, Deserialize)] +struct V1CryptocurrencyMapResponse { + data: Vec, +} + +#[derive(Debug, Deserialize)] +struct CryptocurrencyObject { + id: i32, + name: String, + is_active: u8, + platform: Option, +} + +#[derive(Debug, Deserialize)] +struct CryptocurrencyPlatform { + id: i32, + name: String, + token_address: String, +} + +#[async_trait] +impl PriceAPIClient for CmcPriceApiClient { + async fn fetch_ratio(&self, token_address: Address) -> anyhow::Result { + let base_token_in_eth = self.get_token_price_by_address(token_address).await?; + let (term_ether, term_base_token) = get_fraction(base_token_in_eth)?; + + return Ok(BaseTokenAPIRatio { + numerator: term_base_token, + denominator: term_ether, + ratio_timestamp: Utc::now(), + }); + } +} + +#[cfg(test)] +mod tests { + use httpmock::prelude::*; + use serde_json::json; + + use super::*; + use crate::tests::*; + + fn make_client(server: &MockServer, api_key: Option) -> Box { + Box::new(CmcPriceApiClient::new(ExternalPriceApiClientConfig { + source: "coinmarketcap".to_string(), + base_url: Some(server.base_url()), + api_key, + client_timeout_ms: 5000, + forced: None, + })) + } + + fn make_mock_server() -> MockServer { + let mock_server = MockServer::start(); + // cryptocurrency map + mock_server.mock(|when, then| { + when.method(GET) + .header_exists(AUTH_HEADER) + .path("/v1/cryptocurrency/map"); + then.status(200) + .header("content-type", "application/json") + .json_body(json!({ + "status": { + "timestamp": "2024-09-25T11:29:38.440Z", + "error_code": 0, + "error_message": null, + "elapsed": 351, + "credit_count": 1, + "notice": null + }, + "data": [ + { 
+ "id": 7083, + "rank": 26, + "name": "Uniswap", + "symbol": "UNI", + "slug": "uniswap", + "is_active": 1, + "first_historical_data": "2020-09-17T01:10:00.000Z", + "last_historical_data": "2024-09-25T11:25:00.000Z", + "platform": { + "id": 1, + "name": "Ethereum", + "symbol": "ETH", + "slug": "ethereum", + "token_address": "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984" + } + } + ] + })); + }); + + // cryptocurrency quote + mock_server.mock(|when, then| { + // TODO: check for api authentication header + when.method(GET) + .header_exists(AUTH_HEADER) + .path("/v2/cryptocurrency/quotes/latest") + .query_param("id", "7083") // Uniswap + .query_param("convert_id", "1027"); // Ether + then.status(200) + .header("content-type", "application/json") + .json_body(json!({ + "status": { + "timestamp": "2024-10-02T14:15:07.189Z", + "error_code": 0, + "error_message": null, + "elapsed": 39, + "credit_count": 1, + "notice": null + }, + "data": { + "7083": { + "id": 7083, + "name": "Uniswap", + "symbol": "UNI", + "slug": "uniswap", + "date_added": "2020-09-17T00:00:00.000Z", + "tags": [], + "max_supply": null, + "circulating_supply": 600294743.71, + "total_supply": 1000000000, + "platform": { + "id": 1027, + "name": "Ethereum", + "symbol": "ETH", + "slug": "ethereum", + "token_address": "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984" + }, + "is_active": 1, + "infinite_supply": false, + "cmc_rank": 22, + "is_fiat": 0, + "last_updated": "2024-10-02T14:13:00.000Z", + "quote": { + "1027": { + "price": 0.0028306661720164175, + "last_updated": "2024-10-02T14:12:00.000Z" + } + } + } + } + })); + }); + + mock_server + } + + #[tokio::test] + async fn mock_happy() { + let server = make_mock_server(); + let client = make_client( + &server, + Some("00000000-0000-0000-0000-000000000000".to_string()), + ); + + let token_address: Address = TEST_TOKEN_ADDRESS.parse().unwrap(); + + let api_price = client.fetch_ratio(token_address).await.unwrap(); + + const REPORTED_PRICE: f64 = 1_f64 / 0.0028306661720164175_f64; + const EPSILON: f64 = 0.000001_f64 * REPORTED_PRICE; + + assert!((approximate_value(&api_price) - REPORTED_PRICE).abs() < EPSILON); + } + + #[tokio::test] + #[should_panic = "Request did not match any route or mock"] + async fn mock_fail_no_api_key() { + let server = make_mock_server(); + let client = make_client(&server, None); + + let token_address: Address = TEST_TOKEN_ADDRESS.parse().unwrap(); + + client.fetch_ratio(token_address).await.unwrap(); + } + + #[tokio::test] + #[should_panic = "Token ID not found for address"] + async fn mock_fail_not_found() { + let server = make_mock_server(); + let client = make_client( + &server, + Some("00000000-0000-0000-0000-000000000000".to_string()), + ); + + let token_address: Address = Address::random(); + + client.fetch_ratio(token_address).await.unwrap(); + } + + #[tokio::test] + #[ignore = "run manually (accesses network); specify CoinMarketCap API key in env var CMC_API_KEY"] + async fn real_cmc_tether() { + let client = CmcPriceApiClient::new(ExternalPriceApiClientConfig { + api_key: Some(std::env::var("CMC_API_KEY").unwrap()), + base_url: None, + client_timeout_ms: 5000, + source: "coinmarketcap".to_string(), + forced: None, + }); + + let tether: Address = "0xdac17f958d2ee523a2206206994597c13d831ec7" + .parse() + .unwrap(); + + let r = client.get_token_price_by_address(tether).await.unwrap(); + + println!("{r}"); + } +} diff --git a/core/lib/external_price_api/src/lib.rs b/core/lib/external_price_api/src/lib.rs index 7a068f9b1cb..01fc433802b 100644 --- 
a/core/lib/external_price_api/src/lib.rs +++ b/core/lib/external_price_api/src/lib.rs @@ -1,3 +1,4 @@ +pub mod cmc_api; pub mod coingecko_api; pub mod forced_price_client; #[cfg(test)] diff --git a/core/lib/external_price_api/src/tests.rs b/core/lib/external_price_api/src/tests.rs index bb2af866cf5..fd6a8b9928f 100644 --- a/core/lib/external_price_api/src/tests.rs +++ b/core/lib/external_price_api/src/tests.rs @@ -2,13 +2,13 @@ use std::str::FromStr; use chrono::Utc; use httpmock::MockServer; -use zksync_types::Address; +use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address}; use crate::PriceAPIClient; const TIME_TOLERANCE_MS: i64 = 100; /// Uniswap (UNI) -const TEST_TOKEN_ADDRESS: &str = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"; +pub const TEST_TOKEN_ADDRESS: &str = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"; /// 1UNI = 0.00269ETH const TEST_TOKEN_PRICE_ETH: f64 = 0.00269; /// 1ETH = 371.74UNI; When converting gas price from ETH to UNI @@ -16,6 +16,10 @@ const TEST_TOKEN_PRICE_ETH: f64 = 0.00269; const TEST_BASE_PRICE: f64 = 371.74; const PRICE_FLOAT_COMPARE_TOLERANCE: f64 = 0.1; +pub(crate) fn approximate_value(api_price: &BaseTokenAPIRatio) -> f64 { + api_price.numerator.get() as f64 / api_price.denominator.get() as f64 +} + pub(crate) struct SetupResult { pub(crate) client: Box, } diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml index 4a190f82efc..f0e734e0668 100644 --- a/core/lib/l1_contract_interface/Cargo.toml +++ b/core/lib/l1_contract_interface/Cargo.toml @@ -20,12 +20,14 @@ crypto_codegen.workspace = true # Used to calculate the kzg commitment and proofs kzg.workspace = true +anyhow.workspace = true sha2.workspace = true sha3.workspace = true hex.workspace = true once_cell.workspace = true [dev-dependencies] +rand.workspace = true serde.workspace = true serde_json.workspace = true serde_with = { workspace = true, features = ["base64", "hex"] } diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs index a805876ca40..5a05cb0ffa5 100644 --- a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs +++ b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs @@ -1,7 +1,7 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi::{encode, Token}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, }; use crate::{ @@ -14,7 +14,7 @@ use crate::{ pub struct CommitBatches<'a> { pub last_committed_l1_batch: &'a L1BatchWithMetadata, pub l1_batches: &'a [L1BatchWithMetadata], - pub pubdata_da: PubdataDA, + pub pubdata_da: PubdataSendingMode, pub mode: L1BatchCommitmentMode, } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index f9dcdaaed10..0240acba350 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -6,7 +6,7 @@ use zksync_types::{ L1BatchWithMetadata, }, ethabi::{ParamType, Token}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, web3::{contract::Error as ContractError, keccak256}, ProtocolVersionId, H256, U256, }; @@ -26,14 +26,14 @@ const PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY: u8 = 2; pub struct CommitBatchInfo<'a> { mode: L1BatchCommitmentMode, l1_batch_with_metadata: &'a L1BatchWithMetadata, - 
pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, } impl<'a> CommitBatchInfo<'a> { pub fn new( mode: L1BatchCommitmentMode, l1_batch_with_metadata: &'a L1BatchWithMetadata, - pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, ) -> Self { Self { mode, @@ -221,22 +221,22 @@ impl Tokenizable for CommitBatchInfo<'_> { // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. ( L1BatchCommitmentMode::Validium, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, ) => { vec![PUBDATA_SOURCE_CALLDATA] } - (L1BatchCommitmentMode::Validium, PubdataDA::Blobs) => { + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Blobs) => { vec![PUBDATA_SOURCE_BLOBS] } - (L1BatchCommitmentMode::Rollup, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Custom) => { panic!("Custom pubdata DA is incompatible with Rollup mode") } - (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Custom) => { vec![PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY] } ( L1BatchCommitmentMode::Rollup, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, ) => { // We compute and add the blob commitment to the pubdata payload so that we can verify the proof // even if we are not using blobs. @@ -247,7 +247,7 @@ impl Tokenizable for CommitBatchInfo<'_> { .chain(blob_commitment) .collect() } - (L1BatchCommitmentMode::Rollup, PubdataDA::Blobs) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Blobs) => { let pubdata = self.pubdata_input(); let pubdata_commitments = pubdata.chunks(ZK_SYNC_BYTES_PER_BLOB).flat_map(|blob| { @@ -267,7 +267,7 @@ impl Tokenizable for CommitBatchInfo<'_> { .expect("Failed to get state_diff_hash from metadata"); tokens.push(Token::Bytes(match (self.mode, self.pubdata_da) { // Validiums with custom DA need the inclusion data to be part of operator_da_input - (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Custom) => { let mut operator_da_input: Vec = state_diff_hash.0.into(); operator_da_input.extend( @@ -284,14 +284,16 @@ impl Tokenizable for CommitBatchInfo<'_> { // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. 
( L1BatchCommitmentMode::Validium, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata | PubdataDA::Blobs, + PubdataSendingMode::Calldata + | PubdataSendingMode::RelayedL2Calldata + | PubdataSendingMode::Blobs, ) => state_diff_hash.0.into(), - (L1BatchCommitmentMode::Rollup, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Custom) => { panic!("Custom pubdata DA is incompatible with Rollup mode") } ( L1BatchCommitmentMode::Rollup, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, ) => { let pubdata = self.pubdata_input(); @@ -308,7 +310,7 @@ impl Tokenizable for CommitBatchInfo<'_> { .chain(blob_commitment) .collect() } - (L1BatchCommitmentMode::Rollup, PubdataDA::Blobs) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Blobs) => { let pubdata = self.pubdata_input(); let header = diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs index b6d2eefac30..b71d0938049 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs @@ -4,4 +4,7 @@ mod commit_batch_info; mod stored_batch_info; pub const SUPPORTED_ENCODING_VERSION: u8 = 0; +#[cfg(test)] +mod tests; + pub use self::{commit_batch_info::CommitBatchInfo, stored_batch_info::StoredBatchInfo}; diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs index 18b28f34c29..5ac40bce66e 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs @@ -1,7 +1,8 @@ +use anyhow::Context as _; use zksync_types::{ commitment::L1BatchWithMetadata, ethabi::{self, ParamType, Token}, - web3, + parse_h256, web3, web3::contract::Error as ContractError, H256, U256, }; @@ -9,7 +10,7 @@ use zksync_types::{ use crate::Tokenizable; /// `StoredBatchInfo` from `IExecutor.sol`. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct StoredBatchInfo { pub batch_number: u64, pub batch_hash: H256, @@ -22,11 +23,22 @@ pub struct StoredBatchInfo { } impl StoredBatchInfo { + /// Encodes the struct into RLP. + pub fn encode(&self) -> Vec { + ethabi::encode(&[self.clone().into_token()]) + } + + /// Decodes the struct from RLP. + pub fn decode(rlp: &[u8]) -> anyhow::Result { + let [token] = ethabi::decode_whole(&[Self::schema()], rlp)? + .try_into() + .unwrap(); + Ok(Self::from_token(token)?) + } + /// `_hashStoredBatchInfo` from `Executor.sol`. pub fn hash(&self) -> H256 { - H256(web3::keccak256(ðabi::encode(&[self - .clone() - .into_token()]))) + H256(web3::keccak256(&self.encode())) } pub fn schema() -> ParamType { @@ -59,11 +71,42 @@ impl From<&L1BatchWithMetadata> for StoredBatchInfo { } impl Tokenizable for StoredBatchInfo { - fn from_token(_token: Token) -> Result { - // Currently there is no need to decode this struct. - // We still want to implement `Tokenizable` trait for it, so that *once* it's needed - // the implementation is provided here and not in some other inconsistent way. 
- Err(ContractError::Other("Not implemented".into())) + fn from_token(token: Token) -> Result { + (|| { + let [ + Token::Uint(batch_number), + Token::FixedBytes(batch_hash), + Token::Uint(index_repeated_storage_changes), + Token::Uint(number_of_layer1_txs), + Token::FixedBytes(priority_operations_hash), + Token::FixedBytes(l2_logs_tree_root), + Token::Uint(timestamp), + Token::FixedBytes(commitment), + ] : [Token;8] = token + .into_tuple().context("not a tuple")? + .try_into().ok().context("bad length")? + else { anyhow::bail!("bad format") }; + Ok(Self { + batch_number: batch_number + .try_into() + .ok() + .context("overflow") + .context("batch_number")?, + batch_hash: parse_h256(&batch_hash).context("batch_hash")?, + index_repeated_storage_changes: index_repeated_storage_changes + .try_into() + .ok() + .context("overflow") + .context("index_repeated_storage_changes")?, + number_of_layer1_txs, + priority_operations_hash: parse_h256(&priority_operations_hash) + .context("priority_operations_hash")?, + l2_logs_tree_root: parse_h256(&l2_logs_tree_root).context("l2_logs_tree_root")?, + timestamp, + commitment: parse_h256(&commitment).context("commitment")?, + }) + })() + .map_err(|err| ContractError::InvalidOutputType(format!("{err:#}"))) } fn into_token(self) -> Token { diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs b/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs new file mode 100644 index 00000000000..0cb8caffb34 --- /dev/null +++ b/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs @@ -0,0 +1,32 @@ +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +use super::*; + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> StoredBatchInfo { + StoredBatchInfo { + batch_number: rng.gen(), + batch_hash: rng.gen(), + index_repeated_storage_changes: rng.gen(), + number_of_layer1_txs: rng.gen::().into(), + priority_operations_hash: rng.gen(), + l2_logs_tree_root: rng.gen(), + timestamp: rng.gen::().into(), + commitment: rng.gen(), + } + } +} + +/// Test checking encoding and decoding of `StoredBatchInfo`. +#[test] +fn test_encoding() { + let rng = &mut rand::thread_rng(); + for _ in 0..10 { + let want: StoredBatchInfo = rng.gen(); + let got = StoredBatchInfo::decode(&want.encode()).unwrap(); + assert_eq!(want, got); + } +} diff --git a/core/lib/l1_contract_interface/src/multicall3/mod.rs b/core/lib/l1_contract_interface/src/multicall3/mod.rs index 7d922668f94..52df37e0430 100644 --- a/core/lib/l1_contract_interface/src/multicall3/mod.rs +++ b/core/lib/l1_contract_interface/src/multicall3/mod.rs @@ -7,6 +7,7 @@ use zksync_types::{ }; /// Multicall3 contract aggregate method input vector struct. 
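/// Field layout mirrors Multicall3's `Call3` tuple: `(address target, bool allowFailure, bytes callData)`.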
+#[derive(Debug)] pub struct Multicall3Call { pub target: Address, pub allow_failure: bool, @@ -21,6 +22,7 @@ impl Tokenizable for Multicall3Call { self.calldata.into_token(), ]) } + fn from_token(token: Token) -> Result { let Token::Tuple(mut result_token) = token else { return Err(error(&[token], "Multicall3Call")); diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs index 334a4783a76..f6f9b72f9b6 100644 --- a/core/lib/mempool/src/mempool_store.rs +++ b/core/lib/mempool/src/mempool_store.rs @@ -1,4 +1,4 @@ -use std::collections::{hash_map, BTreeSet, HashMap, HashSet}; +use std::collections::{hash_map, BTreeSet, HashMap}; use zksync_types::{ l1::L1Tx, l2::L2Tx, Address, ExecuteTransactionCommon, Nonce, PriorityOpId, Transaction, @@ -221,22 +221,57 @@ impl MempoolStore { } fn gc(&mut self) -> Vec
{ - if self.size >= self.capacity { - let index: HashSet<_> = self + if self.size > self.capacity { + let mut transactions = std::mem::take(&mut self.l2_transactions_per_account); + let mut possibly_kept: Vec<_> = self .l2_priority_queue .iter() - .map(|pointer| pointer.account) + .rev() + .filter_map(|pointer| { + transactions + .remove(&pointer.account) + .map(|txs| (pointer.account, txs)) + }) .collect(); - let transactions = std::mem::take(&mut self.l2_transactions_per_account); - let (kept, drained) = transactions + + let mut sum = 0; + let mut number_of_accounts_kept = 0; + for (_, txs) in &possibly_kept { + sum += txs.len(); + if sum <= self.capacity as usize { + number_of_accounts_kept += 1; + } else { + break; + } + } + if number_of_accounts_kept == 0 && !possibly_kept.is_empty() { + tracing::warn!("mempool capacity is too low to handle txs from single account, consider increasing capacity"); + // Keep at least one entry, otherwise mempool won't return any new L2 tx to process. + number_of_accounts_kept = 1; + } + let (kept, drained) = { + let mut drained: Vec<_> = transactions.into_keys().collect(); + let also_drained = possibly_kept + .split_off(number_of_accounts_kept) + .into_iter() + .map(|(address, _)| address); + drained.extend(also_drained); + + (possibly_kept, drained) + }; + + let l2_priority_queue = std::mem::take(&mut self.l2_priority_queue); + self.l2_priority_queue = l2_priority_queue .into_iter() - .partition(|(address, _)| index.contains(address)); - self.l2_transactions_per_account = kept; + .rev() + .take(number_of_accounts_kept) + .collect(); + self.l2_transactions_per_account = kept.into_iter().collect(); self.size = self .l2_transactions_per_account .iter() - .fold(0, |agg, (_, tnxs)| agg + tnxs.len() as u64); - return drained.into_keys().collect(); + .fold(0, |agg, (_, txs)| agg + txs.len() as u64); + return drained; } vec![] } diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index 96ef600984f..b84ab7d5765 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -321,32 +321,26 @@ fn stashed_accounts() { #[test] fn mempool_capacity() { - let mut mempool = MempoolStore::new(PriorityOpId(0), 5); + let mut mempool = MempoolStore::new(PriorityOpId(0), 4); let account0 = Address::random(); let account1 = Address::random(); let account2 = Address::random(); + let account3 = Address::random(); let transactions = vec![ gen_l2_tx(account0, Nonce(0)), gen_l2_tx(account0, Nonce(1)), gen_l2_tx(account0, Nonce(2)), - gen_l2_tx(account1, Nonce(1)), - gen_l2_tx(account2, Nonce(1)), + gen_l2_tx_with_timestamp(account1, Nonce(0), unix_timestamp_ms() + 1), + gen_l2_tx_with_timestamp(account2, Nonce(0), unix_timestamp_ms() + 2), + gen_l2_tx(account3, Nonce(1)), ]; mempool.insert(transactions, HashMap::new()); - // the mempool is full. Accounts with non-sequential nonces got stashed + // Mempool is full. Accounts with non-sequential nonces and some accounts with lowest score should be purged. assert_eq!( HashSet::<_>::from_iter(mempool.get_mempool_info().purged_accounts), - HashSet::<_>::from_iter(vec![account1, account2]), - ); - // verify that existing good-to-go transactions and new ones got picked - mempool.insert( - vec![gen_l2_tx_with_timestamp( - account1, - Nonce(0), - unix_timestamp_ms() + 1, - )], - HashMap::new(), + HashSet::from([account2, account3]), ); + // verify that good-to-go transactions are kept. 
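+    // (With capacity 4, account0's three sequential txs plus account1's older tx fill the mempool exactly,
+    // which is why account2's newer tx and account3's nonce-gapped tx were purged above.)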
for _ in 0..3 { assert_eq!( mempool @@ -363,6 +357,34 @@ fn mempool_capacity() { .initiator_account(), account1 ); + assert!(!mempool.has_next(&L2TxFilter::default())); +} + +#[test] +fn mempool_does_not_purge_all_accounts() { + let mut mempool = MempoolStore::new(PriorityOpId(0), 1); + let account0 = Address::random(); + let account1 = Address::random(); + let transactions = vec![ + gen_l2_tx(account0, Nonce(0)), + gen_l2_tx(account0, Nonce(1)), + gen_l2_tx(account1, Nonce(1)), + ]; + mempool.insert(transactions, HashMap::new()); + // Mempool is full. Account 1 has tx with non-sequential nonce so it should be purged. + // Txs from account 0 have sequential nonces but their number is greater than capacity; they should be kept. + assert_eq!(mempool.get_mempool_info().purged_accounts, vec![account1]); + // verify that good-to-go transactions are kept. + for _ in 0..2 { + assert_eq!( + mempool + .next_transaction(&L2TxFilter::default()) + .unwrap() + .initiator_account(), + account0 + ); + } + assert!(!mempool.has_next(&L2TxFilter::default())); } fn gen_l2_tx(address: Address, nonce: Nonce) -> Transaction { diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index a4d577fc3ba..bb69bda209c 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -9,10 +9,11 @@ use crate::{ consistency::ConsistencyError, storage::{PatchSet, Patched, RocksDBWrapper}, types::{ - Key, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, ValueHash, - TREE_DEPTH, + Key, NodeKey, RawNode, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, + ValueHash, TREE_DEPTH, }, BlockOutput, HashTree, MerkleTree, MerkleTreePruner, MerkleTreePrunerHandle, NoVersionError, + PruneDatabase, }; impl TreeInstruction { @@ -444,6 +445,28 @@ impl ZkSyncTreeReader { self.0.entries_with_proofs(version, keys) } + /// Returns raw nodes for the specified `keys`. + pub fn raw_nodes(&self, keys: &[NodeKey]) -> Vec> { + let raw_nodes = self.0.db.raw_nodes(keys).into_iter(); + raw_nodes + .zip(keys) + .map(|(slice, key)| { + let slice = slice?; + Some(if key.is_empty() { + RawNode::deserialize_root(&slice) + } else { + RawNode::deserialize(&slice) + }) + }) + .collect() + } + + /// Returns raw stale keys obsoleted in the specified version of the tree. + pub fn raw_stale_keys(&self, l1_batch_number: L1BatchNumber) -> Vec { + let version = u64::from(l1_batch_number.0); + self.0.db.stale_keys(version) + } + /// Verifies consistency of the tree at the specified L1 batch number. /// /// # Errors diff --git a/core/lib/merkle_tree/src/errors.rs b/core/lib/merkle_tree/src/errors.rs index b8130717f93..c187ce4977b 100644 --- a/core/lib/merkle_tree/src/errors.rs +++ b/core/lib/merkle_tree/src/errors.rs @@ -22,6 +22,8 @@ pub enum DeserializeErrorKind { /// Bit mask specifying a child kind in an internal tree node is invalid. #[error("invalid bit mask specifying a child kind in an internal tree node")] InvalidChildKind, + #[error("data left after deserialization")] + Leftovers, /// Missing required tag in the tree manifest. 
#[error("missing required tag `{0}` in tree manifest")] diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 6f9da59cf0e..824f23eaf52 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -82,7 +82,7 @@ mod utils; pub mod unstable { pub use crate::{ errors::DeserializeError, - types::{Manifest, Node, NodeKey, ProfiledTreeOperation, Root}, + types::{Manifest, Node, NodeKey, ProfiledTreeOperation, RawNode, Root}, }; } diff --git a/core/lib/merkle_tree/src/storage/rocksdb.rs b/core/lib/merkle_tree/src/storage/rocksdb.rs index 711ccaa6137..22335c82940 100644 --- a/core/lib/merkle_tree/src/storage/rocksdb.rs +++ b/core/lib/merkle_tree/src/storage/rocksdb.rs @@ -53,6 +53,23 @@ impl NamedColumnFamily for MerkleTreeColumnFamily { type LocalProfiledOperation = RefCell>>; +/// Unifies keys that can be used to load raw data from RocksDB. +pub(crate) trait ToDbKey: Sync { + fn to_db_key(&self) -> Vec; +} + +impl ToDbKey for NodeKey { + fn to_db_key(&self) -> Vec { + NodeKey::to_db_key(*self) + } +} + +impl ToDbKey for (NodeKey, bool) { + fn to_db_key(&self) -> Vec { + NodeKey::to_db_key(self.0) + } +} + /// Main [`Database`] implementation wrapping a [`RocksDB`] reference. /// /// # Cloning @@ -112,7 +129,7 @@ impl RocksDBWrapper { .expect("Failed reading from RocksDB") } - fn raw_nodes(&self, keys: &NodeKeys) -> Vec>> { + pub(crate) fn raw_nodes(&self, keys: &[T]) -> Vec>> { // Propagate the currently profiled operation to rayon threads used in the parallel iterator below. let profiled_operation = self .profiled_operation @@ -126,7 +143,7 @@ impl RocksDBWrapper { let _guard = profiled_operation .as_ref() .and_then(ProfiledOperation::start_profiling); - let keys = chunk.iter().map(|(key, _)| key.to_db_key()); + let keys = chunk.iter().map(ToDbKey::to_db_key); let results = self.db.multi_get_cf(MerkleTreeColumnFamily::Tree, keys); results .into_iter() @@ -144,9 +161,9 @@ impl RocksDBWrapper { // If we didn't succeed with the patch set, or the key version is old, // access the underlying storage. 
let node = if is_leaf { - LeafNode::deserialize(raw_node).map(Node::Leaf) + LeafNode::deserialize(raw_node, false).map(Node::Leaf) } else { - InternalNode::deserialize(raw_node).map(Node::Internal) + InternalNode::deserialize(raw_node, false).map(Node::Internal) }; node.map_err(|err| { err.with_context(if is_leaf { @@ -187,7 +204,7 @@ impl Database for RocksDBWrapper { let Some(raw_root) = self.raw_node(&NodeKey::empty(version).to_db_key()) else { return Ok(None); }; - Root::deserialize(&raw_root) + Root::deserialize(&raw_root, false) .map(Some) .map_err(|err| err.with_context(ErrorContext::Root(version))) } diff --git a/core/lib/merkle_tree/src/storage/serialization.rs b/core/lib/merkle_tree/src/storage/serialization.rs index f21fece94e0..d0c573fd817 100644 --- a/core/lib/merkle_tree/src/storage/serialization.rs +++ b/core/lib/merkle_tree/src/storage/serialization.rs @@ -5,7 +5,7 @@ use std::{collections::HashMap, str}; use crate::{ errors::{DeserializeError, DeserializeErrorKind, ErrorContext}, types::{ - ChildRef, InternalNode, Key, LeafNode, Manifest, Node, Root, TreeTags, ValueHash, + ChildRef, InternalNode, Key, LeafNode, Manifest, Node, RawNode, Root, TreeTags, ValueHash, HASH_SIZE, KEY_SIZE, }, }; @@ -15,7 +15,7 @@ use crate::{ const LEB128_SIZE_ESTIMATE: usize = 3; impl LeafNode { - pub(super) fn deserialize(bytes: &[u8]) -> Result { + pub(super) fn deserialize(bytes: &[u8], strict: bool) -> Result { if bytes.len() < KEY_SIZE + HASH_SIZE { return Err(DeserializeErrorKind::UnexpectedEof.into()); } @@ -26,6 +26,10 @@ impl LeafNode { let leaf_index = leb128::read::unsigned(&mut bytes).map_err(|err| { DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafIndex) })?; + if strict && !bytes.is_empty() { + return Err(DeserializeErrorKind::Leftovers.into()); + } + Ok(Self { full_key, value_hash, @@ -105,7 +109,7 @@ impl ChildRef { } impl InternalNode { - pub(super) fn deserialize(bytes: &[u8]) -> Result { + pub(super) fn deserialize(bytes: &[u8], strict: bool) -> Result { if bytes.len() < 4 { let err = DeserializeErrorKind::UnexpectedEof; return Err(err.with_context(ErrorContext::ChildrenMask)); @@ -134,6 +138,9 @@ impl InternalNode { } bitmap >>= 2; } + if strict && !bytes.is_empty() { + return Err(DeserializeErrorKind::Leftovers.into()); + } Ok(this) } @@ -161,8 +168,36 @@ impl InternalNode { } } +impl RawNode { + pub(crate) fn deserialize(bytes: &[u8]) -> Self { + Self { + raw: bytes.to_vec(), + leaf: LeafNode::deserialize(bytes, true).ok(), + internal: InternalNode::deserialize(bytes, true).ok(), + } + } + + pub(crate) fn deserialize_root(bytes: &[u8]) -> Self { + let root = Root::deserialize(bytes, true).ok(); + let node = root.and_then(|root| match root { + Root::Empty => None, + Root::Filled { node, .. } => Some(node), + }); + let (leaf, internal) = match node { + None => (None, None), + Some(Node::Leaf(leaf)) => (Some(leaf), None), + Some(Node::Internal(node)) => (None, Some(node)), + }; + Self { + raw: bytes.to_vec(), + leaf, + internal, + } + } +} + impl Root { - pub(super) fn deserialize(mut bytes: &[u8]) -> Result { + pub(super) fn deserialize(mut bytes: &[u8], strict: bool) -> Result { let leaf_count = leb128::read::unsigned(&mut bytes).map_err(|err| { DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafCount) })?; @@ -172,11 +207,11 @@ impl Root { // Try both the leaf and internal node serialization; in some cases, a single leaf // may still be persisted as an internal node. 
Since serialization of an internal node with a single child // is always shorter than that a leaf, the order (first leaf, then internal node) is chosen intentionally. - LeafNode::deserialize(bytes) + LeafNode::deserialize(bytes, strict) .map(Node::Leaf) - .or_else(|_| InternalNode::deserialize(bytes).map(Node::Internal))? + .or_else(|_| InternalNode::deserialize(bytes, strict).map(Node::Internal))? } - _ => Node::Internal(InternalNode::deserialize(bytes)?), + _ => Node::Internal(InternalNode::deserialize(bytes, strict)?), }; Ok(Self::new(leaf_count, node)) } @@ -440,7 +475,7 @@ mod tests { assert_eq!(buffer[64], 42); // leaf index assert_eq!(buffer.len(), 65); - let leaf_copy = LeafNode::deserialize(&buffer).unwrap(); + let leaf_copy = LeafNode::deserialize(&buffer, true).unwrap(); assert_eq!(leaf_copy, leaf); } @@ -471,7 +506,7 @@ mod tests { let child_count = bitmap.count_ones(); assert_eq!(child_count, 2); - let node_copy = InternalNode::deserialize(&buffer).unwrap(); + let node_copy = InternalNode::deserialize(&buffer, true).unwrap(); assert_eq!(node_copy, node); } @@ -482,7 +517,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer, [0]); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } @@ -494,7 +529,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer[0], 1); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } @@ -506,7 +541,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer[0], 2); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } } diff --git a/core/lib/merkle_tree/src/types/internal.rs b/core/lib/merkle_tree/src/types/internal.rs index 399f6c840a3..2db075d9221 100644 --- a/core/lib/merkle_tree/src/types/internal.rs +++ b/core/lib/merkle_tree/src/types/internal.rs @@ -2,7 +2,9 @@ //! some of these types are declared as public and can be even exported using the `unstable` module. //! Still, logically these types are private, so adding them to new public APIs etc. is a logical error. -use std::{collections::HashMap, fmt, num::NonZeroU64}; +use std::{collections::HashMap, fmt, num::NonZeroU64, str::FromStr}; + +use anyhow::Context; use crate::{ hasher::{HashTree, InternalNodeCache}, @@ -276,6 +278,34 @@ impl fmt::Debug for Nibbles { } } +impl FromStr for Nibbles { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + anyhow::ensure!(s.len() <= KEY_SIZE * 2, "too many nibbles"); + let mut bytes = NibblesBytes::default(); + for (i, byte) in s.bytes().enumerate() { + let nibble = match byte { + b'0'..=b'9' => byte - b'0', + b'A'..=b'F' => byte - b'A' + 10, + b'a'..=b'f' => byte - b'a' + 10, + _ => anyhow::bail!("unexpected nibble: {byte:?}"), + }; + + assert!(nibble < 16); + if i % 2 == 0 { + bytes[i / 2] = nibble * 16; + } else { + bytes[i / 2] += nibble; + } + } + Ok(Self { + nibble_count: s.len(), + bytes, + }) + } +} + /// Versioned key in a radix-16 Merkle tree. 
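/// Keys have a `{version}:{nibbles}` textual form (e.g. `3:deadbeef0`); see the `Display` and `FromStr` impls below.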
#[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct NodeKey { @@ -283,12 +313,31 @@ pub struct NodeKey { pub(crate) nibbles: Nibbles, } -impl fmt::Debug for NodeKey { +impl fmt::Display for NodeKey { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { write!(formatter, "{}:{}", self.version, self.nibbles) } } +impl fmt::Debug for NodeKey { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, formatter) + } +} + +impl FromStr for NodeKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let (version, nibbles) = s + .split_once(':') + .context("node key does not contain `:` delimiter")?; + let version = version.parse().context("invalid key version")?; + let nibbles = nibbles.parse().context("invalid nibbles")?; + Ok(Self { version, nibbles }) + } +} + impl NodeKey { pub(crate) const fn empty(version: u64) -> Self { Self { @@ -331,19 +380,13 @@ impl NodeKey { } } -impl fmt::Display for NodeKey { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(formatter, "{}:{}", self.version, self.nibbles) - } -} - /// Leaf node of the tree. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] pub struct LeafNode { - pub(crate) full_key: Key, - pub(crate) value_hash: ValueHash, - pub(crate) leaf_index: u64, + pub full_key: Key, + pub value_hash: ValueHash, + pub leaf_index: u64, } impl LeafNode { @@ -364,7 +407,7 @@ impl LeafNode { /// Reference to a child in an [`InternalNode`]. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] -pub(crate) struct ChildRef { +pub struct ChildRef { pub hash: ValueHash, pub version: u64, pub is_leaf: bool, @@ -449,7 +492,7 @@ impl InternalNode { self.cache.get_or_insert(cache) } - pub(crate) fn children(&self) -> impl Iterator + '_ { + pub fn children(&self) -> impl Iterator + '_ { self.children.iter() } @@ -510,6 +553,17 @@ impl From for Node { } } +/// Raw node fetched from a database. +#[derive(Debug)] +pub struct RawNode { + /// Bytes for a serialized node. + pub raw: Vec, + /// Leaf if a node can be deserialized into it. + pub leaf: Option, + /// Internal node if a node can be deserialized into it. + pub internal: Option, +} + /// Root node of the tree. Besides a [`Node`], contains the general information about the tree /// (e.g., the number of leaves). 
#[derive(Debug, Clone)] @@ -614,15 +668,23 @@ mod tests { fn nibbles_and_node_key_display() { let nibbles = Nibbles::new(&TEST_KEY, 5); assert_eq!(nibbles.to_string(), "deadb"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let nibbles = Nibbles::new(&TEST_KEY, 6); assert_eq!(nibbles.to_string(), "deadbe"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let nibbles = Nibbles::new(&TEST_KEY, 9); assert_eq!(nibbles.to_string(), "deadbeef0"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let node_key = nibbles.with_version(3); assert_eq!(node_key.to_string(), "3:deadbeef0"); + let restored: NodeKey = node_key.to_string().parse().unwrap(); + assert_eq!(restored, node_key); } #[test] diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs index 807ae023876..63db4b318b2 100644 --- a/core/lib/merkle_tree/src/types/mod.rs +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -6,7 +6,7 @@ pub(crate) use self::internal::{ ChildRef, Nibbles, NibblesBytes, StaleNodeKey, TreeTags, HASH_SIZE, KEY_SIZE, TREE_DEPTH, }; pub use self::internal::{ - InternalNode, LeafNode, Manifest, Node, NodeKey, ProfiledTreeOperation, Root, + InternalNode, LeafNode, Manifest, Node, NodeKey, ProfiledTreeOperation, RawNode, Root, }; mod internal; diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs index abd3dbbcd3f..fa7ec4cfde3 100644 --- a/core/lib/merkle_tree/tests/integration/domain.rs +++ b/core/lib/merkle_tree/tests/integration/domain.rs @@ -68,6 +68,31 @@ fn basic_workflow() { tree.verify_consistency(L1BatchNumber(0)).unwrap(); assert_eq!(tree.root_hash(), expected_root_hash); assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(1)); + + let keys = ["0:", "0:0"].map(|key| key.parse().unwrap()); + let raw_nodes = tree.reader().raw_nodes(&keys); + assert_eq!(raw_nodes.len(), 2); + let raw_root = raw_nodes[0].as_ref().unwrap(); + assert!(!raw_root.raw.is_empty()); + assert!(raw_root.internal.is_some()); + assert!(raw_root.leaf.is_none()); + + let raw_node = raw_nodes[1].as_ref().unwrap(); + assert!(!raw_node.raw.is_empty()); + assert!(raw_node.leaf.is_none()); + let raw_node = raw_node.internal.as_ref().unwrap(); + + let (nibble, _) = raw_node + .children() + .find(|(_, child_ref)| child_ref.is_leaf) + .unwrap(); + let leaf_key = format!("0:0{nibble:x}").parse().unwrap(); + let raw_nodes = tree.reader().raw_nodes(&[leaf_key]); + assert_eq!(raw_nodes.len(), 1); + let raw_leaf = raw_nodes.into_iter().next().unwrap().expect("no leaf"); + assert!(!raw_leaf.raw.is_empty()); + assert!(raw_leaf.leaf.is_some()); + assert!(raw_leaf.internal.is_none()); } #[test] diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 67a00d064ad..eb770bf9b57 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -28,8 +28,8 @@ zksync_types.workspace = true zksync_contracts.workspace = true zksync_utils.workspace = true zksync_system_constants.workspace = true -zksync_mini_merkle_tree.workspace = true zksync_vm_interface.workspace = true +zksync_mini_merkle_tree.workspace = true anyhow.workspace = true hex.workspace = true @@ -38,10 +38,11 @@ once_cell.workspace = true thiserror.workspace = true tracing.workspace = true vise.workspace = true +ethabi.workspace = true [dev-dependencies] assert_matches.workspace = true -tokio = { workspace = true, features = ["time"] } 
+pretty_assertions.workspace = true +test-casing.workspace = true zksync_test_account.workspace = true -ethabi.workspace = true zksync_eth_signer.workspace = true diff --git a/core/lib/multivm/README.md b/core/lib/multivm/README.md index 5e2af426ae5..34883db5990 100644 --- a/core/lib/multivm/README.md +++ b/core/lib/multivm/README.md @@ -4,3 +4,17 @@ This crate represents a wrapper over several versions of VM that have been used glue code that allows switching the VM version based on the externally provided marker while preserving the public interface. This crate exists to enable the external node to process breaking upgrades and re-execute all the transactions from the genesis block. + +## Developer guidelines + +### Adding tests + +If you want to add unit tests for the VM wrapper, consider the following: + +- Whenever possible, make tests reusable; declare test logic in the [`testonly`](src/versions/testonly/mod.rs) module, + and then instantiate tests using this logic for the supported VM versions. If necessary, extend the tested VM trait so + that test logic can be defined in a generic way. See the `testonly` module docs for more detailed guidelines. +- If you define a generic test, don't forget to add its instantiations for all supported VMs (`vm_latest`, `vm_fast` and + `shadow`). `shadow` tests allow checking VM divergences for free! +- Do not use an RNG where it can be avoided (e.g., for test contract addresses). +- Avoid using zero / default values in cases they can be treated specially by the tested code. diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index ce928e652d7..50bb19938fe 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -47,6 +47,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -103,6 +104,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -158,6 +160,7 @@ impl GlueFrom for crate::interface: circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -227,6 +230,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } @@ -259,6 +263,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } @@ -307,6 +312,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs index 3cb61b461a4..4c4cffcc687 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs @@ -22,6 +22,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } @@ -48,6 +49,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } 
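To illustrate the reusable-test guideline from the multivm README above, here is a minimal, self-contained sketch of declaring test logic once against a trait and instantiating it per VM. The `TestedVm` trait, the two VM structs and the test body are hypothetical stand-ins for illustration only, not the crate's actual `testonly` API:

```rust
// Hypothetical trait capturing the minimal surface a generic VM test needs.
trait TestedVm {
    fn new() -> Self;
    // Executes a transfer and returns the resulting balance.
    fn execute_transfer(&mut self, amount: u64) -> u64;
}

// Test logic is written once, generically over the VM implementation...
fn test_basic_transfer<VM: TestedVm>() {
    let mut vm = VM::new();
    // Avoid zero / default values that the tested code may treat specially.
    let balance = vm.execute_transfer(1_000);
    assert_eq!(balance, 1_000);
}

// ...and instantiated for every supported VM (e.g. `vm_latest`, `vm_fast`, `shadow`).
struct LatestVm { balance: u64 }
struct FastVm { balance: u64 }

impl TestedVm for LatestVm {
    fn new() -> Self { Self { balance: 0 } }
    fn execute_transfer(&mut self, amount: u64) -> u64 { self.balance += amount; self.balance }
}

impl TestedVm for FastVm {
    fn new() -> Self { Self { balance: 0 } }
    fn execute_transfer(&mut self, amount: u64) -> u64 { self.balance += amount; self.balance }
}

#[test]
fn basic_transfer_vm_latest() {
    test_basic_transfer::<LatestVm>();
}

#[test]
fn basic_transfer_vm_fast() {
    test_basic_transfer::<FastVm>();
}
```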
@@ -74,6 +76,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs index 2dc680ba77d..8978d4348ed 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -66,12 +66,14 @@ impl GlueFrom VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, } } @@ -100,12 +102,14 @@ impl logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, TxRevertReason::Halt(halt) => VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, } } @@ -129,6 +133,7 @@ impl GlueFrom { unreachable!("Halt is the only revert reason for VM 5") diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index e171a78e179..1cba2c0fb92 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -16,10 +16,11 @@ pub use crate::{ vm_1_3_2, vm_1_4_1, vm_1_4_2, vm_boojum_integration, vm_fast, vm_latest, vm_m5, vm_m6, vm_refunds_enhancement, vm_virtual_blocks, }, - vm_instance::{FastVmInstance, LegacyVmInstance}, + vm_instance::{is_supported_by_fast_vm, FastVmInstance, LegacyVmInstance}, }; mod glue; +pub mod pubdata_builders; pub mod tracers; pub mod utils; mod versions; diff --git a/core/lib/multivm/src/pubdata_builders/mod.rs b/core/lib/multivm/src/pubdata_builders/mod.rs new file mode 100644 index 00000000000..c52c4c70c86 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/mod.rs @@ -0,0 +1,24 @@ +use std::rc::Rc; + +pub use rollup::RollupPubdataBuilder; +pub use validium::ValidiumPubdataBuilder; +use zksync_types::commitment::{L1BatchCommitmentMode, PubdataParams}; + +use crate::interface::pubdata::PubdataBuilder; + +mod rollup; +#[cfg(test)] +mod tests; +mod utils; +mod validium; + +pub fn pubdata_params_to_builder(params: PubdataParams) -> Rc { + match params.pubdata_type { + L1BatchCommitmentMode::Rollup => { + Rc::new(RollupPubdataBuilder::new(params.l2_da_validator_address)) + } + L1BatchCommitmentMode::Validium => { + Rc::new(ValidiumPubdataBuilder::new(params.l2_da_validator_address)) + } + } +} diff --git a/core/lib/multivm/src/pubdata_builders/rollup.rs b/core/lib/multivm/src/pubdata_builders/rollup.rs new file mode 100644 index 00000000000..4a818dfe231 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/rollup.rs @@ -0,0 +1,128 @@ +use zksync_types::{ + ethabi, + ethabi::{ParamType, Token}, + l2_to_l1_log::l2_to_l1_logs_tree_size, + writes::compress_state_diffs, + Address, ProtocolVersionId, +}; + +use super::utils::{ + build_chained_bytecode_hash, build_chained_log_hash, build_chained_message_hash, + build_logs_root, encode_user_logs, +}; +use crate::interface::pubdata::{PubdataBuilder, PubdataInput}; + +#[derive(Debug, Clone, Copy)] +pub struct RollupPubdataBuilder { + pub l2_da_validator: Address, +} + +impl RollupPubdataBuilder { + pub fn new(l2_da_validator: Address) -> Self { + Self { l2_da_validator } + } +} + +impl PubdataBuilder for RollupPubdataBuilder { + fn l2_da_validator(&self) -> Address { + self.l2_da_validator + } + + fn l1_messenger_operator_input( + 
&self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + if protocol_version.is_pre_gateway() { + let mut operator_input = vec![]; + extend_from_pubdata_input(&mut operator_input, input); + + // Extend with uncompressed state diffs. + operator_input.extend((input.state_diffs.len() as u32).to_be_bytes()); + for state_diff in &input.state_diffs { + operator_input.extend(state_diff.encode_padded()); + } + + operator_input + } else { + let mut pubdata = vec![]; + extend_from_pubdata_input(&mut pubdata, input); + + // Extend with uncompressed state diffs. + pubdata.extend((input.state_diffs.len() as u32).to_be_bytes()); + for state_diff in &input.state_diffs { + pubdata.extend(state_diff.encode_padded()); + } + + let chained_log_hash = build_chained_log_hash(&input.user_logs); + let log_root_hash = + build_logs_root(&input.user_logs, l2_to_l1_logs_tree_size(protocol_version)); + let chained_msg_hash = build_chained_message_hash(&input.l2_to_l1_messages); + let chained_bytecodes_hash = build_chained_bytecode_hash(&input.published_bytecodes); + + let l2_da_header = vec![ + Token::FixedBytes(chained_log_hash), + Token::FixedBytes(log_root_hash), + Token::FixedBytes(chained_msg_hash), + Token::FixedBytes(chained_bytecodes_hash), + Token::Bytes(pubdata), + ]; + + // Selector of `IL2DAValidator::validatePubdata`. + let func_selector = ethabi::short_signature( + "validatePubdata", + &[ + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::Bytes, + ], + ); + + [func_selector.to_vec(), ethabi::encode(&l2_da_header)].concat() + } + } + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + _protocol_version: ProtocolVersionId, + ) -> Vec { + let mut pubdata = vec![]; + extend_from_pubdata_input(&mut pubdata, input); + + pubdata + } +} + +fn extend_from_pubdata_input(buffer: &mut Vec, pubdata_input: &PubdataInput) { + let PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + } = pubdata_input; + + // Adding user L2->L1 logs. + buffer.extend(encode_user_logs(user_logs)); + + // Encoding L2->L1 messages + // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` + buffer.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); + for message in l2_to_l1_messages { + buffer.extend((message.len() as u32).to_be_bytes()); + buffer.extend(message); + } + // Encoding bytecodes + // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... 
|| (bytecodes[n].len() as u32) || bytecodes[n]]` + buffer.extend((published_bytecodes.len() as u32).to_be_bytes()); + for bytecode in published_bytecodes { + buffer.extend((bytecode.len() as u32).to_be_bytes()); + buffer.extend(bytecode); + } + // Encoding state diffs + // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: intial + repeated) as u32 || sorted state diffs by ]` + let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); + buffer.extend(state_diffs_compressed); +} diff --git a/core/lib/multivm/src/pubdata_builders/tests.rs b/core/lib/multivm/src/pubdata_builders/tests.rs new file mode 100644 index 00000000000..bc24b8e4734 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/tests.rs @@ -0,0 +1,123 @@ +use zksync_types::{ + writes::StateDiffRecord, Address, ProtocolVersionId, ACCOUNT_CODE_STORAGE_ADDRESS, + BOOTLOADER_ADDRESS, +}; +use zksync_utils::u256_to_h256; + +use super::{rollup::RollupPubdataBuilder, validium::ValidiumPubdataBuilder}; +use crate::interface::pubdata::{L1MessengerL2ToL1Log, PubdataBuilder, PubdataInput}; + +fn mock_input() -> PubdataInput { + // Just using some constant addresses for tests + let addr1 = BOOTLOADER_ADDRESS; + let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; + + let user_logs = vec![L1MessengerL2ToL1Log { + l2_shard_id: 0, + is_service: false, + tx_number_in_block: 0, + sender: addr1, + key: 1.into(), + value: 128.into(), + }]; + + let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; + + let published_bytecodes = vec![hex::decode("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb").unwrap()]; + + // For covering more cases, we have two state diffs: + // One with enumeration index present (and so it is a repeated write) and the one without it. 
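+    // (A zero `enumeration_index` marks an initial write; a non-zero one marks a repeated write.)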
+ let state_diffs = vec![ + StateDiffRecord { + address: addr2, + key: 155.into(), + derived_key: u256_to_h256(125.into()).0, + enumeration_index: 12, + initial_value: 11.into(), + final_value: 12.into(), + }, + StateDiffRecord { + address: addr2, + key: 156.into(), + derived_key: u256_to_h256(126.into()).0, + enumeration_index: 0, + initial_value: 0.into(), + final_value: 14.into(), + }, + ]; + + PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + } +} + +#[test] +fn test_rollup_pubdata_building() { + let input = mock_input(); + + let rollup_pubdata_builder = RollupPubdataBuilder::new(Address::zero()); + + let actual = + rollup_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version24); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input` (pre gateway)" + ); + + let actual = + rollup_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version24); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata` (pre gateway)" + ); + + let actual = + rollup_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version27); + let expected = 
"89f9a07233e608561d90f7c4e7bcea24d718e425a6bd6c8eefb48a334366143694c75fae278944d856d68e33bbd32937cb3a1ea35cbf7d6eeeb1150f500dd0d64d0efe420d6dafe5897eab2fc27b2e47af303397ed285ace146d836d042717b0a3dc4b28a603a33b28ce1d5c52c593a46a15a99f1afa1c1d92715284288958fd54a93de700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000032300000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input` (post gateway)" + ); + + let actual = + rollup_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version27); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata` (post gateway)" + ); +} + +#[test] +fn test_validium_pubdata_building() { + let input = mock_input(); + + let validium_pubdata_builder = ValidiumPubdataBuilder::new(Address::zero()); + + let actual = + validium_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version27); + let expected = 
"89f9a07233e608561d90f7c4e7bcea24d718e425a6bd6c8eefb48a334366143694c75fae278944d856d68e33bbd32937cb3a1ea35cbf7d6eeeb1150f500dd0d64d0efe420d6dafe5897eab2fc27b2e47af303397ed285ace146d836d042717b0a3dc4b28a603a33b28ce1d5c52c593a46a15a99f1afa1c1d92715284288958fd54a93de700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000005c000000010000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input`" + ); + + let actual = + validium_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version27); + let expected = "fa96e2436e6fb4d668f5a06681a7c53fcb199b2747ee624ee52a13e85aac5f1e"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata`" + ); +} diff --git a/core/lib/multivm/src/pubdata_builders/utils.rs b/core/lib/multivm/src/pubdata_builders/utils.rs new file mode 100644 index 00000000000..57361a674fb --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/utils.rs @@ -0,0 +1,70 @@ +use zksync_mini_merkle_tree::MiniMerkleTree; +use zksync_types::web3::keccak256; +use zksync_utils::bytecode::hash_bytecode; + +use crate::interface::pubdata::L1MessengerL2ToL1Log; + +pub(crate) fn build_chained_log_hash(user_logs: &[L1MessengerL2ToL1Log]) -> Vec { + let mut chained_log_hash = vec![0u8; 32]; + + for log in user_logs { + let log_bytes = log.packed_encoding(); + let hash = keccak256(&log_bytes); + + chained_log_hash = keccak256(&[chained_log_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_log_hash +} + +pub(crate) fn build_logs_root( + user_logs: &[L1MessengerL2ToL1Log], + l2_to_l1_logs_tree_size: usize, +) -> Vec { + let logs = user_logs.iter().map(|log| { + let encoded = log.packed_encoding(); + let mut slice = [0u8; 88]; + slice.copy_from_slice(&encoded); + slice + }); + MiniMerkleTree::new(logs, Some(l2_to_l1_logs_tree_size)) + .merkle_root() + .as_bytes() + .to_vec() +} + +pub(crate) fn build_chained_message_hash(l2_to_l1_messages: &[Vec]) -> Vec { + let mut chained_msg_hash = vec![0u8; 32]; + + for msg in l2_to_l1_messages { + let hash = keccak256(msg); + + chained_msg_hash = keccak256(&[chained_msg_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_msg_hash +} + +pub(crate) fn build_chained_bytecode_hash(published_bytecodes: &[Vec]) -> Vec { + let mut chained_bytecode_hash = vec![0u8; 32]; + + for bytecode in published_bytecodes { + let hash = hash_bytecode(bytecode).to_fixed_bytes(); + + chained_bytecode_hash = + keccak256(&[chained_bytecode_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_bytecode_hash +} + +pub(crate) fn encode_user_logs(user_logs: &[L1MessengerL2ToL1Log]) -> Vec { + // Encoding user L2->L1 logs. + // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... 
|| l2tol1logs[n]]` + let mut result = vec![]; + result.extend((user_logs.len() as u32).to_be_bytes()); + for l2tol1log in user_logs { + result.extend(l2tol1log.packed_encoding()); + } + result +} diff --git a/core/lib/multivm/src/pubdata_builders/validium.rs b/core/lib/multivm/src/pubdata_builders/validium.rs new file mode 100644 index 00000000000..a9156e970aa --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/validium.rs @@ -0,0 +1,93 @@ +use zksync_types::{ + ethabi, + ethabi::{ParamType, Token}, + l2_to_l1_log::l2_to_l1_logs_tree_size, + web3::keccak256, + Address, ProtocolVersionId, +}; + +use super::utils::{ + build_chained_bytecode_hash, build_chained_log_hash, build_chained_message_hash, + build_logs_root, encode_user_logs, +}; +use crate::interface::pubdata::{PubdataBuilder, PubdataInput}; + +#[derive(Debug, Clone, Copy)] +pub struct ValidiumPubdataBuilder { + pub l2_da_validator: Address, +} + +impl ValidiumPubdataBuilder { + pub fn new(l2_da_validator: Address) -> Self { + Self { l2_da_validator } + } +} + +impl PubdataBuilder for ValidiumPubdataBuilder { + fn l2_da_validator(&self) -> Address { + self.l2_da_validator + } + + fn l1_messenger_operator_input( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + assert!( + !protocol_version.is_pre_gateway(), + "ValidiumPubdataBuilder must not be called for pre gateway" + ); + + let mut pubdata = vec![]; + pubdata.extend(encode_user_logs(&input.user_logs)); + + let chained_log_hash = build_chained_log_hash(&input.user_logs); + let log_root_hash = + build_logs_root(&input.user_logs, l2_to_l1_logs_tree_size(protocol_version)); + let chained_msg_hash = build_chained_message_hash(&input.l2_to_l1_messages); + let chained_bytecodes_hash = build_chained_bytecode_hash(&input.published_bytecodes); + + let l2_da_header = vec![ + Token::FixedBytes(chained_log_hash), + Token::FixedBytes(log_root_hash), + Token::FixedBytes(chained_msg_hash), + Token::FixedBytes(chained_bytecodes_hash), + Token::Bytes(pubdata), + ]; + + // Selector of `IL2DAValidator::validatePubdata`. 
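+        // (The first four bytes of `keccak256("validatePubdata(bytes32,bytes32,bytes32,bytes32,bytes)")`.)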
+ let func_selector = ethabi::short_signature( + "validatePubdata", + &[ + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::Bytes, + ], + ); + + [func_selector.to_vec(), ethabi::encode(&l2_da_header)] + .concat() + .to_vec() + } + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + assert!( + !protocol_version.is_pre_gateway(), + "ValidiumPubdataBuilder must not be called for pre gateway" + ); + + let state_diffs_packed = input + .state_diffs + .iter() + .flat_map(|diff| diff.encode_padded()) + .collect::>(); + + keccak256(&state_diffs_packed).to_vec() + } +} diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index a1573f24c66..057551a9efe 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -1,4 +1,8 @@ -use std::{collections::HashSet, marker::PhantomData, sync::Arc}; +use std::{ + collections::{BTreeSet, HashSet}, + marker::PhantomData, + sync::Arc, +}; use once_cell::sync::OnceCell; use zksync_system_constants::{ @@ -8,7 +12,7 @@ use zksync_system_constants::{ use zksync_types::{ vm::VmVersion, web3::keccak256, AccountTreeId, Address, StorageKey, H256, U256, }; -use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; +use zksync_utils::{address_to_u256, be_bytes_to_safe_address, u256_to_h256}; use self::types::{NewTrustedValidationItems, ValidationTracerMode}; use crate::{ @@ -32,7 +36,7 @@ mod vm_virtual_blocks; #[derive(Debug, Clone)] pub struct ValidationTracer { validation_mode: ValidationTracerMode, - auxilary_allowed_slots: HashSet, + auxilary_allowed_slots: BTreeSet, user_address: Address, #[allow(dead_code)] @@ -51,6 +55,8 @@ pub struct ValidationTracer { type ValidationRoundResult = Result; impl ValidationTracer { + const MAX_ALLOWED_SLOT_OFFSET: u32 = 127; + pub fn new( params: ValidationParams, vm_version: VmVersion, @@ -131,9 +137,15 @@ impl ValidationTracer { } // The user is allowed to touch its own slots or slots semantically related to him. 
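+        // A slot also counts as related if some explicitly allowed auxiliary slot lies at most
+        // `MAX_ALLOWED_SLOT_OFFSET` (127) below the touched key.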
+ let from = u256_to_h256(key.saturating_sub(Self::MAX_ALLOWED_SLOT_OFFSET.into())); + let to = u256_to_h256(key); let valid_users_slot = address == self.user_address - || u256_to_account_address(&key) == self.user_address - || self.auxilary_allowed_slots.contains(&u256_to_h256(key)); + || key == address_to_u256(&self.user_address) + || self + .auxilary_allowed_slots + .range(from..=to) + .next() + .is_some(); if valid_users_slot { return true; } diff --git a/core/lib/multivm/src/utils/events.rs b/core/lib/multivm/src/utils/events.rs index 9720cb77914..d84651989e7 100644 --- a/core/lib/multivm/src/utils/events.rs +++ b/core/lib/multivm/src/utils/events.rs @@ -1,59 +1,10 @@ use zksync_system_constants::L1_MESSENGER_ADDRESS; use zksync_types::{ ethabi::{self, Token}, - l2_to_l1_log::L2ToL1Log, - Address, H256, U256, + H256, U256, }; -use zksync_utils::{u256_to_bytes_be, u256_to_h256}; -use crate::interface::VmEvent; - -/// Corresponds to the following solidity event: -/// ```solidity -/// struct L2ToL1Log { -/// uint8 l2ShardId; -/// bool isService; -/// uint16 txNumberInBlock; -/// address sender; -/// bytes32 key; -/// bytes32 value; -/// } -/// ``` -#[derive(Debug, Default, Clone, PartialEq)] -pub(crate) struct L1MessengerL2ToL1Log { - pub l2_shard_id: u8, - pub is_service: bool, - pub tx_number_in_block: u16, - pub sender: Address, - pub key: U256, - pub value: U256, -} - -impl L1MessengerL2ToL1Log { - pub fn packed_encoding(&self) -> Vec { - let mut res: Vec = vec![]; - res.push(self.l2_shard_id); - res.push(self.is_service as u8); - res.extend_from_slice(&self.tx_number_in_block.to_be_bytes()); - res.extend_from_slice(self.sender.as_bytes()); - res.extend(u256_to_bytes_be(&self.key)); - res.extend(u256_to_bytes_be(&self.value)); - res - } -} - -impl From for L2ToL1Log { - fn from(log: L1MessengerL2ToL1Log) -> Self { - L2ToL1Log { - shard_id: log.l2_shard_id, - is_service: log.is_service, - tx_number_in_block: log.tx_number_in_block, - sender: log.sender, - key: u256_to_h256(log.key), - value: u256_to_h256(log.value), - } - } -} +use crate::interface::{pubdata::L1MessengerL2ToL1Log, VmEvent}; #[derive(Debug, PartialEq)] pub(crate) struct L1MessengerBytecodePublicationRequest { @@ -142,7 +93,8 @@ mod tests { use zksync_system_constants::{ BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, }; - use zksync_types::L1BatchNumber; + use zksync_types::{Address, L1BatchNumber}; + use zksync_utils::u256_to_h256; use super::*; diff --git a/core/lib/multivm/src/utils/mod.rs b/core/lib/multivm/src/utils/mod.rs index 44ed004adc2..a55adb16c85 100644 --- a/core/lib/multivm/src/utils/mod.rs +++ b/core/lib/multivm/src/utils/mod.rs @@ -248,7 +248,7 @@ pub fn get_bootloader_encoding_space(version: VmVersion) -> u32 { ) } VmVersion::VmGateway => crate::vm_latest::constants::get_bootloader_tx_encoding_space( - crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, + crate::vm_latest::MultiVMSubversion::Gateway, ), } } @@ -397,11 +397,14 @@ pub fn get_used_bootloader_memory_bytes(version: VmVersion) -> usize { crate::vm_latest::MultiVMSubversion::SmallBootloaderMemory, ) } - VmVersion::Vm1_5_0IncreasedBootloaderMemory | VmVersion::VmGateway => { + VmVersion::Vm1_5_0IncreasedBootloaderMemory => { crate::vm_latest::constants::get_used_bootloader_memory_bytes( crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_used_bootloader_memory_bytes( + crate::vm_latest::MultiVMSubversion::Gateway, + ), } } @@ -430,11 
+433,14 @@ pub fn get_used_bootloader_memory_words(version: VmVersion) -> usize { crate::vm_latest::MultiVMSubversion::SmallBootloaderMemory, ) } - VmVersion::Vm1_5_0IncreasedBootloaderMemory | VmVersion::VmGateway => { + VmVersion::Vm1_5_0IncreasedBootloaderMemory => { crate::vm_latest::constants::get_used_bootloader_memory_bytes( crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_used_bootloader_memory_bytes( + crate::vm_latest::MultiVMSubversion::Gateway, + ), } } diff --git a/core/lib/multivm/src/versions/README.md b/core/lib/multivm/src/versions/README.md deleted file mode 100644 index 01c57509197..00000000000 --- a/core/lib/multivm/src/versions/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# MultiVM dependencies - -This folder contains the old versions of the VM we have used in the past. The `multivm` crate uses them to dynamically -switch the version we use to be able to sync from the genesis. This is a temporary measure until a "native" solution is -implemented (i.e., the `vm` crate would itself know the changes between versions, and thus we will have only the -functional diff between versions, not several fully-fledged VMs). - -## Versions - -| Name | Protocol versions | Description | -| ---------------------- | ----------------- | --------------------------------------------------------------------- | -| vm_m5 | 0 - 3 | Release for the testnet launch | -| vm_m6 | 4 - 6 | Release for the mainnet launch | -| vm_1_3_2 | 7 - 12 | Release 1.3.2 of the crypto circuits | -| vm_virtual_blocks | 13 - 15 | Adding virtual blocks to help with block number / timestamp migration | -| vm_refunds_enhancement | 16 - 17 | Fixing issue related to refunds in VM | -| vm_boojum_integration | 18 - | New Proving system (boojum), vm version 1.4.0 | diff --git a/core/lib/multivm/src/versions/mod.rs b/core/lib/multivm/src/versions/mod.rs index bcb246cece4..b6523b3d474 100644 --- a/core/lib/multivm/src/versions/mod.rs +++ b/core/lib/multivm/src/versions/mod.rs @@ -1,8 +1,8 @@ +#[cfg(test)] +mod shadow; mod shared; #[cfg(test)] mod testonly; -#[cfg(test)] -mod tests; pub mod vm_1_3_2; pub mod vm_1_4_1; pub mod vm_1_4_2; diff --git a/core/lib/multivm/src/versions/tests.rs b/core/lib/multivm/src/versions/shadow/mod.rs similarity index 95% rename from core/lib/multivm/src/versions/tests.rs rename to core/lib/multivm/src/versions/shadow/mod.rs index ea009b450c8..350caafabe1 100644 --- a/core/lib/multivm/src/versions/tests.rs +++ b/core/lib/multivm/src/versions/shadow/mod.rs @@ -22,14 +22,16 @@ use crate::{ }, utils::get_max_gas_per_pubdata_byte, versions::testonly::{ - default_l1_batch, default_system_env, make_account_rich, ContractToDeploy, + default_l1_batch, default_system_env, make_address_rich, ContractToDeploy, }, - vm_fast, - vm_latest::{self, HistoryEnabled}, + vm_latest, + vm_latest::HistoryEnabled, }; +mod tests; + type ReferenceVm = vm_latest::Vm, HistoryEnabled>; -// type ShadowedFastVm = crate::vm_instance::ShadowedFastVm; +type ShadowedFastVm = crate::vm_instance::ShadowedFastVm; fn hash_block(block_env: L2BlockEnv, tx_hashes: &[H256]) -> H256 { let mut hasher = L2BlockHasher::new( @@ -70,8 +72,8 @@ impl Harness { fn new(l1_batch_env: &L1BatchEnv) -> Self { Self { - alice: Account::random(), - bob: Account::random(), + alice: Account::from_seed(0), + bob: Account::from_seed(1), storage_contract: ContractToDeploy::new( read_bytecode(Self::STORAGE_CONTRACT_PATH), Self::STORAGE_CONTRACT_ADDRESS, @@ -82,8 +84,8 @@ impl Harness { } 
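The `ShadowedFastVm` alias above runs each operation on both the reference `vm_latest` implementation and the new fast VM and compares the results, so divergences surface immediately. A minimal, self-contained sketch of that shadowing pattern (toy trait and types, not the crate's actual interfaces):

// Toy stand-in for the real VM interface; illustrative only.
trait ToyVm {
    fn execute(&mut self, tx: u64) -> u64;
}

struct MainVm;
struct FastVm;

impl ToyVm for MainVm {
    fn execute(&mut self, tx: u64) -> u64 {
        tx.wrapping_mul(2)
    }
}

impl ToyVm for FastVm {
    fn execute(&mut self, tx: u64) -> u64 {
        tx + tx
    }
}

/// Runs every operation on both implementations and panics on divergence,
/// mirroring the idea behind the shadowed VM used in these tests.
struct Shadowed<M, S> {
    main: M,
    shadow: S,
}

impl<M: ToyVm, S: ToyVm> ToyVm for Shadowed<M, S> {
    fn execute(&mut self, tx: u64) -> u64 {
        let main_result = self.main.execute(tx);
        let shadow_result = self.shadow.execute(tx);
        assert_eq!(main_result, shadow_result, "VM divergence on tx {tx}");
        main_result
    }
}

fn main() {
    let mut vm = Shadowed { main: MainVm, shadow: FastVm };
    assert_eq!(vm.execute(21), 42);
}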
fn setup_storage(&self, storage: &mut InMemoryStorage) { - make_account_rich(storage, &self.alice); - make_account_rich(storage, &self.bob); + make_address_rich(storage, self.alice.address); + make_address_rich(storage, self.bob.address); self.storage_contract.insert(storage); let storage_contract_key = StorageKey::new( @@ -196,7 +198,6 @@ impl Harness { assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); self.new_block(vm, &[deploy_tx.tx.hash(), load_test_tx.hash()]); - vm.finish_batch(); } } @@ -221,10 +222,10 @@ fn sanity_check_harness() { sanity_check_vm::(); } -#[test] -fn sanity_check_harness_on_new_vm() { - sanity_check_vm::>(); -} +// #[test] +// fn sanity_check_harness_on_new_vm() { +// sanity_check_vm::>(); +// } #[test] fn sanity_check_shadow_vm() { diff --git a/core/lib/multivm/src/versions/shadow/tests.rs b/core/lib/multivm/src/versions/shadow/tests.rs new file mode 100644 index 00000000000..78fd29809a9 --- /dev/null +++ b/core/lib/multivm/src/versions/shadow/tests.rs @@ -0,0 +1,427 @@ +//! Unit tests from the `testonly` test suite. + +use std::{collections::HashSet, rc::Rc}; + +use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H256, U256}; +use zksync_vm_interface::pubdata::PubdataBuilder; + +use super::ShadowedFastVm; +use crate::{ + interface::{ + utils::{ShadowMut, ShadowRef}, + CurrentExecutionState, L2BlockEnv, VmExecutionResultAndLogs, + }, + versions::testonly::TestedVm, +}; + +impl TestedVm for ShadowedFastVm { + type StateDump = (); + + fn dump_state(&self) -> Self::StateDump { + // Do nothing + } + + fn gas_remaining(&mut self) -> u32 { + self.get_mut("gas_remaining", |r| match r { + ShadowMut::Main(vm) => vm.gas_remaining(), + ShadowMut::Shadow(vm) => vm.gas_remaining(), + }) + } + + fn get_current_execution_state(&self) -> CurrentExecutionState { + self.get_custom("current_execution_state", |r| match r { + ShadowRef::Main(vm) => vm.get_current_execution_state(), + ShadowRef::Shadow(vm) => vm.get_current_execution_state(), + }) + } + + fn decommitted_hashes(&self) -> HashSet { + self.get("decommitted_hashes", |r| match r { + ShadowRef::Main(vm) => vm.decommitted_hashes(), + ShadowRef::Shadow(vm) => TestedVm::decommitted_hashes(vm), + }) + } + + fn finish_batch_with_state_diffs( + &mut self, + diffs: Vec, + pubdata_builder: Rc, + ) -> VmExecutionResultAndLogs { + self.get_custom_mut("finish_batch_with_state_diffs", |r| match r { + ShadowMut::Main(vm) => { + vm.finish_batch_with_state_diffs(diffs.clone(), pubdata_builder.clone()) + } + ShadowMut::Shadow(vm) => { + vm.finish_batch_with_state_diffs(diffs.clone(), pubdata_builder.clone()) + } + }) + } + + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs { + self.get_custom_mut("finish_batch_without_pubdata", |r| match r { + ShadowMut::Main(vm) => vm.finish_batch_without_pubdata(), + ShadowMut::Shadow(vm) => vm.finish_batch_without_pubdata(), + }) + } + + fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { + self.get_mut("insert_bytecodes", |r| match r { + ShadowMut::Main(vm) => vm.insert_bytecodes(bytecodes), + ShadowMut::Shadow(vm) => TestedVm::insert_bytecodes(vm, bytecodes), + }); + } + + fn known_bytecode_hashes(&self) -> HashSet { + self.get("known_bytecode_hashes", |r| match r { + ShadowRef::Main(vm) => vm.known_bytecode_hashes(), + ShadowRef::Shadow(vm) => vm.known_bytecode_hashes(), + }) + } + + fn manually_decommit(&mut self, code_hash: H256) -> bool { + self.get_mut("manually_decommit", |r| match r { + ShadowMut::Main(vm) => 
vm.manually_decommit(code_hash), + ShadowMut::Shadow(vm) => vm.manually_decommit(code_hash), + }) + } + + fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]) { + self.get("verify_required_bootloader_heap", |r| match r { + ShadowRef::Main(vm) => vm.verify_required_bootloader_heap(cells), + ShadowRef::Shadow(vm) => vm.verify_required_bootloader_heap(cells), + }); + } + + fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) { + self.get_mut("manually_decommit", |r| match r { + ShadowMut::Main(vm) => vm.write_to_bootloader_heap(cells), + ShadowMut::Shadow(vm) => TestedVm::write_to_bootloader_heap(vm, cells), + }); + } + + fn read_storage(&mut self, key: StorageKey) -> U256 { + self.get_mut("read_storage", |r| match r { + ShadowMut::Main(vm) => vm.read_storage(key), + ShadowMut::Shadow(vm) => vm.read_storage(key), + }) + } + + fn last_l2_block_hash(&self) -> H256 { + self.get("last_l2_block_hash", |r| match r { + ShadowRef::Main(vm) => vm.last_l2_block_hash(), + ShadowRef::Shadow(vm) => vm.last_l2_block_hash(), + }) + } + + fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) { + self.get_mut("push_l2_block_unchecked", |r| match r { + ShadowMut::Main(vm) => vm.push_l2_block_unchecked(block), + ShadowMut::Shadow(vm) => vm.push_l2_block_unchecked(block), + }); + } + + fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { + self.get_mut("push_transaction_with_refund", |r| match r { + ShadowMut::Main(vm) => vm.push_transaction_with_refund(tx.clone(), refund), + ShadowMut::Shadow(vm) => vm.push_transaction_with_refund(tx.clone(), refund), + }); + } +} + +// mod block_tip { +// use crate::versions::testonly::block_tip::*; +// +// #[test] +// fn dry_run_upper_bound() { +// test_dry_run_upper_bound::(); +// } +// } +// +// mod bootloader { +// use crate::versions::testonly::bootloader::*; +// +// #[test] +// fn dummy_bootloader() { +// test_dummy_bootloader::(); +// } +// +// #[test] +// fn bootloader_out_of_gas() { +// test_bootloader_out_of_gas::(); +// } +// } +// +// mod bytecode_publishing { +// use crate::versions::testonly::bytecode_publishing::*; +// +// #[test] +// fn bytecode_publishing() { +// test_bytecode_publishing::(); +// } +// } +// +// mod circuits { +// use crate::versions::testonly::circuits::*; +// +// #[test] +// fn circuits() { +// test_circuits::(); +// } +// } +// +// mod code_oracle { +// use crate::versions::testonly::code_oracle::*; +// +// #[test] +// fn code_oracle() { +// test_code_oracle::(); +// } +// +// #[test] +// fn code_oracle_big_bytecode() { +// test_code_oracle_big_bytecode::(); +// } +// +// #[test] +// fn refunds_in_code_oracle() { +// test_refunds_in_code_oracle::(); +// } +// } +// +// mod default_aa { +// use crate::versions::testonly::default_aa::*; +// +// #[test] +// fn default_aa_interaction() { +// test_default_aa_interaction::(); +// } +// } +// +// mod gas_limit { +// use crate::versions::testonly::gas_limit::*; +// +// #[test] +// fn tx_gas_limit_offset() { +// test_tx_gas_limit_offset::(); +// } +// } +// +// mod get_used_contracts { +// use crate::versions::testonly::get_used_contracts::*; +// +// #[test] +// fn get_used_contracts() { +// test_get_used_contracts::(); +// } +// +// #[test] +// fn get_used_contracts_with_far_call() { +// test_get_used_contracts_with_far_call::(); +// } +// +// #[test] +// fn get_used_contracts_with_out_of_gas_far_call() { +// test_get_used_contracts_with_out_of_gas_far_call::(); +// } +// } +// +// mod is_write_initial { +// use 
crate::versions::testonly::is_write_initial::*; +// +// #[test] +// fn is_write_initial_behaviour() { +// test_is_write_initial_behaviour::(); +// } +// } +// +// mod l1_tx_execution { +// use crate::versions::testonly::l1_tx_execution::*; +// +// #[test] +// fn l1_tx_execution() { +// test_l1_tx_execution::(); +// } +// +// #[test] +// fn l1_tx_execution_high_gas_limit() { +// test_l1_tx_execution_high_gas_limit::(); +// } +// } +// +// mod l2_blocks { +// use crate::versions::testonly::l2_blocks::*; +// +// #[test] +// fn l2_block_initialization_timestamp() { +// test_l2_block_initialization_timestamp::(); +// } +// +// #[test] +// fn l2_block_initialization_number_non_zero() { +// test_l2_block_initialization_number_non_zero::(); +// } +// +// #[test] +// fn l2_block_same_l2_block() { +// test_l2_block_same_l2_block::(); +// } +// +// #[test] +// fn l2_block_new_l2_block() { +// test_l2_block_new_l2_block::(); +// } +// +// #[test] +// fn l2_block_first_in_batch() { +// test_l2_block_first_in_batch::(); +// } +// } +// +// mod nonce_holder { +// use crate::versions::testonly::nonce_holder::*; +// +// #[test] +// fn nonce_holder() { +// test_nonce_holder::(); +// } +// } +// +// mod precompiles { +// use crate::versions::testonly::precompiles::*; +// +// #[test] +// fn keccak() { +// test_keccak::(); +// } +// +// #[test] +// fn sha256() { +// test_sha256::(); +// } +// +// #[test] +// fn ecrecover() { +// test_ecrecover::(); +// } +// } +// +// mod refunds { +// use crate::versions::testonly::refunds::*; +// +// #[test] +// fn predetermined_refunded_gas() { +// test_predetermined_refunded_gas::(); +// } +// +// #[test] +// fn negative_pubdata_for_transaction() { +// test_negative_pubdata_for_transaction::(); +// } +// } +// +// mod require_eip712 { +// use crate::versions::testonly::require_eip712::*; +// +// #[test] +// fn require_eip712() { +// test_require_eip712::(); +// } +// } +// +// mod rollbacks { +// use crate::versions::testonly::rollbacks::*; +// +// #[test] +// fn vm_rollbacks() { +// test_vm_rollbacks::(); +// } +// +// #[test] +// fn vm_loadnext_rollbacks() { +// test_vm_loadnext_rollbacks::(); +// } +// +// #[test] +// fn rollback_in_call_mode() { +// test_rollback_in_call_mode::(); +// } +// } +// +// mod secp256r1 { +// use crate::versions::testonly::secp256r1::*; +// +// #[test] +// fn secp256r1() { +// test_secp256r1::(); +// } +// } +// +// mod simple_execution { +// use crate::versions::testonly::simple_execution::*; +// +// #[test] +// fn estimate_fee() { +// test_estimate_fee::(); +// } +// +// #[test] +// fn simple_execute() { +// test_simple_execute::(); +// } +// } +// +// mod storage { +// use crate::versions::testonly::storage::*; +// +// #[test] +// fn storage_behavior() { +// test_storage_behavior::(); +// } +// +// #[test] +// fn transient_storage_behavior() { +// test_transient_storage_behavior::(); +// } +// } +// +// mod tracing_execution_error { +// use crate::versions::testonly::tracing_execution_error::*; +// +// #[test] +// fn tracing_of_execution_errors() { +// test_tracing_of_execution_errors::(); +// } +// } +// +// mod transfer { +// use crate::versions::testonly::transfer::*; +// +// #[test] +// fn send_and_transfer() { +// test_send_and_transfer::(); +// } +// +// #[test] +// fn reentrancy_protection_send_and_transfer() { +// test_reentrancy_protection_send_and_transfer::(); +// } +// } +// +// mod upgrade { +// use crate::versions::testonly::upgrade::*; +// +// #[test] +// fn protocol_upgrade_is_first() { +// 
test_protocol_upgrade_is_first::(); +// } +// +// #[test] +// fn force_deploy_upgrade() { +// test_force_deploy_upgrade::(); +// } +// +// #[test] +// fn complex_upgrader() { +// test_complex_upgrader::(); +// } +// } diff --git a/core/lib/multivm/src/versions/testonly.rs b/core/lib/multivm/src/versions/testonly.rs deleted file mode 100644 index adfdbd0b327..00000000000 --- a/core/lib/multivm/src/versions/testonly.rs +++ /dev/null @@ -1,96 +0,0 @@ -use zksync_contracts::BaseSystemContracts; -use zksync_test_account::Account; -use zksync_types::{ - block::L2BlockHasher, fee_model::BatchFeeInput, get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, utils::storage_key_for_eth_balance, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{storage::InMemoryStorage, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, -}; - -pub(super) fn default_system_env() -> SystemEnv { - SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - pubdata_params: Default::default(), - } -} - -pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(super) fn make_account_rich(storage: &mut InMemoryStorage, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); -} - -#[derive(Debug, Clone)] -pub(super) struct ContractToDeploy { - bytecode: Vec, - address: Address, - is_account: bool, -} - -impl ContractToDeploy { - pub fn new(bytecode: Vec, address: Address) -> Self { - Self { - bytecode, - address, - is_account: false, - } - } - - // FIXME: restore this method if needed in the main branch - // pub fn account(bytecode: Vec, address: Address) -> Self { - // Self { - // bytecode, - // address, - // is_account: true, - // } - // } - - pub fn insert(&self, storage: &mut InMemoryStorage) { - let deployer_code_key = get_code_key(&self.address); - storage.set_value(deployer_code_key, hash_bytecode(&self.bytecode)); - if self.is_account { - let is_account_key = get_is_account_key(&self.address); - storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - storage.store_factory_dep(hash_bytecode(&self.bytecode), self.bytecode.clone()); - } - - // FIXME: restore this method if needed in the main branch - // /// Inserts the contracts into the test environment, bypassing the deployer system contract. 
- // pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) { - // for contract in contracts { - // contract.insert(storage); - // } - // } -} diff --git a/core/lib/multivm/src/versions/testonly/block_tip.rs b/core/lib/multivm/src/versions/testonly/block_tip.rs new file mode 100644 index 00000000000..220653308a7 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/block_tip.rs @@ -0,0 +1,390 @@ +use ethabi::Token; +use itertools::Itertools; +use zksync_contracts::load_sys_contract; +use zksync_system_constants::{ + CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, +}; +use zksync_types::{ + commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, + l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; + +use super::{ + default_pubdata_builder, get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, + tester::{TestedVm, VmTesterBuilder}, +}; +use crate::{ + interface::{InspectExecutionMode, L1BatchEnv, TxExecutionMode, VmInterfaceExt}, + versions::testonly::default_l1_batch, + vm_latest::constants::{ + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, + MAX_VM_PUBDATA_PER_BATCH, + }, +}; + +#[derive(Debug, Clone, Default)] +struct L1MessengerTestData { + l2_to_l1_logs: usize, + messages: Vec>, + bytecodes: Vec>, + state_diffs: Vec, +} + +struct MimicCallInfo { + to: Address, + who_to_mimic: Address, + data: Vec, +} + +const CALLS_PER_TX: usize = 1_000; + +fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { + let complex_upgrade = get_complex_upgrade_abi(); + let l1_messenger = load_sys_contract("L1Messenger"); + + let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|i| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendL2ToL1Log") + .unwrap() + .encode_input(&[ + Token::Bool(false), + Token::FixedBytes(H256::from_low_u64_be(2 * i as u64).0.to_vec()), + Token::FixedBytes(H256::from_low_u64_be(2 * i as u64 + 1).0.to_vec()), + ]) + .unwrap(), + }); + let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendToL1") + .unwrap() + .encode_input(&[Token::Bytes(message.clone())]) + .unwrap(), + }); + let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("requestBytecodeL1Publication") + .unwrap() + .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) + .unwrap(), + }); + + let encoded_calls = logs_mimic_calls + .chain(messages_mimic_calls) + .chain(bytecodes_mimic_calls) + .map(|call| { + Token::Tuple(vec![ + Token::Address(call.to), + Token::Address(call.who_to_mimic), + Token::Bytes(call.data), + ]) + }) + .chunks(CALLS_PER_TX) + .into_iter() + .map(|chunk| { + complex_upgrade + .function("mimicCalls") + .unwrap() + .encode_input(&[Token::Array(chunk.collect_vec())]) + .unwrap() + }) + .collect_vec(); + + encoded_calls +} + +struct TestStatistics { + pub max_used_gas: u32, + pub circuit_statistics: u64, + pub execution_metrics_size: u64, +} + +struct StatisticsTagged { + pub statistics: TestStatistics, + pub tag: String, +} + +fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { 
+ let mut storage = get_empty_storage(); + let complex_upgrade_code = read_complex_upgrade(); + + // For this test we'll just put the bytecode onto the force deployer address + storage.set_value( + get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), + hash_bytecode(&complex_upgrade_code), + ); + storage.store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); + + // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute + // the gas limit + + let batch_env = L1BatchEnv { + fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), + ..default_l1_batch(zksync_types::L1BatchNumber(1)) + }; + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_l1_batch_env(batch_env) + .build::(); + + let bytecodes: Vec<_> = test_data.bytecodes.iter().map(Vec::as_slice).collect(); + vm.vm.insert_bytecodes(&bytecodes); + + let txs_data = populate_mimic_calls(test_data.clone()); + let account = &mut vm.rich_accounts[0]; + + for (i, data) in txs_data.into_iter().enumerate() { + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), + calldata: data, + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction {i} wasn't successful for input: {:#?}", + test_data + ); + } + + // Now we count how much ergs were spent at the end of the batch + // It is assumed that the top level frame is the bootloader + let gas_before = vm.vm.gas_remaining(); + let result = vm + .vm + .finish_batch_with_state_diffs(test_data.state_diffs.clone(), default_pubdata_builder()); + assert!( + !result.result.is_failed(), + "Batch wasn't successful for input: {test_data:?}" + ); + let gas_after = vm.vm.gas_remaining(); + assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used); + + TestStatistics { + max_used_gas: gas_before - gas_after, + circuit_statistics: result.statistics.circuit_statistic.total() as u64, + execution_metrics_size: result.get_execution_metrics(None).size() as u64, + } +} + +fn generate_state_diffs( + repeated_writes: bool, + small_diff: bool, + number_of_state_diffs: usize, +) -> Vec { + (0..number_of_state_diffs) + .map(|i| { + let address = Address::from_low_u64_be(i as u64); + let key = U256::from(i); + let enumeration_index = if repeated_writes { i + 1 } else { 0 }; + + let (initial_value, final_value) = if small_diff { + // As small as it gets, one byte to denote zeroing out the value + (U256::from(1), U256::from(0)) + } else { + // As large as it gets + (U256::from(0), U256::from(2).pow(255.into())) + }; + + StateDiffRecord { + address, + key, + derived_key: u256_to_h256(i.into()).0, + enumeration_index: enumeration_index as u64, + initial_value, + final_value, + } + }) + .collect() +} + +// A valid zkEVM bytecode has odd number of 32 byte words +fn get_valid_bytecode_length(length: usize) -> usize { + // Firstly ensure that the length is divisible by 32 + let length_padded_to_32 = if length % 32 == 0 { + length + } else { + length + 32 - (length % 32) + }; + + // Then we ensure that the number returned by division by 32 is odd + if length_padded_to_32 % 64 == 0 { + length_padded_to_32 + 32 + } else { + length_padded_to_32 + } +} + +pub(crate) fn test_dry_run_upper_bound() { + // Some of the pubdata is consumed by 
constant fields (such as length of messages, number of logs, etc.). + // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` + // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. + const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = + (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; + + // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. + // To get the upper bound, we'll try to do the following: + // 1. Max number of logs. + // 2. Lots of small L2->L1 messages / one large L2->L1 message. + // 3. Lots of small bytecodes / one large bytecode. + // 4. Lots of storage slot updates. + + let statistics = vec![ + // max logs + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, + ..Default::default() + }), + tag: "max_logs".to_string(), + }, + // max messages + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, + // so the max number of pubdata is bound by it + messages: vec![ + vec![0; 0]; + MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) + ], + ..Default::default() + }), + tag: "max_messages".to_string(), + }, + // long message + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it + messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], + ..Default::default() + }), + tag: "long_message".to_string(), + }, + // max bytecodes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each bytecode must be at least 32 bytes long. 
+ // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number + bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], + ..Default::default() + }), + tag: "max_bytecodes".to_string(), + }, + // long bytecode + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + bytecodes: vec![ + vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; + 1 + ], + ..Default::default() + }), + tag: "long_bytecode".to_string(), + }, + // lots of small repeated writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) + state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), + ..Default::default() + }), + tag: "small_repeated_writes".to_string(), + }, + // lots of big repeated writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value + state_diffs: generate_state_diffs( + true, + false, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, + ), + ..Default::default() + }), + tag: "big_repeated_writes".to_string(), + }, + // lots of small initial writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out + state_diffs: generate_state_diffs( + false, + true, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, + ), + ..Default::default() + }), + tag: "small_initial_writes".to_string(), + }, + // lots of large initial writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value + state_diffs: generate_state_diffs( + false, + false, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, + ), + ..Default::default() + }), + tag: "big_initial_writes".to_string(), + }, + ]; + + // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
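Each scenario above is sized so that its pubdata just fits under `MAX_EFFECTIVE_PUBDATA_PER_BATCH` (99% of the VM maximum), and the assertions that follow then require the batch-tip constants to leave at least 50% headroom over the worst measured value. A small sketch of that sizing and margin arithmetic (the numeric constants here are placeholders, not the real ones):

fn main() {
    // Placeholder values; the real constants live in `vm_latest::constants`.
    let max_vm_pubdata_per_batch: usize = 100_000;
    let bootloader_batch_tip_overhead: u32 = 400_000;

    // Leave a 1% cushion for metadata that accompanies logs and messages.
    let max_effective_pubdata = (max_vm_pubdata_per_batch as f64 * 0.99) as usize;

    // Scenario sizing, e.g. "max_messages": each small message costs one
    // serialized log (1 + 1 + 2 + 20 + 32 + 32 = 88 bytes, matching the
    // packed encoding removed from events.rs earlier in this patch) plus a
    // 4-byte length prefix.
    let max_messages = max_effective_pubdata / (88 + 4);
    println!("the worst-case scenario fits {max_messages} small messages");

    // The assertions require at least 50% headroom over the measured maximum.
    let measured_worst_case_gas: u32 = 250_000; // stand-in for `max_used_gas`
    assert!(
        measured_worst_case_gas * 3 / 2 <= bootloader_batch_tip_overhead,
        "BOOTLOADER_BATCH_TIP_OVERHEAD would be too low"
    );
}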
+ let max_used_gas = statistics + .iter() + .map(|s| (s.statistics.max_used_gas, s.tag.clone())) + .max() + .unwrap(); + assert!( + max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, + "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", + max_used_gas.1, + max_used_gas.0, + BOOTLOADER_BATCH_TIP_OVERHEAD + ); + + let circuit_statistics = statistics + .iter() + .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) + .max() + .unwrap(); + assert!( + circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, + "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", + circuit_statistics.1, + circuit_statistics.0, + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD + ); + + let execution_metrics_size = statistics + .iter() + .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) + .max() + .unwrap(); + assert!( + execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, + "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", + execution_metrics_size.1, + execution_metrics_size.0, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD + ); +} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs b/core/lib/multivm/src/versions/testonly/bootloader.rs similarity index 53% rename from core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs rename to core/lib/multivm/src/versions/testonly/bootloader.rs index 8d69d05c444..4b9b63252d6 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/testonly/bootloader.rs @@ -1,50 +1,39 @@ +use assert_matches::assert_matches; use zksync_types::U256; -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - }, -}; - -#[test] -fn test_dummy_bootloader() { +use super::{get_bootloader, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; +use crate::interface::{ExecutionResult, Halt, TxExecutionMode}; + +pub(crate) fn test_dummy_bootloader() { let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); base_system_contracts.bootloader = get_bootloader("dummy"); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_base_system_smart_contracts(base_system_contracts) .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); + .build::(); - let result = vm.vm.execute(VmExecutionMode::Batch); + let result = vm.vm.finish_batch_without_pubdata(); assert!(!result.result.is_failed()); let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); + vm.vm + .verify_required_bootloader_heap(&[(0, correct_first_cell)]); } -#[test] -fn test_bootloader_out_of_gas() { +pub(crate) fn test_bootloader_out_of_gas() { let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); base_system_contracts.bootloader = get_bootloader("dummy"); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() 
.with_base_system_smart_contracts(base_system_contracts) .with_bootloader_gas_limit(10) .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); + .build::(); - let res = vm.vm.execute(VmExecutionMode::Batch); + let res = vm.vm.finish_batch_without_pubdata(); assert_matches!( res.result, diff --git a/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs new file mode 100644 index 00000000000..9da005b995d --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs @@ -0,0 +1,44 @@ +use zksync_test_account::TxType; + +use super::{default_pubdata_builder, read_test_contract, tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{InspectExecutionMode, TxExecutionMode, VmEvent, VmInterfaceExt}, + utils::bytecode, +}; + +pub(crate) fn test_bytecode_publishing() { + // In this test, we aim to ensure that the contents of the compressed bytecodes + // are included as part of the L2->L1 long messages + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; + + let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx; + assert_eq!(tx.execute.factory_deps.len(), 1); // The deployed bytecode is the only dependency + let push_result = vm.vm.push_transaction(tx); + assert_eq!(push_result.compressed_bytecodes.len(), 1); + assert_eq!(push_result.compressed_bytecodes[0].original, counter); + assert_eq!( + push_result.compressed_bytecodes[0].compressed, + compressed_bytecode + ); + + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.finish_batch(default_pubdata_builder()); + + let state = vm.vm.get_current_execution_state(); + let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); + assert!( + long_messages.contains(&compressed_bytecode), + "Bytecode not published" + ); +} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs b/core/lib/multivm/src/versions/testonly/circuits.rs similarity index 61% rename from core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs rename to core/lib/multivm/src/versions/testonly/circuits.rs index 7d0dfd1ed0e..de987a8912d 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs +++ b/core/lib/multivm/src/versions/testonly/circuits.rs @@ -1,39 +1,41 @@ use zksync_types::{Address, Execute, U256}; +use super::tester::VmTesterBuilder; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + versions::testonly::TestedVm, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) +/// Checks that estimated number of circuits for simple transfer doesn't differ much +/// from hardcoded expected value. 
+pub(crate) fn test_circuits() { + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); + .build::(); let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::repeat_byte(1)), calldata: Vec::new(), value: U256::from(1u8), - factory_deps: None, + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!res.result.is_failed(), "{res:#?}"); let s = res.statistics.circuit_statistic; // Check `circuit_statistic`. - const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.003438, 0.00077, 0.1195, 0.1429, 0.0, + const EXPECTED: [f32; 13] = [ + 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, + 0.0, 0.0, 0.0, ]; let actual = [ (s.main_vm, "main_vm"), @@ -47,6 +49,8 @@ fn test_circuits() { (s.keccak256, "keccak256"), (s.ecrecover, "ecrecover"), (s.sha256, "sha256"), + (s.secp256k1_verify, "secp256k1_verify"), + (s.transient_storage_checker, "transient_storage_checker"), ]; for ((actual, name), expected) in actual.iter().zip(EXPECTED) { if expected == 0.0 { diff --git a/core/lib/multivm/src/versions/testonly/code_oracle.rs b/core/lib/multivm/src/versions/testonly/code_oracle.rs new file mode 100644 index 00000000000..767a294f44a --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/code_oracle.rs @@ -0,0 +1,242 @@ +use ethabi::Token; +use zksync_types::{ + get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; + +use super::{ + get_empty_storage, load_precompiles_contract, read_precompiles_contract, read_test_contract, + tester::VmTesterBuilder, TestedVm, +}; +use crate::{ + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + versions::testonly::ContractToDeploy, +}; + +fn generate_large_bytecode() -> Vec { + // This is the maximal possible size of a zkEVM bytecode + vec![2u8; ((1 << 16) - 1) * 32] +} + +pub(crate) fn test_code_oracle() { + let precompiles_contract_address = Address::repeat_byte(1); + let precompile_contract_bytecode = read_precompiles_contract(); + + // Filling the zkevm bytecode + let normal_zkevm_bytecode = read_test_contract(); + let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); + let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. 
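The `get_known_code_key` write in the setup above is how these tests prime storage: the bytecode hash is flagged as known so the code oracle is allowed to decommit it without the code being published in the same batch. A small helper capturing that step, assuming the same imports and in-memory storage used by the surrounding tests (the helper name is illustrative, not an existing API):

use zksync_types::{get_known_code_key, U256};
use zksync_utils::{bytecode::hash_bytecode, u256_to_h256};

use crate::interface::storage::InMemoryStorage;

/// Marks `bytecode` as already known to the system so that `CodeOracle`
/// can decommit it even though it was never published in this batch.
fn mark_bytecode_as_known(storage: &mut InMemoryStorage, bytecode: &[u8]) {
    let bytecode_hash = hash_bytecode(bytecode);
    storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(U256::one()));
}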
+ let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + precompile_contract_bytecode, + precompiles_contract_address, + )]) + .with_storage(storage) + .build::(); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + vm.vm.insert_bytecodes(&[normal_zkevm_bytecode.as_slice()]); + let account = &mut vm.rich_accounts[0]; + + // Firstly, let's ensure that the contract works. + let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // Now, we ask for the same bytecode. We use to partially check whether the memory page with + // the decommitted bytecode gets erased (it shouldn't). + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx2); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +fn find_code_oracle_cost_log( + precompiles_contract_address: Address, + logs: &[StorageLogWithPreviousValue], +) -> &StorageLogWithPreviousValue { + logs.iter() + .find(|log| { + *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() + }) + .expect("no code oracle cost log") +} + +pub(crate) fn test_code_oracle_big_bytecode() { + let precompiles_contract_address = Address::repeat_byte(1); + let precompile_contract_bytecode = read_precompiles_contract(); + + let big_zkevm_bytecode = generate_large_bytecode(); + let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); + let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); + + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&big_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + precompile_contract_bytecode, + precompiles_contract_address, + )]) + .with_storage(storage) + .build::(); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + vm.vm.insert_bytecodes(&[big_zkevm_bytecode.as_slice()]); + + let account = &mut vm.rich_accounts[0]; + + // Firstly, let's ensure that the contract works. 
+ let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +pub(crate) fn test_refunds_in_code_oracle() { + let precompiles_contract_address = Address::repeat_byte(1); + let precompile_contract_bytecode = read_precompiles_contract(); + + let normal_zkevm_bytecode = read_test_contract(); + let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); + let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + // Execute code oracle twice with identical VM state that only differs in that the queried bytecode + // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas + // for already decommitted codes). + let mut oracle_costs = vec![]; + for decommit in [false, true] { + let mut vm = VmTesterBuilder::new() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + precompile_contract_bytecode.clone(), + precompiles_contract_address, + )]) + .with_storage(storage.clone()) + .build::(); + + vm.vm.insert_bytecodes(&[normal_zkevm_bytecode.as_slice()]); + + let account = &mut vm.rich_accounts[0]; + if decommit { + let is_fresh = vm.vm.manually_decommit(normal_zkevm_bytecode_hash); + assert!(is_fresh); + } + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + let log = + find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); + oracle_costs.push(log.log.value); + } + + // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` + // in `CodeOracle.yul`. 
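Concretely, the expected refund checked below is four ergs per 32-byte word of the decommitted bytecode. A tiny worked example of that arithmetic (the helper name is illustrative):

fn expected_code_oracle_refund(bytecode_len_in_bytes: u64) -> u64 {
    // `decommit` is charged 4 gas per 32-byte word of the target bytecode,
    // per the `CodeOracle.yul` comment above; decommitting an already
    // decommitted code refunds that full amount.
    4 * (bytecode_len_in_bytes / 32)
}

fn main() {
    // E.g. a 6,400-byte bytecode is 200 words, so the refund is 800 gas.
    assert_eq!(expected_code_oracle_refund(6_400), 800);
}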
+ let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); + assert_eq!( + code_oracle_refund, + (4 * (normal_zkevm_bytecode.len() / 32)).into() + ); +} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs b/core/lib/multivm/src/versions/testonly/default_aa.rs similarity index 50% rename from core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs rename to core/lib/multivm/src/versions/testonly/default_aa.rs index b0717a57c56..b3fc5b635de 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/testonly/default_aa.rs @@ -1,31 +1,26 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; +use zksync_test_account::{DeployContractsTx, TxType}; use zksync_types::{ get_code_key, get_known_code_key, get_nonce_key, system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, + utils::storage_key_for_eth_balance, + U256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::h256_to_u256; +use super::{default_pubdata_builder, read_test_contract, tester::VmTesterBuilder, TestedVm}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - }, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + vm_latest::utils::fee::get_batch_base_fee, }; -#[test] -fn test_default_aa_interaction() { +pub(crate) fn test_default_aa_interaction() { // In this test, we aim to test whether a simple account interaction (without any fee logic) // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let counter = read_test_contract(); let account = &mut vm.rich_accounts[0]; @@ -34,13 +29,18 @@ fn test_default_aa_interaction() { bytecode_hash, address, } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); + let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.l1_batch_env); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "Transaction wasn't successful"); - vm.vm.execute(VmExecutionMode::Batch); + let batch_result = vm.vm.finish_batch(default_pubdata_builder()); + assert!( + !batch_result.block_tip_execution_result.result.is_failed(), + "Batch tip execution wasn't successful" + ); + vm.vm.get_current_execution_state(); // Both deployment and ordinary nonce should be incremented by one. @@ -53,25 +53,16 @@ fn test_default_aa_interaction() { // The contract should be deployed successfully. 
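The operator-fee bookkeeping verified below is plain arithmetic: the account is charged `gas_limit * base_fee` up front, unused gas is refunded at the same price, and the operator keeps the difference. A small sketch with made-up numbers:

fn main() {
    // Made-up numbers purely to illustrate the bookkeeping checked in the test.
    let base_fee: u128 = 250_000_000; // wei per gas
    let gas_limit: u128 = 500_000;
    let gas_refunded: u128 = 120_000;

    let maximal_fee = gas_limit * base_fee;
    let expected_operator_fee = maximal_fee - gas_refunded * base_fee;

    // The operator ends up with the fee for the gas actually used.
    assert_eq!(expected_operator_fee, (gas_limit - gas_refunded) * base_fee);
}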
let account_code_key = get_code_key(&address); - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - + let operator_balance_key = storage_key_for_eth_balance(&vm.l1_batch_env.fee_account); let expected_fee = maximal_fee - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); + * U256::from(get_batch_base_fee(&vm.l1_batch_env)); - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); + let expected_slots = [ + (account_nonce_key, expected_nonce), + (known_codes_key, 1.into()), + (account_code_key, h256_to_u256(bytecode_hash)), + (operator_balance_key, expected_fee), + ]; + vm.vm.verify_required_storage(&expected_slots); } diff --git a/core/lib/multivm/src/versions/testonly/gas_limit.rs b/core/lib/multivm/src/versions/testonly/gas_limit.rs new file mode 100644 index 00000000000..5e31eb2b159 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/gas_limit.rs @@ -0,0 +1,34 @@ +use zksync_test_account::Account; +use zksync_types::{fee::Fee, Execute}; + +use super::{tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::TxExecutionMode, + vm_latest::constants::{TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, +}; + +/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. +pub(crate) fn test_tx_gas_limit_offset() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let gas_limit = 9999.into(); + let tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: Some(Default::default()), + ..Default::default() + }, + Some(Fee { + gas_limit, + ..Account::default_fee() + }), + ); + + vm.vm.push_transaction(tx); + + let slot = (TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET) as u32; + vm.vm.verify_required_bootloader_heap(&[(slot, gas_limit)]); +} diff --git a/core/lib/multivm/src/versions/testonly/get_used_contracts.rs b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs new file mode 100644 index 00000000000..9d0908807e2 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs @@ -0,0 +1,219 @@ +use std::iter; + +use assert_matches::assert_matches; +use ethabi::Token; +use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; +use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_test_account::{Account, TxType}; +use zksync_types::{AccountTreeId, Address, Execute, StorageKey, H256, U256}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; + +use super::{ + read_proxy_counter_contract, read_test_contract, + tester::{VmTester, VmTesterBuilder}, + TestedVm, +}; +use crate::{ + interface::{ + ExecutionResult, InspectExecutionMode, TxExecutionMode, VmExecutionResultAndLogs, + VmInterfaceExt, + }, + versions::testonly::ContractToDeploy, +}; + +pub(crate) fn test_get_used_contracts() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + assert!(vm.vm.known_bytecode_hashes().is_empty()); + + // create and push and execute some not-empty factory deps transaction with 
success status + // to check that `get_decommitted_hashes()` updates + let contract_code = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); + vm.vm.push_transaction(tx.tx.clone()); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + assert!(vm + .vm + .decommitted_hashes() + .contains(&h256_to_u256(tx.bytecode_hash))); + + // Note: `Default_AA` will be in the list of used contracts if L2 tx is used + assert_eq!(vm.vm.decommitted_hashes(), vm.vm.known_bytecode_hashes()); + + // create push and execute some non-empty factory deps transaction that fails + // (`known_bytecodes` will be updated but we expect `get_decommitted_hashes()` to not be updated) + + let calldata = [1, 2, 3]; + let big_calldata: Vec = calldata + .iter() + .cycle() + .take(calldata.len() * 1024) + .cloned() + .collect(); + let account2 = Account::from_seed(u32::MAX); + assert_ne!(account2.address, account.address); + let tx2 = account2.get_l1_tx( + Execute { + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), + calldata: big_calldata, + value: Default::default(), + factory_deps: vec![vec![1; 32]], + }, + 1, + ); + + vm.vm.push_transaction(tx2.clone()); + + let res2 = vm.vm.execute(InspectExecutionMode::OneTx); + + assert!(res2.result.is_failed()); + + for factory_dep in tx2.execute.factory_deps { + let hash = hash_bytecode(&factory_dep); + let hash_to_u256 = h256_to_u256(hash); + assert!(vm.vm.known_bytecode_hashes().contains(&hash_to_u256)); + assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); + } +} + +/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial +/// decommitment cost (>10,000 gas). 
+fn inflated_counter_bytecode() -> Vec { + let mut counter_bytecode = read_test_contract(); + counter_bytecode.extend( + iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) + .take(10_000) + .flatten(), + ); + counter_bytecode +} + +#[derive(Debug)] +struct ProxyCounterData { + proxy_counter_address: Address, + counter_bytecode_hash: U256, +} + +fn execute_proxy_counter( + gas: u32, +) -> (VmTester, ProxyCounterData, VmExecutionResultAndLogs) { + let counter_bytecode = inflated_counter_bytecode(); + let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); + let counter_address = Address::repeat_byte(0x23); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx( + &proxy_counter_bytecode, + Some(&[Token::Address(counter_address)]), + TxType::L2, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let decommitted_hashes = vm.vm.decommitted_hashes(); + assert!( + !decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(deploy_tx.address), + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + let data = ProxyCounterData { + proxy_counter_address: deploy_tx.address, + counter_bytecode_hash, + }; + (vm, data, exec_result) +} + +pub(crate) fn test_get_used_contracts_with_far_call() { + let (vm, data, exec_result) = execute_proxy_counter::(100_000); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + let decommitted_hashes = vm.vm.decommitted_hashes(); + assert!( + decommitted_hashes.contains(&data.counter_bytecode_hash), + "{decommitted_hashes:?}" + ); +} + +pub(crate) fn test_get_used_contracts_with_out_of_gas_far_call() { + let (mut vm, data, exec_result) = execute_proxy_counter::(10_000); + assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); + let decommitted_hashes = vm.vm.decommitted_hashes(); + assert!( + decommitted_hashes.contains(&data.counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + // Execute another transaction with a successful far call and check that it's still charged for decommitment. 
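For intuition on the ">10,000 gas" figure in the doc comment on `inflated_counter_bytecode` and the `far_call_cost > 10_000` assertion below: assuming each appended NOP encodes to 8 bytes and decommitment is priced at 4 gas per 32-byte word (the rate quoted for `CodeOracle.yul` earlier in this patch), the padding alone accounts for roughly 10,000 gas. A back-of-the-envelope sketch under those assumptions:

fn main() {
    // Assumptions for illustration only: 8 bytes per encoded opcode,
    // 4 gas per 32-byte word of decommitted code.
    let appended_nops: u64 = 10_000;
    let bytes_per_opcode: u64 = 8;
    let extra_words = appended_nops * bytes_per_opcode / 32;
    let extra_decommit_gas = 4 * extra_words;
    assert!(extra_decommit_gas >= 10_000, "got {extra_decommit_gas}");
}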
+ let account = &mut vm.rich_accounts[0]; + let (_, proxy_counter_abi) = read_proxy_counter_contract(); + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(data.proxy_counter_address), + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let proxy_counter_cost_key = StorageKey::new( + AccountTreeId::new(data.proxy_counter_address), + H256::from_low_u64_be(1), + ); + let far_call_cost_log = exec_result + .logs + .storage_logs + .iter() + .find(|log| log.log.key == proxy_counter_cost_key) + .expect("no cost log"); + assert!( + far_call_cost_log.previous_value.is_zero(), + "{far_call_cost_log:?}" + ); + let far_call_cost = h256_to_u256(far_call_cost_log.log.value); + assert!(far_call_cost > 10_000.into(), "{far_call_cost}"); +} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs b/core/lib/multivm/src/versions/testonly/is_write_initial.rs similarity index 65% rename from core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs rename to core/lib/multivm/src/versions/testonly/is_write_initial.rs index 7da250ef7a9..cac9be17363 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/testonly/is_write_initial.rs @@ -1,26 +1,21 @@ -use crate::interface::storage::ReadStorage; +use zksync_test_account::TxType; use zksync_types::get_nonce_key; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, +use super::{read_test_contract, tester::VmTesterBuilder, TestedVm}; +use crate::interface::{ + storage::ReadStorage, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, }; -#[test] -fn test_is_write_initial_behaviour() { +pub(crate) fn test_is_write_initial_behaviour() { // In this test, we check result of `is_write_initial` at different stages. // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); + .with_rich_accounts(1) + .build::(); + let account = &mut vm.rich_accounts[0]; let nonce_key = get_nonce_key(&account.address); // Check that the next write to the nonce key will be initial. @@ -34,7 +29,7 @@ fn test_is_write_initial_behaviour() { let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); // Check that `is_write_initial` still returns true for the nonce key. 
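The property exercised here is that `is_write_initial` answers based on what is committed in the underlying storage, not on writes the VM made during the current batch. A toy model of that distinction (simplified types, not the real `ReadStorage` implementation):

use std::collections::{HashMap, HashSet};

/// Toy model: committed state vs. writes made during the in-flight batch.
struct StorageModel {
    committed: HashMap<&'static str, u64>,
    batch_writes: HashSet<&'static str>,
}

impl StorageModel {
    /// A write is "initial" iff the slot has never been committed before,
    /// regardless of whether the current batch already touched it.
    fn is_write_initial(&self, key: &str) -> bool {
        !self.committed.contains_key(key)
    }

    fn write_in_batch(&mut self, key: &'static str) {
        self.batch_writes.insert(key);
    }
}

fn main() {
    let mut storage = StorageModel {
        committed: HashMap::new(),
        batch_writes: HashSet::new(),
    };
    assert!(storage.is_write_initial("nonce_key"));
    storage.write_in_batch("nonce_key");
    // Still initial: the write lives only in the batch, nothing is committed.
    assert!(storage.batch_writes.contains("nonce_key"));
    assert!(storage.is_write_initial("nonce_key"));
}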
assert!(vm diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs similarity index 71% rename from core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs rename to core/lib/multivm/src/versions/testonly/l1_tx_execution.rs index 40915cf931c..e98a8385f02 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs @@ -1,61 +1,57 @@ use ethabi::Token; use zksync_contracts::l1_messenger_contract; use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; +use zksync_test_account::TxType; use zksync_types::{ get_code_key, get_known_code_key, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, Execute, ExecuteTransactionCommon, U256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{h256_to_u256, u256_to_h256}; +use super::{read_test_contract, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - }, + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + utils::StorageWritesDeduplicator, }; -#[test] -fn test_l1_tx_execution() { +pub(crate) fn test_l1_tx_execution() { // In this test, we try to execute a contract deployment from L1 // Here instead of marking code hash via the bootloader means, we will be // using L1->L2 communication, the same it would likely be done during the priority mode. - // There are always at least 7 initial writes here, because we pay fees from l1: + // There are always at least 9 initial writes here, because we pay fees from l1: // - `totalSupply` of ETH token // - balance of the refund recipient // - balance of the bootloader // - `tx_rolling` hash + // - `gasPerPubdataByte` + // - `basePubdataSpent` // - rolling hash of L2->L1 logs // - transaction number in block counter // - L2->L1 log counter in `L1Messenger` - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; + // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. 
+ let basic_initial_writes = 5; - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let contract_code = read_test_contract(); let account = &mut vm.rich_accounts[0]; let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); + let tx_hash = deploy_tx.tx.hash(); let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { shard_id: 0, is_service: true, tx_number_in_block: 0, sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), + key: tx_hash, value: u256_to_h256(U256::from(1u32)), }] .into_iter() @@ -64,7 +60,7 @@ fn test_l1_tx_execution() { vm.vm.push_transaction(deploy_tx.tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); // The code hash of the deployed contract should be marked as republished. let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); @@ -72,14 +68,12 @@ fn test_l1_tx_execution() { // The contract should be deployed successfully. let account_code_key = get_code_key(&deploy_tx.address); - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; assert!(!res.result.is_failed()); - verify_required_storage(&vm.vm.state, expected_slots); - + vm.vm.verify_required_storage(&[ + (known_codes_key, U256::from(1)), + (account_code_key, h256_to_u256(deploy_tx.bytecode_hash)), + ]); assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); let tx = account.get_test_contract_transaction( @@ -90,12 +84,12 @@ fn test_l1_tx_execution() { TxType::L1 { serial_id: 0 }, ); vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); + assert_eq!(res.initial_storage_writes, basic_initial_writes); let tx = account.get_test_contract_transaction( deploy_tx.address, @@ -105,10 +99,10 @@ fn test_l1_tx_execution() { TxType::L1 { serial_id: 0 }, ); vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); let storage_logs = res.logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract + // We changed one slot inside contract. assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); // No repeated writes @@ -116,10 +110,11 @@ fn test_l1_tx_execution() { assert_eq!(res.repeated_storage_writes, 0); vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; + let storage_logs = vm.vm.execute(InspectExecutionMode::OneTx).logs.storage_logs; let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); + // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. 
+ // But now the base pubdata spent has changed too. + assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); assert_eq!(res.repeated_storage_writes, repeated_writes); let tx = account.get_test_contract_transaction( @@ -130,27 +125,26 @@ fn test_l1_tx_execution() { TxType::L1 { serial_id: 1 }, ); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); // Method is not payable tx should fail assert!(result.result.is_failed(), "The transaction should fail"); let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); + assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); + assert_eq!(res.repeated_storage_writes, 1); } -#[test] -fn test_l1_tx_execution_high_gas_limit() { +pub(crate) fn test_l1_tx_execution_high_gas_limit() { // In this test, we try to execute an L1->L2 transaction with a high gas limit. // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let account = &mut vm.rich_accounts[0]; @@ -167,7 +161,7 @@ fn test_l1_tx_execution_high_gas_limit() { Execute { contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), - factory_deps: None, + factory_deps: vec![], calldata, }, 0, @@ -182,7 +176,7 @@ fn test_l1_tx_execution_high_gas_limit() { vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(res.result.is_failed(), "The transaction should've failed"); } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs b/core/lib/multivm/src/versions/testonly/l2_blocks.rs similarity index 62% rename from core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs rename to core/lib/multivm/src/versions/testonly/l2_blocks.rs index 073d9ce5800..947d8b5859f 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/testonly/l2_blocks.rs @@ -3,110 +3,104 @@ //! The description for each of the tests can be found in the corresponding `.yul` file. //! 
-use zk_evm_1_4_1::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; +use assert_matches::assert_matches; use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, + block::{pack_block_info, L2BlockHasher}, + AccountTreeId, Address, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, + L2BlockNumber, ProtocolVersionId, StorageKey, Transaction, H256, SYSTEM_CONTEXT_ADDRESS, + SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, }; use zksync_utils::{h256_to_u256, u256_to_h256}; +use super::{default_l1_batch, get_empty_storage, tester::VmTesterBuilder, TestedVm}; use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, + interface::{ + storage::StorageView, ExecutionResult, Halt, InspectExecutionMode, L2BlockEnv, + TxExecutionMode, VmInterfaceExt, + }, + vm_latest::{ + constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, }, - HistoryMode, }; fn get_l1_noop() -> Transaction { Transaction { common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), + sender: Address::repeat_byte(1), gas_limit: U256::from(2000000u32), gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), ..Default::default() }), execute: Execute { - contract_address: H160::zero(), + contract_address: Some(Address::repeat_byte(0xc0)), calldata: vec![], value: U256::zero(), - factory_deps: None, + factory_deps: vec![], }, received_timestamp_ms: 0, raw_bytes: None, } } -#[test] -fn test_l2_block_initialization_timestamp() { +pub(crate) fn test_l2_block_initialization_timestamp() { // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp + // Here we check that the first block must have timestamp that is greater or equal to the timestamp // of the current batch. - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { + // Override the timestamp of the current L2 block to be 0. 
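+    // `push_l2_block_unchecked` skips the Rust-side consistency checks, so the bogus timestamp is only
+    // rejected by the bootloader; the resulting halt reason is asserted below.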
+ vm.vm.push_l2_block_unchecked(L2BlockEnv { number: 1, timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), max_virtual_blocks_to_create: 1, }); let l1_tx = get_l1_noop(); vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); - assert_eq!( + assert_matches!( res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} + ExecutionResult::Halt { reason: Halt::FailedToSetL2Block(msg) } + if msg.contains("timestamp") ); } -#[test] -fn test_l2_block_initialization_number_non_zero() { +pub(crate) fn test_l2_block_initialization_number_non_zero() { // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. + // Here we check that the first L2 block number can not be zero. let l1_batch = default_l1_batch(L1BatchNumber(1)); let first_l2_block = L2BlockEnv { number: 0, timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), max_virtual_blocks_to_create: 1, }; - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let l1_tx = get_l1_noop(); vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); + set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( res.result, @@ -118,26 +112,26 @@ fn test_l2_block_initialization_number_non_zero() { ); } -fn test_same_l2_block( +fn test_same_l2_block( expected_error: Option, override_timestamp: Option, override_prev_block_hash: Option, ) { let mut l1_batch = default_l1_batch(L1BatchNumber(1)); l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let l1_tx = get_l1_noop(); vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!res.result.is_failed()); - let mut current_l2_block = vm.vm.batch_env.first_l2_block; + let mut current_l2_block = vm.l1_batch_env.first_l2_block; if let Some(timestamp) = override_timestamp { current_l2_block.timestamp = timestamp; @@ -151,10 +145,9 @@ fn test_same_l2_block( } vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); + set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); if let Some(err) = expected_error { assert_eq!(result.result, ExecutionResult::Halt { reason: err }); @@ -163,12 +156,11 @@ fn test_same_l2_block( 
} } -#[test] -fn test_l2_block_same_l2_block() { +pub(crate) fn test_l2_block_same_l2_block() { // This test aims to test the case when there are multiple transactions inside the same L2 block. // Case 1: Incorrect timestamp - test_same_l2_block( + test_same_l2_block::( Some(Halt::FailedToSetL2Block( "The timestamp of the same L2 block must be same".to_string(), )), @@ -177,7 +169,7 @@ fn test_l2_block_same_l2_block() { ); // Case 2: Incorrect previous block hash - test_same_l2_block( + test_same_l2_block::( Some(Halt::FailedToSetL2Block( "The previous hash of the same L2 block must be same".to_string(), )), @@ -186,10 +178,10 @@ fn test_l2_block_same_l2_block() { ); // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); + test_same_l2_block::(None, None, None); } -fn test_new_l2_block( +fn test_new_l2_block( first_l2_block: L2BlockEnv, overriden_second_block_number: Option, overriden_second_block_timestamp: Option, @@ -200,23 +192,23 @@ fn test_new_l2_block( l1_batch.timestamp = 1; l1_batch.first_l2_block = first_l2_block; - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_l1_batch_env(l1_batch) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let l1_tx = get_l1_noop(); // Firstly we execute the first transaction vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); - let mut second_l2_block = vm.vm.batch_env.first_l2_block; + let mut second_l2_block = vm.l1_batch_env.first_l2_block; second_l2_block.number += 1; second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); + second_l2_block.prev_block_hash = vm.vm.last_l2_block_hash(); if let Some(block_number) = overriden_second_block_number { second_l2_block.number = block_number; @@ -228,11 +220,10 @@ fn test_new_l2_block( second_l2_block.prev_block_hash = prev_block_hash; } - vm.vm.bootloader_state.push_l2_block(second_l2_block); - + vm.vm.push_l2_block_unchecked(second_l2_block); vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); if let Some(err) = expected_error { assert_eq!(result.result, ExecutionResult::Halt { reason: err }); } else { @@ -240,19 +231,18 @@ fn test_new_l2_block( } } -#[test] -fn test_l2_block_new_l2_block() { +pub(crate) fn test_l2_block_new_l2_block() { // This test is aimed to cover potential issue let correct_first_block = L2BlockEnv { number: 1, timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), max_virtual_blocks_to_create: 1, }; // Case 1: Block number increasing by more than 1 - test_new_l2_block( + test_new_l2_block::( correct_first_block, Some(3), None, @@ -263,7 +253,7 @@ fn test_l2_block_new_l2_block() { ); // Case 2: Timestamp not increasing - test_new_l2_block( + test_new_l2_block::( correct_first_block, None, Some(1), @@ -272,7 +262,7 @@ fn test_l2_block_new_l2_block() { ); // Case 3: Incorrect previous block hash - test_new_l2_block( + test_new_l2_block::( correct_first_block, None, None, @@ -283,11 +273,11 @@ fn test_l2_block_new_l2_block() { ); // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); + test_new_l2_block::(correct_first_block, None, 
None, None, None); } #[allow(clippy::too_many_arguments)] -fn test_first_in_batch( +fn test_first_in_batch( miniblock_timestamp: u64, miniblock_number: u32, pending_txs_hash: H256, @@ -301,16 +291,15 @@ fn test_first_in_batch( l1_batch.number += 1; l1_batch.timestamp = new_batch_timestamp; - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_l1_batch_env(l1_batch) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let l1_tx = get_l1_noop(); // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); let miniblock_info_slot = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, @@ -325,42 +314,43 @@ fn test_first_in_batch( ); let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - storage_ptr.borrow_mut().set_value( + let mut storage = get_empty_storage(); + storage.set_value( miniblock_info_slot, u256_to_h256(pack_block_info( miniblock_number as u64, miniblock_timestamp, )), ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( + storage.set_value(pending_txs_hash_slot, pending_txs_hash); + storage.set_value( batch_info_slot, u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), ); - storage_ptr.borrow_mut().set_value( + storage.set_value( prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), + L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), ); + // Replace the storage entirely. It's not enough to write to the underlying storage (since read values are already cached + // in the storage view). + *vm.storage.borrow_mut() = StorageView::new(storage); // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. 
// And then override it with the user-provided value - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); + let last_l2_block = vm.l1_batch_env.first_l2_block; let new_l2_block = L2BlockEnv { number: last_l2_block.number + 1, timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), + prev_block_hash: vm.vm.last_l2_block_hash(), max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, }; - vm.vm.bootloader_state.push_l2_block(new_l2_block); + vm.vm.push_l2_block_unchecked(new_l2_block); vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); + set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); if let Some(err) = expected_error { assert_eq!(result.result, ExecutionResult::Halt { reason: err }); } else { @@ -368,12 +358,11 @@ fn test_first_in_batch( } } -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) +pub(crate) fn test_l2_block_first_in_batch() { + let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); + let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) .finalize(ProtocolVersionId::latest()); - test_first_in_batch( + test_first_in_batch::( 1, 1, H256::zero(), @@ -389,10 +378,10 @@ fn test_l2_block_first_in_batch() { None, ); - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) + let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); + let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) .finalize(ProtocolVersionId::latest()); - test_first_in_batch( + test_first_in_batch::( 8, 1, H256::zero(), @@ -409,29 +398,19 @@ fn test_l2_block_first_in_batch() { ); } -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { +fn set_manual_l2_block_info(vm: &mut impl TestedVm, tx_number: usize, block_info: L2BlockEnv) { let fictive_miniblock_position = TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) + vm.write_to_bootloader_heap(&[ + (fictive_miniblock_position, block_info.number.into()), + (fictive_miniblock_position + 1, block_info.timestamp.into()), + ( + fictive_miniblock_position + 2, + h256_to_u256(block_info.prev_block_hash), + ), + ( + fictive_miniblock_position + 3, + block_info.max_virtual_blocks_to_create.into(), + ), + ]) } diff --git a/core/lib/multivm/src/versions/testonly/mod.rs b/core/lib/multivm/src/versions/testonly/mod.rs new file mode 100644 index 00000000000..eece1d475bb --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/mod.rs @@ -0,0 +1,243 @@ +//! Reusable tests and tooling for low-level VM testing. +//! +//! # How it works +//! +//! 
- [`TestedVm`] defines test-specific VM extensions. It's currently implemented for the latest legacy VM +//! (`vm_latest`) and the fast VM (`vm_fast`). +//! - Submodules of this module define test functions generic by `TestedVm`. Specific VM versions implement `TestedVm` +//! and can create tests based on these test functions with minimum amount of boilerplate code. +//! - Tests use [`VmTester`] built using [`VmTesterBuilder`] to create a VM instance. This allows to set up storage for the VM, +//! custom [`SystemEnv`] / [`L1BatchEnv`], deployed contracts, pre-funded accounts etc. + +use std::{collections::HashSet, rc::Rc}; + +use ethabi::Contract; +use once_cell::sync::Lazy; +use zksync_contracts::{ + load_contract, read_bootloader_code, read_bytecode, read_zbin_bytecode, BaseSystemContracts, + SystemContractCode, +}; +use zksync_types::{ + block::L2BlockHasher, fee_model::BatchFeeInput, get_code_key, get_is_account_key, + utils::storage_key_for_eth_balance, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; +use zksync_vm_interface::{ + pubdata::PubdataBuilder, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, +}; + +pub(super) use self::tester::{TestedVm, VmTester, VmTesterBuilder}; +use crate::{ + interface::storage::InMemoryStorage, pubdata_builders::RollupPubdataBuilder, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +pub(super) mod block_tip; +pub(super) mod bootloader; +pub(super) mod bytecode_publishing; +pub(super) mod circuits; +pub(super) mod code_oracle; +pub(super) mod default_aa; +pub(super) mod gas_limit; +pub(super) mod get_used_contracts; +pub(super) mod is_write_initial; +pub(super) mod l1_tx_execution; +pub(super) mod l2_blocks; +pub(super) mod nonce_holder; +pub(super) mod precompiles; +pub(super) mod refunds; +pub(super) mod require_eip712; +pub(super) mod rollbacks; +pub(super) mod secp256r1; +pub(super) mod simple_execution; +pub(super) mod storage; +mod tester; +pub(super) mod tracing_execution_error; +pub(super) mod transfer; +pub(super) mod upgrade; + +static BASE_SYSTEM_CONTRACTS: Lazy = + Lazy::new(BaseSystemContracts::load_from_disk); + +fn get_empty_storage() -> InMemoryStorage { + InMemoryStorage::with_system_contracts(hash_bytecode) +} + +pub(crate) fn read_test_contract() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") +} + +fn get_complex_upgrade_abi() -> Contract { + load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" + ) +} + +fn read_complex_upgrade() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") +} + +fn read_precompiles_contract() -> Vec { + read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", + ) +} + +fn load_precompiles_contract() -> Contract { + load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", + ) +} + +fn read_proxy_counter_contract() -> (Vec, Contract) { + const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; + (read_bytecode(PATH), load_contract(PATH)) +} + +fn read_nonce_holder_tester() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") 
+} + +fn read_expensive_contract() -> (Vec, Contract) { + const PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; + (read_bytecode(PATH), load_contract(PATH)) +} + +fn read_many_owners_custom_account_contract() -> (Vec, Contract) { + let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; + (read_bytecode(path), load_contract(path)) +} + +fn read_error_contract() -> Vec { + read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", + ) +} + +pub(crate) fn read_max_depth_contract() -> Vec { + read_zbin_bytecode( + "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", + ) +} + +pub(crate) fn read_simple_transfer_contract() -> Vec { + read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/simple-transfer/simple-transfer.sol/SimpleTransfer.json", + ) +} + +pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { + let bootloader_code = read_bootloader_code(test); + let bootloader_hash = hash_bytecode(&bootloader_code); + SystemContractCode { + code: bytes_to_be_words(bootloader_code), + hash: bootloader_hash, + } +} + +pub(crate) fn filter_out_base_system_contracts(all_bytecode_hashes: &mut HashSet) { + all_bytecode_hashes.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); + if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { + all_bytecode_hashes.remove(&h256_to_u256(evm_emulator.hash)); + } +} + +pub(super) fn default_system_env() -> SystemEnv { + SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BaseSystemContracts::playground(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + } +} + +pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { + // Add a bias to the timestamp to make it more realistic / "random". 
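+    // 1_700_000_000 s is roughly Nov 2023; adding the batch number keeps timestamps distinct across batches.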
+ let timestamp = 1_700_000_000 + u64::from(number.0); + L1BatchEnv { + previous_batch_hash: None, + number, + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::repeat_byte(1), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + } +} + +pub(super) fn default_pubdata_builder() -> Rc { + Rc::new(RollupPubdataBuilder::new(Address::zero())) +} + +pub(super) fn make_address_rich(storage: &mut InMemoryStorage, address: Address) { + let key = storage_key_for_eth_balance(&address); + storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); +} + +#[derive(Debug, Clone)] +pub(super) struct ContractToDeploy { + bytecode: Vec, + address: Address, + is_account: bool, + is_funded: bool, +} + +impl ContractToDeploy { + pub fn new(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: false, + is_funded: false, + } + } + + pub fn account(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: true, + is_funded: false, + } + } + + #[must_use] + pub fn funded(mut self) -> Self { + self.is_funded = true; + self + } + + pub fn insert(&self, storage: &mut InMemoryStorage) { + let deployer_code_key = get_code_key(&self.address); + storage.set_value(deployer_code_key, hash_bytecode(&self.bytecode)); + if self.is_account { + let is_account_key = get_is_account_key(&self.address); + storage.set_value(is_account_key, u256_to_h256(1_u32.into())); + } + storage.store_factory_dep(hash_bytecode(&self.bytecode), self.bytecode.clone()); + + if self.is_funded { + make_address_rich(storage, self.address); + } + } + + /// Inserts the contracts into the test environment, bypassing the deployer system contract. 
+ pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) { + for contract in contracts { + contract.insert(storage); + } + } +} diff --git a/core/lib/multivm/src/versions/testonly/nonce_holder.rs b/core/lib/multivm/src/versions/testonly/nonce_holder.rs new file mode 100644 index 00000000000..36f736c0bbe --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/nonce_holder.rs @@ -0,0 +1,200 @@ +use zksync_test_account::Account; +use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; + +use super::{read_nonce_holder_tester, tester::VmTesterBuilder, ContractToDeploy, TestedVm}; +use crate::interface::{ + ExecutionResult, Halt, InspectExecutionMode, TxExecutionMode, TxRevertReason, VmInterfaceExt, + VmRevertReason, +}; + +pub enum NonceHolderTestMode { + SetValueUnderNonce, + IncreaseMinNonceBy5, + IncreaseMinNonceTooMuch, + LeaveNonceUnused, + IncreaseMinNonceBy1, + SwitchToArbitraryOrdering, +} + +impl From for u8 { + fn from(mode: NonceHolderTestMode) -> u8 { + match mode { + NonceHolderTestMode::SetValueUnderNonce => 0, + NonceHolderTestMode::IncreaseMinNonceBy5 => 1, + NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, + NonceHolderTestMode::LeaveNonceUnused => 3, + NonceHolderTestMode::IncreaseMinNonceBy1 => 4, + NonceHolderTestMode::SwitchToArbitraryOrdering => 5, + } + } +} + +fn run_nonce_test( + vm: &mut impl TestedVm, + account: &mut Account, + nonce: u32, + test_mode: NonceHolderTestMode, + error_message: Option, + comment: &'static str, +) { + vm.make_snapshot(); + let mut transaction = account.get_l2_tx_for_execute_with_nonce( + Execute { + contract_address: Some(account.address), + calldata: vec![12], + value: Default::default(), + factory_deps: vec![], + }, + None, + Nonce(nonce), + ); + let ExecuteTransactionCommon::L2(tx_data) = &mut transaction.common_data else { + unreachable!(); + }; + tx_data.signature = vec![test_mode.into()]; + vm.push_transaction(transaction); + let result = vm.execute(InspectExecutionMode::OneTx); + + if let Some(msg) = error_message { + let expected_error = + TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { + msg, + data: vec![], + })); + let ExecutionResult::Halt { reason } = &result.result else { + panic!("Expected revert, got {:?}", result.result); + }; + assert_eq!(reason.to_string(), expected_error.to_string(), "{comment}"); + vm.rollback_to_the_latest_snapshot(); + } else { + assert!(!result.result.is_failed(), "{}", comment); + vm.pop_snapshot_no_rollback(); + } +} + +pub(crate) fn test_nonce_holder() { + let builder = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1); + let account_address = builder.rich_account(0).address; + let mut vm = builder + .with_custom_contracts(vec![ContractToDeploy::account( + read_nonce_holder_tester(), + account_address, + )]) + .build::(); + let account = &mut vm.rich_accounts[0]; + let hex_addr = hex::encode(account.address.to_fixed_bytes()); + + // Test 1: trying to set value under non sequential nonce value. 
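+    // The account's minimal nonce is still 0 at this point, so setting a value under nonce 1 must be rejected.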
+ run_nonce_test( + &mut vm.vm, + account, + 1u32, + NonceHolderTestMode::SetValueUnderNonce, + Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), + "Allowed to set value under non sequential value", + ); + + // Test 2: increase min nonce by 1 with sequential nonce ordering: + run_nonce_test( + &mut vm.vm, + account, + 0u32, + NonceHolderTestMode::IncreaseMinNonceBy1, + None, + "Failed to increment nonce by 1 for sequential account", + ); + + // Test 3: correctly set value under nonce with sequential nonce ordering: + run_nonce_test( + &mut vm.vm, + account, + 1u32, + NonceHolderTestMode::SetValueUnderNonce, + None, + "Failed to set value under nonce sequential value", + ); + + // Test 5: migrate to the arbitrary nonce ordering: + run_nonce_test( + &mut vm.vm, + account, + 2u32, + NonceHolderTestMode::SwitchToArbitraryOrdering, + None, + "Failed to switch to arbitrary ordering", + ); + + // Test 6: increase min nonce by 5 + run_nonce_test( + &mut vm.vm, + account, + 6u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + None, + "Failed to increase min nonce by 5", + ); + + // Test 7: since the nonces in range [6,10] are no longer allowed, the + // tx with nonce 10 should not be allowed + run_nonce_test( + &mut vm.vm, + account, + 10u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), + "Allowed to reuse nonce below the minimal one", + ); + + // Test 8: we should be able to use nonce 13 + run_nonce_test( + &mut vm.vm, + account, + 13u32, + NonceHolderTestMode::SetValueUnderNonce, + None, + "Did not allow to use unused nonce 10", + ); + + // Test 9: we should not be able to reuse nonce 13 + run_nonce_test( + &mut vm.vm, + account, + 13u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), + "Allowed to reuse the same nonce twice", + ); + + // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 + run_nonce_test( + &mut vm.vm, + account, + 14u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + None, + "Did not allow to use a bumped nonce", + ); + + // Test 11: Do not allow bumping nonce by too much + run_nonce_test( + &mut vm.vm, + account, + 16u32, + NonceHolderTestMode::IncreaseMinNonceTooMuch, + Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), + "Allowed for incrementing min nonce too much", + ); + + // Test 12: Do not allow not setting a nonce as used + run_nonce_test( + &mut vm.vm, + account, + 16u32, + NonceHolderTestMode::LeaveNonceUnused, + Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), + "Allowed to leave nonce as unused", + ); +} diff --git a/core/lib/multivm/src/versions/testonly/precompiles.rs b/core/lib/multivm/src/versions/testonly/precompiles.rs new file mode 100644 index 00000000000..2e26dc134b0 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/precompiles.rs @@ -0,0 +1,110 @@ +use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config; +use zksync_types::{Address, Execute}; + +use 
super::{read_precompiles_contract, tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + versions::testonly::ContractToDeploy, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +pub(crate) fn test_keccak() { + // Execute special transaction and check that at least 1000 keccak calls were made. + let contract = read_precompiles_contract(); + let address = Address::repeat_byte(1); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) + .build::(); + + // calldata for `doKeccak(1000)`. + let keccak1000_calldata = + "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: hex::decode(keccak1000_calldata).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let keccak_count = exec_result.statistics.circuit_statistic.keccak256 + * get_geometry_config().cycles_per_keccak256_circuit as f32; + assert!(keccak_count >= 1000.0, "{keccak_count}"); +} + +pub(crate) fn test_sha256() { + // Execute special transaction and check that at least 1000 `sha256` calls were made. + let contract = read_precompiles_contract(); + let address = Address::repeat_byte(1); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) + .build::(); + + // calldata for `doSha256(1000)`. + let sha1000_calldata = + "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: hex::decode(sha1000_calldata).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let sha_count = exec_result.statistics.circuit_statistic.sha256 + * get_geometry_config().cycles_per_sha256_circuit as f32; + assert!(sha_count >= 1000.0, "{sha_count}"); +} + +pub(crate) fn test_ecrecover() { + // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
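+    // The transaction below is a plain call from the rich account to itself with empty calldata, so the only
+    // `ecrecover` invocation is the signature check during validation; the circuit statistic is converted back
+    // into a cycle count below to verify this.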
+ let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(account.address), + calldata: vec![], + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover + * get_geometry_config().cycles_per_ecrecover_circuit as f32; + assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}"); +} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs b/core/lib/multivm/src/versions/testonly/refunds.rs similarity index 51% rename from core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs rename to core/lib/multivm/src/versions/testonly/refunds.rs index 54c281a9939..edab843be4f 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs +++ b/core/lib/multivm/src/versions/testonly/refunds.rs @@ -1,32 +1,30 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; +use ethabi::Token; +use zksync_test_account::TxType; +use zksync_types::{Address, Execute, U256}; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; +use super::{ + default_pubdata_builder, read_expensive_contract, read_test_contract, tester::VmTesterBuilder, + ContractToDeploy, TestedVm, +}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; -#[test] -fn test_predetermined_refunded_gas() { +pub(crate) fn test_predetermined_refunded_gas() { // In this test, we compare the execution of the bootloader with the predefined // refunded gas and without them - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); + .with_rich_accounts(1) + .build::(); + let l1_batch = vm.l1_batch_env.clone(); let counter = read_test_contract(); let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); + let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx; vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); @@ -39,7 +37,10 @@ fn test_predetermined_refunded_gas() { ); assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let result_without_predefined_refunds = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); assert!(!result_without_predefined_refunds.result.is_failed(),); @@ -47,21 +48,21 @@ fn test_predetermined_refunded_gas() { // We execute the whole 
block without refund tracer, because refund tracer will eventually override the provided refund. // But the overall result should be the same - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_l1_batch_env(l1_batch.clone()) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); + .with_rich_accounts(1) + .build::(); + assert_eq!(account.address(), vm.rich_accounts[0].address()); - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); + .push_transaction_with_refund(tx.clone(), result.refunds.gas_refunded); - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let result_with_predefined_refunds = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); assert!(!result_with_predefined_refunds.result.is_failed()); @@ -90,13 +91,8 @@ fn test_predetermined_refunded_gas() { ); assert_eq!( - current_state_with_predefined_refunds.deduplicated_events_logs, - current_state_without_predefined_refunds.deduplicated_events_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries + current_state_with_predefined_refunds.deduplicated_storage_logs, + current_state_without_predefined_refunds.deduplicated_storage_logs ); assert_eq!( current_state_with_predefined_refunds.used_contract_hashes, @@ -106,17 +102,21 @@ fn test_predetermined_refunded_gas() { // In this test we put the different refund from the operator. // We still can't use the refund tracer, because it will override the refund. // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_l1_batch_env(l1_batch) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); + .with_rich_accounts(1) + .build::(); + assert_eq!(account.address(), vm.rich_accounts[0].address()); let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); + .push_transaction_with_refund(tx, changed_operator_suggested_refund); + let result = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); assert!(!result.result.is_failed()); @@ -147,26 +147,78 @@ fn test_predetermined_refunded_gas() { current_state_without_predefined_refunds.system_logs ); - assert_eq!( - current_state_with_changed_predefined_refunds.deduplicated_events_logs, - current_state_without_predefined_refunds.deduplicated_events_logs - ); - assert_eq!( current_state_with_changed_predefined_refunds - .storage_log_queries + .deduplicated_storage_logs .len(), current_state_without_predefined_refunds - .storage_log_queries + .deduplicated_storage_logs .len() ); assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries + current_state_with_changed_predefined_refunds.deduplicated_storage_logs, + current_state_without_predefined_refunds.deduplicated_storage_logs ); assert_eq!( current_state_with_changed_predefined_refunds.used_contract_hashes, current_state_without_predefined_refunds.used_contract_hashes ); } + +pub(crate) fn test_negative_pubdata_for_transaction() { + let expensive_contract_address = Address::repeat_byte(1); + let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); + let expensive_function = expensive_contract.function("expensive").unwrap(); + let cleanup_function = expensive_contract.function("cleanUp").unwrap(); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + expensive_contract_bytecode, + expensive_contract_address, + )]) + .build::(); + + let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: Some(expensive_contract_address), + calldata: expensive_function + .encode_input(&[Token::Uint(10.into())]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(expensive_tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
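+    // With a negative pubdata delta, the operator-suggested refund must be positive and the refund actually
+    // granted should match it exactly; both properties are asserted below.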
+ let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: Some(expensive_contract_address), + calldata: cleanup_function.encode_input(&[]).unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(clean_up_tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + assert!(result.refunds.operator_suggested_refund > 0); + assert_eq!( + result.refunds.gas_refunded, + result.refunds.operator_suggested_refund + ); +} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs b/core/lib/multivm/src/versions/testonly/require_eip712.rs similarity index 61% rename from core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs rename to core/lib/multivm/src/versions/testonly/require_eip712.rs index 15f4504d6e1..e789fbda290 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/testonly/require_eip712.rs @@ -1,57 +1,38 @@ -use std::convert::TryInto; - use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; +use zksync_eth_signer::TransactionParameters; use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, + fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, Address, Eip712Domain, Execute, L2ChainId, Nonce, Transaction, U256, }; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, +use super::{ + read_many_owners_custom_account_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm, }; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy /// and EIP712 transactions. /// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -async fn test_require_eip712() { +pub(crate) fn test_require_eip712() { // Use 3 accounts: // - `private_address` - EOA account, where we have the key // - `account_address` - AA account, where the contract is deployed // - beneficiary - an EOA account, where we'll try to transfer the tokens. 
- let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); + let aa_address = Address::repeat_byte(0x10); + let beneficiary_address = Address::repeat_byte(0x20); let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) + .with_custom_contracts(vec![ + ContractToDeploy::account(bytecode, aa_address).funded() + ]) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - + .with_rich_accounts(1) + .build::(); + assert_eq!(vm.get_eth_balance(beneficiary_address), U256::from(0)); let chain_id: u32 = 270; + let mut private_account = vm.rich_accounts[0].clone(); // First, let's set the owners of the AA account to the `private_address`. // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). @@ -62,16 +43,16 @@ async fn test_require_eip712() { let tx = private_account.get_l2_tx_for_execute( Execute { - contract_address: account_abstraction.address, + contract_address: Some(aa_address), calldata: encoded_input, value: Default::default(), - factory_deps: None, + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); let private_account_balance = vm.get_eth_balance(private_account.address); @@ -80,7 +61,7 @@ async fn test_require_eip712() { // Normally this would not work - unless the operator is malicious. let aa_raw_tx = TransactionParameters { nonce: U256::from(0), - to: Some(beneficiary.address), + to: Some(beneficiary_address), gas: U256::from(100000000), gas_price: Some(U256::from(10000000)), value: U256::from(888000088), @@ -94,20 +75,21 @@ async fn test_require_eip712() { blob_versioned_hashes: None, }; - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); l2_tx.set_input(aa_tx, hash); // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); + l2_tx.common_data.initiator_address = aa_address; + let transaction: Transaction = l2_tx.into(); vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); + assert_eq!( - vm.get_eth_balance(beneficiary.address), + vm.get_eth_balance(beneficiary_address), U256::from(888000088) ); // Make sure that the tokens were transferred from the AA account. 
@@ -118,7 +100,7 @@ async fn test_require_eip712() { // // Now send the 'classic' EIP712 transaction let tx_712 = L2Tx::new( - beneficiary.address, + Some(beneficiary_address), vec![], Nonce(1), Fee { @@ -127,34 +109,34 @@ async fn test_require_eip712() { max_priority_fee_per_gas: U256::from(1000000000), gas_per_pubdata_limit: U256::from(1000000000), }, - account_abstraction.address, + aa_address, U256::from(28374938), - None, + vec![], Default::default(), ); - let transaction_request: TransactionRequest = tx_712.into(); + let mut transaction_request: TransactionRequest = tx_712.into(); + transaction_request.chain_id = Some(chain_id.into()); let domain = Eip712Domain::new(L2ChainId::from(chain_id)); let signature = private_account .get_pk_signer() .sign_typed_data(&domain, &transaction_request) - .await .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); + let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); l2_tx.set_input(encoded_tx, aa_hash); - let transaction: Transaction = l2_tx.try_into().unwrap(); + let transaction: Transaction = l2_tx.into(); vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( - vm.get_eth_balance(beneficiary.address), + vm.get_eth_balance(beneficiary_address), U256::from(916375026) ); assert_eq!( diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs b/core/lib/multivm/src/versions/testonly/rollbacks.rs similarity index 50% rename from core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs rename to core/lib/multivm/src/versions/testonly/rollbacks.rs index 240b7188377..cab3427899e 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/testonly/rollbacks.rs @@ -1,24 +1,24 @@ -use ethabi::Token; - -use zksync_contracts::get_loadnext_contract; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; +use std::collections::HashMap; -use zksync_types::{Execute, U256}; - -use crate::interface::TxExecutionMode; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{ - DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, +use assert_matches::assert_matches; +use ethabi::Token; +use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; +use zksync_test_account::{DeployContractsTx, TxType}; +use zksync_types::{Address, Execute, Nonce, U256}; + +use super::{ + read_test_contract, + tester::{TransactionTestInfo, TxModifier, VmTesterBuilder}, + ContractToDeploy, TestedVm, }; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; +use crate::interface::{storage::ReadStorage, ExecutionResult, TxExecutionMode, VmInterfaceExt}; -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) +pub(crate) fn test_vm_rollbacks() { + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let mut account = vm.rich_accounts[0].clone(); let counter = read_test_contract(); @@ -40,34 +40,51 @@ 
fn test_vm_rollbacks() { TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_0.clone(), false), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_1, false), // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), // This tx will succeed TransactionTestInfo::new_processed(tx_2.clone(), false), // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), + ), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), ]); - assert_eq!(result_without_rollbacks, result_with_rollbacks); + pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks); } -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) +pub(crate) fn test_vm_loadnext_rollbacks() { + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let mut account = vm.rich_accounts[0].clone(); let loadnext_contract = get_loadnext_contract(); @@ -85,7 +102,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_1 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -96,14 +113,14 @@ fn test_vm_loadnext_rollbacks() { } .to_bytes(), value: Default::default(), - factory_deps: None, + factory_deps: vec![], }, None, ); let loadnext_tx_2 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -114,7 +131,7 @@ fn test_vm_loadnext_rollbacks() { } .to_bytes(), value: Default::default(), - factory_deps: None, + factory_deps: vec![], }, None, ); @@ -133,14 +150,63 @@ fn test_vm_loadnext_rollbacks() { TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), TransactionTestInfo::new_rejected( loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), 
+ loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), ), TransactionTestInfo::new_processed(loadnext_tx_1, false), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), + ), TransactionTestInfo::new_processed(loadnext_tx_2, false), ]); assert_eq!(result_without_rollbacks, result_with_rollbacks); } + +pub(crate) fn test_rollback_in_call_mode() { + let counter_bytecode = read_test_contract(); + let counter_address = Address::repeat_byte(1); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::EthCall) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) + .with_rich_accounts(1) + .build::(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); + + let (compression_result, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + compression_result.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } + if output.to_string().contains("This method always reverts") + ); + + let storage_logs = &vm_result.logs.storage_logs; + let deduplicated_logs = storage_logs + .iter() + .filter_map(|log| log.log.is_write().then_some((log.log.key, log.log.value))); + let deduplicated_logs: HashMap<_, _> = deduplicated_logs.collect(); + // Check that all storage changes are reverted + let mut storage = vm.storage.borrow_mut(); + for (key, value) in deduplicated_logs { + assert_eq!(storage.inner_mut().read_value(&key), value); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs b/core/lib/multivm/src/versions/testonly/secp256r1.rs similarity index 89% rename from core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs rename to core/lib/multivm/src/versions/testonly/secp256r1.rs index 55ca372c4a9..37d428f8210 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/testonly/secp256r1.rs @@ -3,21 +3,18 @@ use zksync_system_constants::P256VERIFY_PRECOMPILE_ADDRESS; use zksync_types::{web3::keccak256, Execute, H256, U256}; use zksync_utils::h256_to_u256; -use crate::{ - interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::tester::VmTesterBuilder, -}; +use super::{tester::VmTesterBuilder, TestedVm}; +use crate::interface::{ExecutionResult, InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; -#[test] -fn test_sekp256r1() { +pub(crate) fn test_secp256r1() { // In this test, we aim to test whether a simple account interaction (without any fee logic) // will work. The account will try to deploy a simple contract from integration tests. 
let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_execution_mode(TxExecutionMode::EthCall) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let account = &mut vm.rich_accounts[0]; @@ -58,7 +55,7 @@ fn test_sekp256r1() { vm.vm.push_transaction(tx); - let execution_result = vm.vm.execute(VmExecutionMode::Batch); + let execution_result = vm.vm.execute(InspectExecutionMode::OneTx); let ExecutionResult::Success { output } = execution_result.result else { panic!("batch failed") diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs b/core/lib/multivm/src/versions/testonly/simple_execution.rs similarity index 63% rename from core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs rename to core/lib/multivm/src/versions/testonly/simple_execution.rs index 57b37e67b76..96239fb362d 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/testonly/simple_execution.rs @@ -1,15 +1,14 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::tester::{TxType, VmTesterBuilder}, -}; +use assert_matches::assert_matches; +use zksync_test_account::TxType; -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) +use super::{default_pubdata_builder, tester::VmTesterBuilder, TestedVm}; +use crate::interface::{ExecutionResult, InspectExecutionMode, VmInterfaceExt}; + +pub(crate) fn test_estimate_fee() { + let mut vm_tester = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); vm_tester.deploy_test_contract(); let account = &mut vm_tester.rich_accounts[0]; @@ -24,17 +23,15 @@ fn estimate_fee() { vm_tester.vm.push_transaction(tx); - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); + let result = vm_tester.vm.execute(InspectExecutionMode::OneTx); assert_matches!(result.result, ExecutionResult::Success { .. }); } -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) +pub(crate) fn test_simple_execute() { + let mut vm_tester = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); vm_tester.deploy_test_contract(); @@ -67,12 +64,14 @@ fn simple_execute() { vm.push_transaction(tx1); vm.push_transaction(tx2); vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); + let tx = vm.execute(InspectExecutionMode::OneTx); assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); + let tx = vm.execute(InspectExecutionMode::OneTx); assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); + let tx = vm.execute(InspectExecutionMode::OneTx); assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); + let block_tip = vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; assert_matches!(block_tip.result, ExecutionResult::Success { .. 
}); }
diff --git a/core/lib/multivm/src/versions/testonly/storage.rs b/core/lib/multivm/src/versions/testonly/storage.rs
new file mode 100644
index 00000000000..efe7be1edbd
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/storage.rs
@@ -0,0 +1,125 @@
+use ethabi::Token;
+use zksync_contracts::{load_contract, read_bytecode};
+use zksync_types::{Address, Execute, U256};
+
+use super::{tester::VmTesterBuilder, ContractToDeploy, TestedVm};
+use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt};
+
+fn test_storage<VM: TestedVm>(first_tx_calldata: Vec<u8>, second_tx_calldata: Vec<u8>) -> u32 {
+    let bytecode = read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
+    );
+
+    let test_contract_address = Address::repeat_byte(1);
+
+    // In this test, we aim to test whether a simple account interaction (without any fee logic)
+    // will work. The account will try to deploy a simple contract from integration tests.
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)])
+        .build::<VM>();
+
+    let account = &mut vm.rich_accounts[0];
+
+    let tx1 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(test_contract_address),
+            calldata: first_tx_calldata,
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    let tx2 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(test_contract_address),
+            calldata: second_tx_calldata,
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx1);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "First tx failed");
+    vm.vm.pop_snapshot_no_rollback();
+
+    // We rollback once because transient storage and rollbacks are a tricky combination.
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx2.clone());
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "Second tx failed");
+    vm.vm.rollback_to_the_latest_snapshot();
+
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx2);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "Second tx failed on second run");
+
+    result.statistics.pubdata_published
+}
+
+fn test_storage_one_tx<VM: TestedVm>(second_tx_calldata: Vec<u8>) -> u32 {
+    test_storage::<VM>(vec![], second_tx_calldata)
+}
+
+pub(crate) fn test_storage_behavior<VM: TestedVm>() {
+    let contract = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
+    );
+
+    // In all of the tests below we provide the first tx to ensure that the tracers will not include
+    // the statistics from the start of the bootloader and will only include those for the transaction itself.
+
+    let base_pubdata = test_storage_one_tx::<VM>(vec![]);
+    let simple_test_pubdata = test_storage_one_tx::<VM>(
+        contract
+            .function("simpleWrite")
+            .unwrap()
+            .encode_input(&[])
+            .unwrap(),
+    );
+    let resetting_write_pubdata = test_storage_one_tx::<VM>(
+        contract
+            .function("resettingWrite")
+            .unwrap()
+            .encode_input(&[])
+            .unwrap(),
+    );
+    let resetting_write_via_revert_pubdata = test_storage_one_tx::<VM>(
+        contract
+            .function("resettingWriteViaRevert")
+            .unwrap()
+            .encode_input(&[])
+            .unwrap(),
+    );
+
+    assert_eq!(simple_test_pubdata - base_pubdata, 65);
+    assert_eq!(resetting_write_pubdata - base_pubdata, 34);
+    assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34);
+}
+
+pub(crate) fn test_transient_storage_behavior<VM: TestedVm>() {
+    let contract = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
+    );
+
+    let first_tstore_test = contract
+        .function("testTransientStore")
+        .unwrap()
+        .encode_input(&[])
+        .unwrap();
+    // Second transaction checks that, as expected, the transient storage is cleared after the first transaction.
+    let second_tstore_test = contract
+        .function("assertTValue")
+        .unwrap()
+        .encode_input(&[Token::Uint(U256::zero())])
+        .unwrap();
+
+    test_storage::<VM>(first_tstore_test, second_tstore_test);
+}
diff --git a/core/lib/multivm/src/versions/testonly/tester/mod.rs b/core/lib/multivm/src/versions/testonly/tester/mod.rs
new file mode 100644
index 00000000000..716b9386235
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/tester/mod.rs
@@ -0,0 +1,231 @@
+use std::{collections::HashSet, fmt, rc::Rc};
+
+use zksync_contracts::BaseSystemContracts;
+use zksync_test_account::{Account, TxType};
+use zksync_types::{
+    utils::{deployed_address_create, storage_key_for_eth_balance},
+    writes::StateDiffRecord,
+    Address, L1BatchNumber, StorageKey, Transaction, H256, U256,
+};
+use zksync_vm_interface::{
+    pubdata::PubdataBuilder, CurrentExecutionState, InspectExecutionMode, VmExecutionResultAndLogs,
+    VmInterfaceHistoryEnabled,
+};
+
+pub(crate) use self::transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier};
+use super::{get_empty_storage, read_test_contract};
+use crate::{
+    interface::{
+        storage::{InMemoryStorage, StoragePtr, StorageView},
+        L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterfaceExt,
+    },
+    versions::testonly::{
+        default_l1_batch, default_system_env, make_address_rich, ContractToDeploy,
+    },
+};
+
+mod transaction_test_info;
+
+/// VM tester that provides prefunded accounts, storage handle etc.
+#[derive(Debug)]
+pub(crate) struct VmTester<VM> {
+    pub(crate) vm: VM,
+    pub(crate) system_env: SystemEnv,
+    pub(crate) l1_batch_env: L1BatchEnv,
+    pub(crate) storage: StoragePtr<StorageView<InMemoryStorage>>,
+    pub(crate) test_contract: Option<Address>,
+    pub(crate) rich_accounts: Vec<Account>,
+}
+
+impl<VM: TestedVm> VmTester<VM> {
+    pub(crate) fn deploy_test_contract(&mut self) {
+        let contract = read_test_contract();
+        let account = &mut self.rich_accounts[0];
+        let tx = account.get_deploy_tx(&contract, None, TxType::L2).tx;
+        let nonce = tx.nonce().unwrap().0.into();
+        self.vm.push_transaction(tx);
+        self.vm.execute(InspectExecutionMode::OneTx);
+        let deployed_address = deployed_address_create(account.address, nonce);
+        self.test_contract = Some(deployed_address);
+    }
+
+    pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 {
+        self.vm.read_storage(storage_key_for_eth_balance(&address))
+    }
+
+    pub(crate) fn reset_with_empty_storage(&mut self) {
+        let mut storage = get_empty_storage();
+        for account in &self.rich_accounts {
+            make_address_rich(&mut storage, account.address);
+        }
+
+        let storage = StorageView::new(storage).to_rc_ptr();
+        self.storage = storage.clone();
+        self.vm = VM::new(self.l1_batch_env.clone(), self.system_env.clone(), storage);
+    }
+}
+
+/// Builder for [`VmTester`].
+#[derive(Debug)]
+pub(crate) struct VmTesterBuilder {
+    storage: Option<InMemoryStorage>,
+    l1_batch_env: Option<L1BatchEnv>,
+    system_env: SystemEnv,
+    rich_accounts: Vec<Account>,
+    custom_contracts: Vec<ContractToDeploy>,
+}
+
+impl VmTesterBuilder {
+    pub(crate) fn new() -> Self {
+        Self {
+            storage: None,
+            l1_batch_env: None,
+            system_env: default_system_env(),
+            rich_accounts: vec![],
+            custom_contracts: vec![],
+        }
+    }
+
+    pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self {
+        self.system_env = system_env;
+        self
+    }
+
+    pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self {
+        self.l1_batch_env = Some(l1_batch_env);
+        self
+    }
+
+    pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self {
+        self.storage = Some(storage);
+        self
+    }
+
+    pub(crate) fn with_base_system_smart_contracts(
+        mut self,
+        base_system_smart_contracts: BaseSystemContracts,
+    ) -> Self {
+        self.system_env.base_system_smart_contracts = base_system_smart_contracts;
+        self
+    }
+
+    pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self {
+        self.system_env.bootloader_gas_limit = gas_limit;
+        self
+    }
+
+    pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self {
+        self.system_env.execution_mode = execution_mode;
+        self
+    }
+
+    pub(crate) fn with_empty_in_memory_storage(mut self) -> Self {
+        self.storage = Some(get_empty_storage());
+        self
+    }
+
+    /// Creates the specified number of pre-funded accounts.
+    pub(crate) fn with_rich_accounts(mut self, number: u32) -> Self {
+        for i in 0..number {
+            self.rich_accounts.push(Account::from_seed(i));
+        }
+        self
+    }
+
+    pub(crate) fn rich_account(&self, index: usize) -> &Account {
+        &self.rich_accounts[index]
+    }
+
+    pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractToDeploy>) -> Self {
+        self.custom_contracts = contracts;
+        self
+    }
+
+    pub(crate) fn build<VM>(self) -> VmTester<VM>
+    where
+        VM: VmFactory<StorageView<InMemoryStorage>>,
+    {
+        let l1_batch_env = self
+            .l1_batch_env
+            .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1)));
+
+        let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage);
+        ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage);
+        let storage = StorageView::new(raw_storage).to_rc_ptr();
+        for account in &self.rich_accounts {
+            make_address_rich(storage.borrow_mut().inner_mut(), account.address);
+        }
+
+        let vm = VM::new(
+            l1_batch_env.clone(),
+            self.system_env.clone(),
+            storage.clone(),
+        );
+        VmTester {
+            vm,
+            system_env: self.system_env,
+            l1_batch_env,
+            storage,
+            test_contract: None,
+            rich_accounts: self.rich_accounts.clone(),
+        }
+    }
+}
+
+/// Test extensions for VM.
+pub(crate) trait TestedVm:
+    VmFactory<StorageView<InMemoryStorage>> + VmInterfaceHistoryEnabled
+{
+    type StateDump: fmt::Debug + PartialEq;
+
+    fn dump_state(&self) -> Self::StateDump;
+
+    fn gas_remaining(&mut self) -> u32;
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState;
+
+    /// Unlike [`Self::known_bytecode_hashes()`], the output should only include successfully decommitted bytecodes.
+    fn decommitted_hashes(&self) -> HashSet<U256>;
+
+    fn finish_batch_with_state_diffs(
+        &mut self,
+        diffs: Vec<StateDiffRecord>,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
+    ) -> VmExecutionResultAndLogs;
+
+    fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs;
+
+    fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]);
+
+    /// Includes bytecodes that have failed to decommit. Should exclude base system contract bytecodes (default AA / EVM emulator).
+    fn known_bytecode_hashes(&self) -> HashSet<U256>;
+
+    /// Returns `true` iff the decommit is fresh.
+    fn manually_decommit(&mut self, code_hash: H256) -> bool;
+
+    fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]);
+
+    fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]);
+
+    /// Reads storage accounting for changes made during the VM run.
+    fn read_storage(&mut self, key: StorageKey) -> U256;
+
+    fn verify_required_storage(&mut self, cells: &[(StorageKey, U256)]) {
+        for &(key, expected_value) in cells {
+            assert_eq!(
+                self.read_storage(key),
+                expected_value,
+                "Unexpected storage value at {key:?}"
+            );
+        }
+    }
+
+    /// Returns the current hash of the latest L2 block.
+    fn last_l2_block_hash(&self) -> H256;
+
+    /// Same as `start_new_l2_block`, but should skip consistency checks (to verify they are performed by the bootloader).
+    fn push_l2_block_unchecked(&mut self, block: L2BlockEnv);
+
+    /// Pushes a transaction with predefined refund value.
+ fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs similarity index 87% rename from core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs rename to core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs index 5ad3376b2ec..222fb3b7331 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs @@ -1,12 +1,12 @@ use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160}; +use super::{TestedVm, VmTester}; use crate::{ interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - VmRevertReason, + CurrentExecutionState, ExecutionResult, Halt, InspectExecutionMode, TxRevertReason, + VmExecutionResultAndLogs, VmInterfaceExt, VmRevertReason, }, - vm_latest::{tests::tester::vm_tester::VmTester, HistoryEnabled}, + versions::testonly::default_pubdata_builder, }; // FIXME: remove the dead code allow @@ -184,9 +184,7 @@ impl TransactionTestInfo { } } -impl VmTester { - // FIXME: remove allow dead code - #[allow(dead_code)] +impl VmTester { pub(crate) fn execute_and_verify_txs( &mut self, txs: &[TransactionTestInfo], @@ -194,7 +192,7 @@ impl VmTester { for tx_test_info in txs { self.execute_tx_and_verify(tx_test_info.clone()); } - self.vm.execute(VmExecutionMode::Batch); + self.vm.finish_batch(default_pubdata_builder()); let mut state = self.vm.get_current_execution_state(); state.used_contract_hashes.sort(); state @@ -204,19 +202,29 @@ impl VmTester { &mut self, tx_test_info: TransactionTestInfo, ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result + execute_tx_and_verify(&mut self.vm, tx_test_info) + } +} + +fn execute_tx_and_verify( + vm: &mut impl TestedVm, + tx_test_info: TransactionTestInfo, +) -> VmExecutionResultAndLogs { + let inner_state_before = vm.dump_state(); + vm.make_snapshot(); + vm.push_transaction(tx_test_info.tx.clone()); + let result = vm.execute(InspectExecutionMode::OneTx); + tx_test_info.verify_result(&result); + if tx_test_info.should_rollback() { + vm.rollback_to_the_latest_snapshot(); + let inner_state_after = vm.dump_state(); + pretty_assertions::assert_eq!( + inner_state_before, + inner_state_after, + "Inner state before and after rollback should be equal" + ); + } else { + vm.pop_snapshot_no_rollback(); } + result } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/testonly/tracing_execution_error.rs similarity index 55% rename from core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs rename to core/lib/multivm/src/versions/testonly/tracing_execution_error.rs index 138e8041e6a..e87e6eb7c06 100644 --- 
a/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/testonly/tracing_execution_error.rs @@ -1,33 +1,45 @@ -use zksync_types::{Execute, H160}; +use zksync_contracts::load_contract; +use zksync_types::{Address, Execute}; +use super::{ + read_error_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm, BASE_SYSTEM_CONTRACTS, +}; use crate::{ interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_1_4_2::tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, + versions::testonly::tester::{ExpectedError, TransactionTestInfo}, }; -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) +fn get_execute_error_calldata() -> Vec { + let test_contract = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", + ); + let function = test_contract.function("require_short").unwrap(); + function + .encode_input(&[]) + .expect("failed to encode parameters") +} + +pub(crate) fn test_tracing_of_execution_errors() { + let contract_address = Address::repeat_byte(1); + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) + .with_custom_contracts(vec![ContractToDeploy::new( + read_error_contract(), + contract_address, + )]) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address, + contract_address: Some(contract_address), calldata: get_execute_error_calldata(), value: Default::default(), - factory_deps: Some(vec![]), + factory_deps: vec![], }, None, ); diff --git a/core/lib/multivm/src/versions/testonly/transfer.rs b/core/lib/multivm/src/versions/testonly/transfer.rs new file mode 100644 index 00000000000..3572adba147 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/transfer.rs @@ -0,0 +1,208 @@ +use ethabi::Token; +use zksync_contracts::{load_contract, read_bytecode}; +use zksync_types::{utils::storage_key_for_eth_balance, Address, Execute, U256}; +use zksync_utils::u256_to_h256; + +use super::{ + default_pubdata_builder, get_empty_storage, tester::VmTesterBuilder, ContractToDeploy, TestedVm, +}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; + +enum TestOptions { + Send(U256), + Transfer(U256), +} + +fn test_send_or_transfer(test_option: TestOptions) { + let test_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let recipient_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", + ); + let test_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + + let test_contract_address = Address::repeat_byte(1); + let recipient_address = Address::repeat_byte(2); + + let (value, calldata) = match test_option { + TestOptions::Send(value) => ( + value, + test_abi + .function("send") + .unwrap() + .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) + .unwrap(), + ), + 
TestOptions::Transfer(value) => ( + value, + test_abi + .function("transfer") + .unwrap() + .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) + .unwrap(), + ), + }; + + let mut storage = get_empty_storage(); + storage.set_value( + storage_key_for_eth_balance(&test_contract_address), + u256_to_h256(value), + ); + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ + ContractToDeploy::new(test_bytecode, test_contract_address), + ContractToDeploy::new(recipient_bytecode, recipient_address), + ]) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(test_contract_address), + calldata, + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let tx_result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !tx_result.result.is_failed(), + "Transaction wasn't successful" + ); + + let batch_result = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; + assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); + + let new_recipient_balance = vm.get_eth_balance(recipient_address); + assert_eq!(new_recipient_balance, value); +} + +pub(crate) fn test_send_and_transfer() { + test_send_or_transfer::(TestOptions::Send(U256::zero())); + test_send_or_transfer::(TestOptions::Send(U256::from(10).pow(18.into()))); + test_send_or_transfer::(TestOptions::Transfer(U256::zero())); + test_send_or_transfer::(TestOptions::Transfer(U256::from(10).pow(18.into()))); +} + +fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { + let test_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let reentrant_recipient_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", + ); + let test_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let reentrant_recipient_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", + ); + + let test_contract_address = Address::repeat_byte(1); + let reentrant_recipient_address = Address::repeat_byte(2); + + let (value, calldata) = match test_option { + TestOptions::Send(value) => ( + value, + test_abi + .function("send") + .unwrap() + .encode_input(&[ + Token::Address(reentrant_recipient_address), + Token::Uint(value), + ]) + .unwrap(), + ), + TestOptions::Transfer(value) => ( + value, + test_abi + .function("transfer") + .unwrap() + .encode_input(&[ + Token::Address(reentrant_recipient_address), + Token::Uint(value), + ]) + .unwrap(), + ), + }; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ + ContractToDeploy::new(test_bytecode, test_contract_address), + ContractToDeploy::new(reentrant_recipient_bytecode, reentrant_recipient_address), + ]) + .build::(); + + // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. 
+ let account = &mut vm.rich_accounts[0]; + let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(reentrant_recipient_address), + calldata: reentrant_recipient_abi + .function("setX") + .unwrap() + .encode_input(&[]) + .unwrap(), + value: U256::from(1), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let tx1_result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !tx1_result.result.is_failed(), + "Transaction 1 wasn't successful" + ); + + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(test_contract_address), + calldata, + value, + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx2); + let tx2_result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + tx2_result.result.is_failed(), + "Transaction 2 should have failed, but it succeeded" + ); + + let batch_result = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; + assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); +} + +pub(crate) fn test_reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_or_transfer::(TestOptions::Send(U256::zero())); + test_reentrancy_protection_send_or_transfer::(TestOptions::Send( + U256::from(10).pow(18.into()), + )); + test_reentrancy_protection_send_or_transfer::(TestOptions::Transfer(U256::zero())); + test_reentrancy_protection_send_or_transfer::(TestOptions::Transfer( + U256::from(10).pow(18.into()), + )); +} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs b/core/lib/multivm/src/versions/testonly/upgrade.rs similarity index 72% rename from core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs rename to core/lib/multivm/src/versions/testonly/upgrade.rs index 2af2928b1c4..359f19faedb 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/testonly/upgrade.rs @@ -1,52 +1,43 @@ -use zk_evm_1_4_1::aux_structures::Timestamp; use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; use zksync_test_account::TxType; use zksync_types::{ ethabi::{Contract, Token}, get_code_key, get_known_code_key, protocol_upgrade::ProtocolUpgradeTxCommonData, Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, + CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H256, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, }; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_1_4_2::tests::{ - tester::VmTesterBuilder, - utils::{read_complex_upgrade, verify_required_storage}, - }, +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; + +use super::{ + get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, read_test_contract, + tester::VmTesterBuilder, TestedVm, +}; +use crate::interface::{ + ExecutionResult, Halt, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, }; /// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: /// - This transaction must be the only one in block /// - If present, this transaction must be the first one in block -#[test] -fn 
test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - +pub(crate) fn test_protocol_upgrade_is_first() { + let mut storage = get_empty_storage(); let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); // Here we just use some random transaction of protocol upgrade type: let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { // The bytecode hash to put on an address bytecode_hash, // The address on which to deploy the bytecode hash to - address: H160::random(), + address: Address::repeat_byte(1), // Whether to run the constructor on the force deployment call_constructor: false, // The value with which to initialize a contract @@ -60,7 +51,7 @@ fn test_protocol_upgrade_is_first() { // The bytecode hash to put on an address bytecode_hash, // The address on which to deploy the bytecode hash to - address: H160::random(), + address: Address::repeat_byte(2), // Whether to run the constructor on the force deployment call_constructor: false, // The value with which to initialize a contract @@ -82,9 +73,9 @@ fn test_protocol_upgrade_is_first() { vm.vm.push_transaction(normal_l1_transaction.clone()); vm.vm.push_transaction(another_protocol_upgrade_transaction); - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( result.result, ExecutionResult::Halt { @@ -98,8 +89,8 @@ fn test_protocol_upgrade_is_first() { vm.vm.push_transaction(normal_l1_transaction.clone()); vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert_eq!( result.result, ExecutionResult::Halt { @@ -112,31 +103,26 @@ fn test_protocol_upgrade_is_first() { vm.vm.push_transaction(protocol_upgrade_transaction); vm.vm.push_transaction(normal_l1_transaction); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(InspectExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed()); } /// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); +pub(crate) fn test_force_deploy_upgrade() { + let mut storage = get_empty_storage(); let bytecode_hash = hash_bytecode(&read_test_contract()); - let known_code_key = get_known_code_key(&bytecode_hash); // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); + storage.set_value(known_code_key, u256_to_h256(1.into())); + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); - let address_to_deploy = H160::random(); + let address_to_deploy = Address::repeat_byte(1); // Here we just use some random transaction of protocol upgrade type: let transaction = get_forced_deploy_tx(&[ForceDeployment { // The bytecode hash to put on an address @@ -153,65 +139,46 @@ fn test_force_deploy_upgrade() { vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "The force upgrade was not successful" ); - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - + let expected_slots = [( + get_code_key(&address_to_deploy), + h256_to_u256(bytecode_hash), + )]; // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); + vm.vm.verify_required_storage(&expected_slots); } -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - +/// Here we show how the work with the complex upgrader could be done. 
+pub(crate) fn test_complex_upgrader() { + let mut storage = get_empty_storage(); let bytecode_hash = hash_bytecode(&read_complex_upgrade()); let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - // Let's assume that the bytecode for the implementation of the complex upgrade // is already deployed in some address in user space - let upgrade_impl = H160::random(); + let upgrade_impl = Address::repeat_byte(1); let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( + storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + storage.set_value( get_known_code_key(&msg_sender_test_hash), u256_to_h256(1.into()), ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); + storage.set_value(account_code_key, bytecode_hash); + storage.store_factory_dep(bytecode_hash, read_complex_upgrade()); + storage.store_factory_dep(msg_sender_test_hash, read_msg_sender_test()); - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let address_to_deploy1 = Address::repeat_byte(0xfe); + let address_to_deploy2 = Address::repeat_byte(0xff); let transaction = get_complex_upgrade_tx( upgrade_impl, @@ -221,19 +188,24 @@ fn test_complex_upgrader() { ); vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!( !result.result.is_failed(), "The force upgrade was not successful" ); - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), + let expected_slots = [ + ( + get_code_key(&address_to_deploy1), + h256_to_u256(bytecode_hash), + ), + ( + get_code_key(&address_to_deploy2), + h256_to_u256(bytecode_hash), + ), ]; - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); + vm.vm.verify_required_storage(&expected_slots); } #[derive(Debug, Clone)] @@ -274,9 +246,9 @@ fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, - factory_deps: None, + factory_deps: vec![], value: U256::zero(), }; @@ -324,9 +296,9 @@ fn get_complex_upgrade_tx( .unwrap(); let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, + contract_address: Some(COMPLEX_UPGRADER_ADDRESS), calldata: complex_upgrader_calldata, - factory_deps: None, + factory_deps: vec![], value: U256::zero(), }; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs index da4e2f5350f..7870b1ff744 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs @@ -5,7 +5,7 @@ use zk_evm_1_3_3::{ vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use 
zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; @@ -221,13 +221,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { ) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - pub(crate) fn calculate_computational_gas_used< S: WriteStorage, T: PubdataSpentTracer, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 5692f103da3..d9768652c2f 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -1,14 +1,15 @@ -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use zksync_types::Transaction; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, - L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, + L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::old::TracerDispatcher, @@ -22,22 +23,45 @@ pub struct Vm { pub(crate) system_env: SystemEnv, } +impl Vm { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics { + event_sink_inner: self.vm.state.event_sink.get_size(), + event_sink_history: self.vm.state.event_sink.get_history_size(), + memory_inner: self.vm.state.memory.get_size(), + memory_history: self.vm.state.memory.get_history_size(), + decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), + decommittment_processor_history: self + .vm + .state + .decommittment_processor + .get_history_size(), + storage_inner: self.vm.state.storage.get_size(), + storage_history: self.vm.state.storage.get_history_size(), + } + } +} + impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { - crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( - &mut self.vm, - &tx, - self.system_env.execution_mode.glue_into(), - None, - ) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + let compressed_bytecodes = + crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( + &mut self.vm, + &tx, + self.system_env.execution_mode.glue_into(), + None, + ); + PushTransactionResult { + compressed_bytecodes: compressed_bytecodes.into(), + } } fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { if let Some(storage_invocations) = tracer.storage_invocations { self.vm @@ -46,7 +70,7 @@ impl VmInterface for Vm { } match execution_mode { - VmExecutionMode::OneTx => { + InspectExecutionMode::OneTx => { match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => { let enable_call_tracer = tracer @@ -69,8 +93,7 @@ impl VmInterface for Vm { .glue_into(), } } - VmExecutionMode::Batch => 
self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -160,24 +183,7 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: self.vm.state.event_sink.get_size(), - event_sink_history: self.vm.state.event_sink.get_history_size(), - memory_inner: self.vm.state.memory.get_size(), - memory_history: self.vm.state.memory.get_history_size(), - decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), - decommittment_processor_history: self - .vm - .state - .decommittment_processor - .get_history_size(), - storage_inner: self.vm.state.storage.get_size(), - storage_history: self.vm.state.storage.get_history_size(), - } - } - - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_1_3_2::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs index d1acdf7708e..fd4d483fba5 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs @@ -442,7 +442,7 @@ pub fn get_bootloader_memory( let mut previous_compressed: usize = 0; let mut already_included_txs_size = 0; for (tx_index_in_block, tx) in txs.into_iter().enumerate() { - let compressed_bytecodes = predefined_compressed_bytecodes[tx_index_in_block].clone(); + let compressed_bytecodes = &predefined_compressed_bytecodes[tx_index_in_block]; let mut total_compressed_len_words = 0; for i in compressed_bytecodes.iter() { @@ -475,7 +475,7 @@ pub fn push_transaction_to_bootloader_memory( tx: &Transaction, execution_mode: TxExecutionMode, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx: TransactionData = tx.clone().into(); let block_gas_per_pubdata_byte = vm.block_context.context.block_gas_price_per_pubdata(); let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); @@ -485,7 +485,7 @@ pub fn push_transaction_to_bootloader_memory( execution_mode, overhead, explicit_compressed_bytecodes, - ); + ) } pub fn push_raw_transaction_to_bootloader_memory( @@ -494,7 +494,7 @@ pub fn push_raw_transaction_to_bootloader_memory>, -) { +) -> Vec { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -555,7 +555,7 @@ pub fn push_raw_transaction_to_bootloader_memory, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let overhead_gas = tx.overhead_gas(block_gas_per_pubdata); let trusted_gas_limit = tx.trusted_gas_limit(block_gas_per_pubdata); @@ -604,7 +605,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( predefined_overhead: u32, trusted_gas_limit: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let mut memory: Vec<(usize, U256)> = Vec::default(); let bootloader_description_offset = @@ -640,8 +641,8 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; let memory_addition: Vec<_> = compressed_bytecodes - .into_iter() - .flat_map(|x| bytecode::encode_call(&x)) + .iter() + 
.flat_map(bytecode::encode_call) .collect(); let memory_addition = bytes_to_be_words(memory_addition); diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs index 393eb043cb7..1acf75b27e1 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs index 2160c4b56a0..cc199fef941 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs @@ -99,6 +99,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs index 71ae20d4406..3a3b22ea246 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs deleted file mode 100644 index ba699e7558b..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs +++ /dev/null @@ -1,284 +0,0 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use zk_evm_1_4_1::{ - aux_structures::Timestamp, zkevm_opcode_defs::system_params::MAX_PUBDATA_PER_BLOCK, -}; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log, - writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BOOTLOADER_BATCH_TIP_OVERHEAD, - tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder}, - tracers::PubdataTracer, - HistoryEnabled, TracerDispatcher, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger 
= load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .collect::>(); - - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(encoded_calls)]) - .unwrap() -} - -fn execute_test(test_data: L1MessengerTestData) -> u32 { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, - calldata: data, - value: U256::zero(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs, - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!(!result.result.is_failed(), "Batch wasn't successful"); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - ergs_before - ergs_after -} - -fn generate_state_diffs( - repeated_writes: 
bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -#[test] -fn test_dry_run_upper_bound() { - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let max_logs = execute_test(L1MessengerTestData { - l2_to_l1_logs: L2ToL1Log::MIN_L2_L1_LOGS_TREE_SIZE, - ..Default::default() - }); - - let max_messages = execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; 0]; MAX_PUBDATA_PER_BLOCK as usize / L2ToL1Log::SERIALIZED_SIZE], - ..Default::default() - }); - - let long_message = execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize]; 1], - ..Default::default() - }); - - let max_bytecodes = execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long - bytecodes: vec![vec![0; 32]; MAX_PUBDATA_PER_BLOCK as usize / 32], - ..Default::default() - }); - - let long_bytecode = execute_test(L1MessengerTestData { - // We have to add 48 since a valid bytecode must have an odd number of 32 byte words - bytecodes: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize + 48]; 1], - ..Default::default() - }); - - let lots_of_small_repeated_writes = execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_PUBDATA_PER_BLOCK as usize / 5), - ..Default::default() - }); - - let lots_of_big_repeated_writes = execute_test(L1MessengerTestData { - // Each big write will approximately require 32 bytes to encode - state_diffs: generate_state_diffs(true, false, MAX_PUBDATA_PER_BLOCK as usize / 32), - ..Default::default() - }); - - let lots_of_small_initial_writes = execute_test(L1MessengerTestData { - // Each initial write will take at least 32 bytes for derived key + 5 bytes for value - state_diffs: generate_state_diffs(false, true, MAX_PUBDATA_PER_BLOCK as usize / 37), - ..Default::default() - }); - - let lots_of_large_initial_writes = execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 32 bytes for value - state_diffs: generate_state_diffs(false, false, MAX_PUBDATA_PER_BLOCK as usize / 64), - ..Default::default() - }); - - let max_used_gas = vec![ - max_logs, - max_messages, - long_message, - max_bytecodes, - long_bytecode, - lots_of_small_repeated_writes, - lots_of_big_repeated_writes, - lots_of_small_initial_writes, - lots_of_large_initial_writes, - ] - .into_iter() - .max() - 
.unwrap(); - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. - assert!( - max_used_gas * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs deleted file mode 100644 index 47e047ebbf7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs +++ /dev/null @@ -1,56 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs deleted file mode 100644 index 9db5e7326e7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs deleted file mode 100644 index 1a4c026a23f..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs deleted file mode 100644 index ecc2fdfe6c0..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled}, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.00226, 0.00077, 0.1195, 0.1429, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs deleted file mode 100644 index be8e253c6d8..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs +++ /dev/null @@ -1,78 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - HistoryEnabled, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs deleted file mode 100644 index 9dfda9e1a68..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
-#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs deleted file mode 100644 index a7cbcd8e295..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - 
assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs deleted file mode 100644 index 75517138db3..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_1_4_1::tests::tester::VmTesterBuilder; -use crate::vm_1_4_1::types::inputs::system_env::TxExecutionMode; -use crate::vm_1_4_1::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs deleted file mode 100644 index 7644064f4af..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs deleted file mode 100644 index 83e0f1715b8..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs +++ /dev/null @@ -1,189 +0,0 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - Execute, ExecuteTransactionCommon, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} - -#[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
- - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(&params).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: Some(L1_MESSENGER_ADDRESS), - value: 0.into(), - factory_deps: None, - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs deleted file mode 100644 index a07608121bc..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// `mod invalid_bytecode;` -mod block_tip; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs deleted file mode 100644 index 915a802b1e8..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs +++ /dev/null @@ -1,188 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_1_4_1::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From<NonceHolderTestMode> for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: 
Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as 
used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs deleted file mode 100644 index 37e871fbc70..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs +++ /dev/null @@ -1,136 +0,0 @@ -use zk_evm_1_4_1::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs deleted file mode 100644 index 8700eb14b53..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs +++ /dev/null @@ -1,166 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs deleted file mode 100644 index aebc956e673..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs deleted file mode 100644 index 2ae942c2652..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_1::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct 
nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for calculating the number 
of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer> for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs deleted file mode 100644 index 384bc4cf325..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, -}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - 
let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs deleted file mode 100644 index 11e9d7fd6df..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - vm_1_4_1::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d980..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs deleted file mode 100644 index 443acf71676..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - 
VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_1_4_1::{tests::tester::vm_tester::VmTester, HistoryEnabled}, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - 
TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs deleted file mode 100644 index 24bd0b4d0bc..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - 
self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs deleted file mode 100644 index 02c7590c1be..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_1_4_1::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs deleted file mode 100644 index af3701d919f..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs +++ /dev/null @@ -1,355 +0,0 @@ -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_1_4_1::{ - tests::{ - 
tester::VmTesterBuilder, - utils::{read_complex_upgrade, verify_required_storage}, - }, - HistoryEnabled, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an 
address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs deleted file mode 100644 index da69c107a20..00000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs +++ /dev/null @@ -1,121 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_1_4_1::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_complex_upgrade() -> 
Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs index 238804bc7fc..6f927c5c99a 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_1::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_1_4_1::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs index d07732ae435..c1ca93152a0 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 68c8e92a03a..af483feedd7 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -1,17 +1,19 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_4_1::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_1::{ @@ -82,18 +84,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. 
fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(tracer, execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -124,12 +131,12 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + None, + ); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs index 600ab83bf48..182f6eff441 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs index d42d1880933..f6e49cd8b14 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs @@ -96,6 +96,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs index 92a2eaa650c..754b8476182 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs deleted file mode 100644 index 8578b73ccfa..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs +++ /dev/null @@ -1,399 +0,0 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log, - writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder}, - tracers::PubdataTracer, - TracerDispatcher, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .collect::>(); - - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(encoded_calls)]) - .unwrap() -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) -> 
TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, - calldata: data, - value: U256::zero(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful for input: {:?}", - test_data - ); - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used - ); - - TestStatistics { - max_used_gas: ergs_before - ergs_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has odd number of 32 byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if 
length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} - -#[test] -#[allow(clippy::vec_init_then_push)] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let mut statistics = Vec::new(); - - // max logs - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }); - - // max messages - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }); - - // long message - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }); - - // max bytecodes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
- // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }); - - // long bytecode - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; 1], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }); - - // lots of small repeated writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }); - - // lots of big repeated writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs(true, false, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }); - - // lots of small initial writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs(false, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }); - - // lots of large initial writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs(false, false, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }); - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
- let max_used_gas = statistics - .iter() - .map(|s| (s.statistics.max_used_gas, s.tag.clone())) - .max() - .unwrap(); - assert!( - max_used_gas.0 * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", - max_used_gas.1, - max_used_gas.0, - BOOTLOADER_BATCH_TIP_OVERHEAD - ); - - let circuit_statistics = statistics - .iter() - .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) - .max() - .unwrap(); - assert!( - circuit_statistics.0 * 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", - circuit_statistics.1, - circuit_statistics.0, - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD - ); - - let execution_metrics_size = statistics - .iter() - .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) - .max() - .unwrap(); - assert!( - execution_metrics_size.0 * 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", - execution_metrics_size.1, - execution_metrics_size.0, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs deleted file mode 100644 index dd91d6d94a9..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs +++ /dev/null @@ -1,40 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs deleted file mode 100644 index 2fafb7e51aa..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs deleted file mode 100644 index b84e9d32126..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs +++ /dev/null @@ -1,44 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs deleted file mode 100644 index cfe3e1bfc23..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let 
big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs deleted file mode 100644 index c79fcd8ba8e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_1_4_2::tests::tester::VmTesterBuilder; -use crate::vm_1_4_2::types::inputs::system_env::TxExecutionMode; -use crate::vm_1_4_2::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs deleted file mode 100644 index f722890f474..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
- -use zk_evm_1_4_1::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = 
default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - 
TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs deleted file mode 100644 index a07608121bc..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// `mod invalid_bytecode;` -mod block_tip; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs deleted file mode 100644 index 9f1be4ec947..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs +++ /dev/null @@ -1,187 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_1_4_2::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs deleted file mode 100644 index 0a799288204..00000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs +++ /dev/null @@ -1,135 +0,0 @@ -use zk_evm_1_4_1::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
- let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs deleted file mode 100644 index 5586450f34b..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_test_account::TxType; -use zksync_types::{utils::deployed_address_create, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::PrestateTracer, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, -}; - -#[test] -fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - vm.deploy_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm.test_contract.unwrap(), - false, - Default::default(), - true, - TxType::L2, - ); - vm.vm.push_transaction(tx1); - - let contract_address = vm.test_contract.unwrap(); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - assert!(prestate_result.1.contains_key(&contract_address)); -} - -#[test] -fn test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); - vm.test_contract = Some(deployed_address); - - // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - 
.get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce2 = tx2.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); - - let account = &mut vm.rich_accounts[0]; - - //enter ether to contract to see difference in the balance post execution - let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), - calldata: Default::default(), - value: U256::from(100000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); - - let tx1 = Execute { - contract_address: deployed_address2, - calldata: Default::default(), - value: U256::from(200000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx1, None)); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm - .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - //assert that the pre-state contains both deployed contracts with balance zero - assert!(prestate_result.0.contains_key(&deployed_address)); - assert!(prestate_result.0.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.0[&deployed_address].balance, - Some(U256::zero()) - ); - assert_eq!( - prestate_result.0[&deployed_address2].balance, - Some(U256::zero()) - ); - - //assert that the post-state contains both deployed contracts with the correct balance - assert!(prestate_result.1.contains_key(&deployed_address)); - assert!(prestate_result.1.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.1[&deployed_address].balance, - Some(U256::from(100000)) - ); - assert_eq!( - prestate_result.1[&deployed_address2].balance, - Some(U256::from(200000)) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs deleted file mode 100644 index 401c2c12a43..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. 
- assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm.push_raw_transaction( - tx.clone(), - overhead, - result.refunds.gas_refunded as u32, - true, - ); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund as u32, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs deleted file mode 100644 index 2ce18cc0136..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_1::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), 
false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. 
- } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for calculating the number of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer> for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. 
- } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs deleted file mode 100644 index d6c072d1b1e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - vm_1_4_2::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d980..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs deleted file mode 100644 index cb81c4c5ed7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - 
VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_1_4_2::tests::tester::vm_tester::VmTester, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - 
TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs deleted file mode 100644 index 44f861f8d33..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> 
Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs deleted file mode 100644 index 5655e90fb4e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs +++ /dev/null @@ -1,121 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_1_4_2::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode 
parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs index ffe65b5e050..6c4f737f9e9 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_1::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_1_4_2::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs index d07732ae435..c1ca93152a0 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index d6e1fbc68a8..e7c8e7acdd9 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -1,19 +1,19 @@ -use std::mem; +use std::{mem, rc::Rc}; use circuit_sequencer_api_1_4_2::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, 
vm_1_4_2::{ @@ -84,18 +84,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(mem::take(tracer), execution_mode, None) + self.inspect_inner(mem::take(tracer), execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -126,12 +131,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(TracerDispatcher::default(), VmExecutionMode::Batch, None); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs index 1a1c620c2b2..c97d3ff30e4 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs index 79669eddd56..b8b939f8673 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs @@ -93,6 +93,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs index 46f8bc2f400..015d5acd340 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs deleted file mode 100644 index 57229abb097..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs +++ /dev/null @@ -1,56 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs deleted file mode 100644 index ad1b0f26036..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs deleted file mode 100644 index e9df4fa80ff..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs deleted file mode 100644 index b0cffa7d3c8..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs +++ /dev/null @@ -1,66 +0,0 @@ -use circuit_sequencer_api_1_4_0::geometry_config::get_geometry_config; -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let statistic = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - assert!(statistic.main_vm > f32::EPSILON); - assert!(statistic.ram_permutation > f32::EPSILON); - assert!(statistic.storage_application > f32::EPSILON); - assert!(statistic.storage_sorter > f32::EPSILON); - assert!(statistic.code_decommitter > f32::EPSILON); - assert!(statistic.code_decommitter_sorter > f32::EPSILON); - assert!(statistic.log_demuxer > f32::EPSILON); - assert!(statistic.events_sorter > f32::EPSILON); - assert!(statistic.keccak256 > f32::EPSILON); - // Single `ecrecover` should be used to validate tx signature. - assert_eq!( - statistic.ecrecover, - 1.0 / get_geometry_config().cycles_per_ecrecover_circuit as f32 - ); - // `sha256` shouldn't be used. 
- assert_eq!(statistic.sha256, 0.0); - - const EXPECTED_CIRCUITS_USED: f32 = 4.6363; - let delta = (statistic.total_f32() - EXPECTED_CIRCUITS_USED) / EXPECTED_CIRCUITS_USED; - - if delta.abs() > 0.1 { - panic!( - "Estimation differs from expected result by too much: {}%, expected value: {}, got {}", - delta * 100.0, - EXPECTED_CIRCUITS_USED, - statistic.total_f32(), - ); - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs deleted file mode 100644 index a8c20cfebc1..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs +++ /dev/null @@ -1,76 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs deleted file mode 100644 index 637fd94c1c8..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs deleted file mode 100644 index 658bcd75b05..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let 
result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs deleted file mode 100644 index 079e6d61b6c..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_boojum_integration::tests::tester::VmTesterBuilder; -use crate::vm_boojum_integration::types::inputs::system_env::TxExecutionMode; -use crate::vm_boojum_integration::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs deleted file mode 100644 index 67901490edf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs deleted file mode 100644 index b547f346d28..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs +++ /dev/null @@ -1,139 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rolling hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in L1Messenger - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs deleted file mode 100644 index d637d583c0e..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
- -use zk_evm_1_4_0::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
-
-    let l1_batch = default_l1_batch(L1BatchNumber(1));
-    let first_l2_block = L2BlockEnv {
-        number: 0,
-        timestamp: l1_batch.timestamp,
-        prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)),
-        max_virtual_blocks_to_create: 1,
-    };
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_l1_batch_env(l1_batch)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-
-    vm.vm.push_transaction(l1_tx);
-
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp);
-
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert_eq!(
-        res.result,
-        ExecutionResult::Halt {
-            reason: Halt::FailedToSetL2Block(
-                "L2 block number is never expected to be zero".to_string()
-            )
-        }
-    );
-}
-
-fn test_same_l2_block(
-    expected_error: Option<Halt>,
-    override_timestamp: Option<u64>,
-    override_prev_block_hash: Option<H256>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-    l1_batch.timestamp = 1;
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_l1_batch_env(l1_batch)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-    vm.vm.push_transaction(l1_tx.clone());
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!res.result.is_failed());
-
-    let mut current_l2_block = vm.vm.batch_env.first_l2_block;
-
-    if let Some(timestamp) = override_timestamp {
-        current_l2_block.timestamp = timestamp;
-    }
-    if let Some(prev_block_hash) = override_prev_block_hash {
-        current_l2_block.prev_block_hash = prev_block_hash;
-    }
-
-    if (None, None) == (override_timestamp, override_prev_block_hash) {
-        current_l2_block.max_virtual_blocks_to_create = 0;
-    }
-
-    vm.vm.push_transaction(l1_tx);
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-
-    if let Some(err) = expected_error {
-        assert_eq!(result.result, ExecutionResult::Halt { reason: err });
-    } else {
-        assert_eq!(result.result, ExecutionResult::Success { output: vec![] });
-    }
-}
-
-#[test]
-fn test_l2_block_same_l2_block() {
-    // This test aims to test the case when there are multiple transactions inside the same L2 block.
-
-    // Case 1: Incorrect timestamp
-    test_same_l2_block(
-        Some(Halt::FailedToSetL2Block(
-            "The timestamp of the same L2 block must be same".to_string(),
-        )),
-        Some(0),
-        None,
-    );
-
-    // Case 2: Incorrect previous block hash
-    test_same_l2_block(
-        Some(Halt::FailedToSetL2Block(
-            "The previous hash of the same L2 block must be same".to_string(),
-        )),
-        None,
-        Some(H256::zero()),
-    );
-
-    // Case 3: Correct continuation of the same L2 block
-    test_same_l2_block(None, None, None);
-}
-
-fn test_new_l2_block(
-    first_l2_block: L2BlockEnv,
-    overriden_second_block_number: Option<u32>,
-    overriden_second_block_timestamp: Option<u64>,
-    overriden_second_block_prev_block_hash: Option<H256>,
-    expected_error: Option<Halt>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-    l1_batch.timestamp = 1;
-    l1_batch.first_l2_block = first_l2_block;
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-
-    // Firstly we execute the first transaction
-    vm.vm.push_transaction(l1_tx.clone());
-    vm.vm.execute(VmExecutionMode::OneTx);
-
-    let mut second_l2_block = vm.vm.batch_env.first_l2_block;
-    second_l2_block.number += 1;
-    second_l2_block.timestamp += 1;
-    second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash();
-
-    if let Some(block_number) = overriden_second_block_number {
-        second_l2_block.number = block_number;
-    }
-    if let Some(timestamp) = overriden_second_block_timestamp {
-        second_l2_block.timestamp = timestamp;
-    }
-    if let Some(prev_block_hash) = overriden_second_block_prev_block_hash {
-        second_l2_block.prev_block_hash = prev_block_hash;
-    }
-
-    vm.vm.bootloader_state.push_l2_block(second_l2_block);
-
-    vm.vm.push_transaction(l1_tx);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    if let Some(err) = expected_error {
-        assert_eq!(result.result, ExecutionResult::Halt { reason: err });
-    } else {
-        assert_eq!(result.result, ExecutionResult::Success { output: vec![] });
-    }
-}
-
-#[test]
-fn test_l2_block_new_l2_block() {
-    // This test is aimed to cover potential issue
-
-    let correct_first_block = L2BlockEnv {
-        number: 1,
-        timestamp: 1,
-        prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)),
-        max_virtual_blocks_to_create: 1,
-    };
-
-    // Case 1: Block number increasing by more than 1
-    test_new_l2_block(
-        correct_first_block,
-        Some(3),
-        None,
-        None,
-        Some(Halt::FailedToSetL2Block(
-            "Invalid new L2 block number".to_string(),
-        )),
-    );
-
-    // Case 2: Timestamp not increasing
-    test_new_l2_block(
-        correct_first_block,
-        None,
-        Some(1),
-        None,
-        Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())),
-    );
-
-    // Case 3: Incorrect previous block hash
-    test_new_l2_block(
-        correct_first_block,
-        None,
-        None,
-        Some(H256::zero()),
-        Some(Halt::FailedToSetL2Block(
-            "The current L2 block hash is incorrect".to_string(),
-        )),
-    );
-
-    // Case 4: Correct new block
-    test_new_l2_block(correct_first_block, None, None, None, None);
-}
-
-#[allow(clippy::too_many_arguments)]
-fn test_first_in_batch(
-    miniblock_timestamp: u64,
-    miniblock_number: u32,
-    pending_txs_hash: H256,
-    batch_timestamp: u64,
-    new_batch_timestamp: u64,
-    batch_number: u32,
-    proposed_block: L2BlockEnv,
-    expected_error: Option<Halt>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-
l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + 
TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs deleted file mode 100644 index 95377232b3e..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs deleted file mode 100644 index 44ba3e4e323..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs +++ /dev/null @@ -1,188 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_boojum_integration::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. 
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs deleted file mode 100644 index 516331d574f..00000000000 --- 
a/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs +++ /dev/null @@ -1,136 +0,0 @@ -use zk_evm_1_4_0::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 sha256 calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 ecrecover call was made (it's done during tx validation). 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs deleted file mode 100644 index 521bd81f2ef..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs +++ /dev/null @@ -1,167 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs deleted file mode 100644 index 90c3206b24b..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs deleted file mode 100644 index cfaf1952c70..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_0::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), 
TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} 
-
-/// Tracer responsible for calculating the number of storage invocations and
-/// stopping the VM execution if the limit is reached.
-impl<S: WriteStorage, H: HistoryMode> DynTracer<S, SimpleMemory<H>> for MaxRecursionTracer {}
-
-impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for MaxRecursionTracer {
-    fn finish_cycle(
-        &mut self,
-        state: &mut ZkSyncVmState<S, H>,
-        _bootloader_state: &mut BootloaderState,
-    ) -> TracerExecutionStatus {
-        let current_depth = state.local_state.callstack.depth();
-
-        if current_depth > self.max_recursion_depth {
-            TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish)
-        } else {
-            TracerExecutionStatus::Continue
-        }
-    }
-}
-
-#[test]
-fn test_layered_rollback() {
-    // This test checks that the layered rollbacks work correctly, i.e.
-    // the rollback by the operator will always revert all the changes
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-    let loadnext_contract = get_loadnext_contract().bytecode;
-
-    let DeployContractsTx {
-        tx: deploy_tx,
-        address,
-        ..
-    } = account.get_deploy_tx(
-        &loadnext_contract,
-        Some(&[Token::Uint(0.into())]),
-        TxType::L2,
-    );
-    vm.vm.push_transaction(deploy_tx);
-    let deployment_res = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!deployment_res.result.is_failed(), "transaction failed");
-
-    let loadnext_transaction = account.get_loadnext_transaction(
-        address,
-        LoadnextContractExecutionParams {
-            writes: 1,
-            recursive_calls: 20,
-            ..LoadnextContractExecutionParams::empty()
-        },
-        TxType::L2,
-    );
-
-    let nonce_val = vm
-        .vm
-        .state
-        .storage
-        .storage
-        .read_from_storage(&get_nonce_key(&account.address));
-
-    vm.vm.make_snapshot();
-
-    vm.vm.push_transaction(loadnext_transaction.clone());
-    vm.vm.inspect(
-        MaxRecursionTracer {
-            max_recursion_depth: 15,
-        }
-        .into_tracer_pointer()
-        .into(),
-        VmExecutionMode::OneTx,
-    );
-
-    let nonce_val2 = vm
-        .vm
-        .state
-        .storage
-        .storage
-        .read_from_storage(&get_nonce_key(&account.address));
-
-    // The tracer stopped after the validation has passed, so nonce has already been increased
-    assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change");
-
-    vm.vm.rollback_to_the_latest_snapshot();
-
-    let nonce_val_after_rollback = vm
-        .vm
-        .state
-        .storage
-        .storage
-        .read_from_storage(&get_nonce_key(&account.address));
-
-    assert_eq!(
-        nonce_val, nonce_val_after_rollback,
-        "nonce changed after rollback"
-    );
-
-    vm.vm.push_transaction(loadnext_transaction);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed(), "transaction must not fail");
-}
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs
deleted file mode 100644
index f6b1d83e02a..00000000000
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-use crate::{
-    interface::{ExecutionResult, VmExecutionMode, VmInterface},
-    vm_boojum_integration::{
-        tests::tester::{TxType, VmTesterBuilder},
-        HistoryDisabled,
-    },
-};
-
-#[test]
-fn estimate_fee() {
-    let mut vm_tester = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
-
-    vm_tester.deploy_test_contract();
-    let account = &mut vm_tester.rich_accounts[0];
-
-    let tx = account.get_test_contract_transaction(
-
vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs deleted file mode 100644 index 078a971e4bf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs +++ /dev/null @@ -1,130 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_0::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::{ - vm_boojum_integration::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e0..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs 
b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs deleted file mode 100644 index 4d6572fe78a..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_boojum_integration::{tests::tester::vm_tester::VmTester, HistoryEnabled}, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs deleted file mode 100644 index fcea03e12cc..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs +++ /dev/null @@ -1,295 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - 
self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
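// Illustrative usage of the helper defined below (a sketch only; the variable
// names are hypothetical and not part of the original test suite): deploy the
// counter test contract at a random address without marking it as an account.
//
//     let mut raw_storage = get_empty_storage();
//     let counter_bytecode = read_test_contract();
//     let target = Address::random();
//     insert_contracts(&mut raw_storage, &[(counter_bytecode, target, false)]);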
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs deleted file mode 100644 index 8c538dcf9bf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_boojum_integration::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs deleted file mode 100644 index bc3d62f62a1..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs +++ /dev/null @@ -1,362 +0,0 @@ -use zk_evm_1_4_0::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::read_test_contract; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, 
VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_boojum_integration::{ - tests::{tester::VmTesterBuilder, utils::verify_required_storage}, - HistoryEnabled, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an 
address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implenentaiton itself -// For the explanatation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - 
"etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs deleted file mode 100644 index 4fba188ac5b..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs +++ /dev/null @@ -1,111 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_boojum_integration::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - 
(read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs index 326a5789612..2f7d141cb0a 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_0::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_boojum_integration::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs index 9df9009831f..152ccad2fbc 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index 17ce8365a0a..43c9900486d 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -1,17 +1,19 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_4_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_boojum_integration::{ @@ -82,18 +84,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + 
.get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -125,12 +132,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs index f280f56a828..770f232019b 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs @@ -171,8 +171,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs index b48ec7eacb0..f588f20ab25 100644 --- a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs @@ -1,5 +1,5 @@ use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_vm2::interface::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; +use zksync_vm2::interface::{CycleStats, GlobalStateInterface, Opcode, OpcodeType, Tracer}; use zksync_vm_interface::CircuitStatistic; use crate::vm_latest::tracers::circuits_capacity::*; @@ -24,7 +24,7 @@ pub struct CircuitsTracer { } impl Tracer for CircuitsTracer { - fn after_instruction(&mut self, _state: &mut S) { + fn after_instruction(&mut self, _: &mut S) { self.main_vm_cycles += 1; match OP::VALUE { diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs index d4804a98c25..733ca9d82fc 100644 --- a/core/lib/multivm/src/versions/vm_fast/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -12,7 +12,7 @@ mod initial_bootloader_memory; mod pubdata; mod refund; // FIXME(EVM-711): restore tests for fast VM once it is integrated -// #[cfg(test)] -// mod tests; +#[cfg(test)] +mod tests; mod transaction_data; mod vm; diff --git a/core/lib/multivm/src/versions/vm_fast/pubdata.rs b/core/lib/multivm/src/versions/vm_fast/pubdata.rs index d07732ae435..c1ca93152a0 100644 --- a/core/lib/multivm/src/versions/vm_fast/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_fast/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use 
crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index dd407c61668..bb66eb2f770 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -1,392 +1,6 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use itertools::Itertools; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, - l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use super::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{get_complex_upgrade_abi, read_complex_upgrade}, -}; -use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::default_l1_batch, - vm_latest::constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -const CALLS_PER_TX: usize = 1_000; -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .chunks(CALLS_PER_TX) - .into_iter() - .map(|chunk| { - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(chunk.collect_vec())]) - .unwrap() - }) - .collect_vec(); - - encoded_calls -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { - let 
mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute - // the gas limit - let batch_env = L1BatchEnv { - fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), - ..default_l1_batch(zksync_types::L1BatchNumber(1)) - }; - - let mut vm = VmTesterBuilder::new() - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_l1_batch_env(batch_env) - .build(); - - let bytecodes = test_data.bytecodes.iter().map(Vec::as_slice); - vm.vm.insert_bytecodes(bytecodes); - - let txs_data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - - for (i, data) in txs_data.into_iter().enumerate() { - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), - calldata: data, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {test_data:#?}" - ); - } - - // Now we count how much gas was spent at the end of the batch - // It is assumed that the top level frame is the bootloader - vm.vm.enforce_state_diffs(test_data.state_diffs.clone()); - let gas_before = vm.vm.gas_remaining(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {test_data:?}" - ); - let gas_after = vm.vm.gas_remaining(); - assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used); - - TestStatistics { - max_used_gas: gas_before - gas_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has odd number of 32 byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} +use crate::{versions::testonly::block_tip::test_dry_run_upper_bound, vm_fast::Vm}; 
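// Worked illustration of the rule implemented by the deleted
// `get_valid_bytecode_length` helper above (hypothetical helper name below,
// for illustration only): a valid zkEVM bytecode must span an odd number of
// 32-byte words, so a length is first rounded up to a multiple of 32 and then
// bumped by one extra word whenever the word count comes out even.
fn illustrative_valid_bytecode_length(len: usize) -> usize {
    // Round up to whole 32-byte words.
    let padded = if len % 32 == 0 { len } else { len + 32 - len % 32 };
    if (padded / 32) % 2 == 0 {
        padded + 32 // even word count -> add one more word to make it odd
    } else {
        padded
    }
}
// e.g. 100 bytes -> 160 (5 words), 96 bytes -> 96 (3 words), 64 bytes -> 96 (3 words).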
#[test] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let statistics = vec![ - // max logs - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }, - // max messages - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }, - // long message - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }, - // max bytecodes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
- // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }, - // long bytecode - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![ - vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; - 1 - ], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }, - // lots of small repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }, - // lots of big repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - true, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, - ), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }, - // lots of small initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs( - false, - true, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, - ), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }, - // lots of large initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - false, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, - ), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }, - ]; - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
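// Illustrative restatement (hypothetical helper, not from the original test) of
// the margin rule enforced by the assertions below: each measured worst case,
// multiplied by 3/2, must still fit under the corresponding overhead constant,
// i.e. the constants must keep at least a 1.5x safety margin over the worst
// scenario observed in this dry run.
fn illustrative_has_margin(measured_worst_case: u64, overhead_constant: u64) -> bool {
    measured_worst_case * 3 / 2 <= overhead_constant
}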
- let max_used_gas = statistics - .iter() - .map(|s| (s.statistics.max_used_gas, s.tag.clone())) - .max() - .unwrap(); - assert!( - max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", - max_used_gas.1, - max_used_gas.0, - BOOTLOADER_BATCH_TIP_OVERHEAD - ); - - let circuit_statistics = statistics - .iter() - .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) - .max() - .unwrap(); - assert!( - circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", - circuit_statistics.1, - circuit_statistics.0, - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD - ); - - let execution_metrics_size = statistics - .iter() - .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) - .max() - .unwrap(); - assert!( - execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", - execution_metrics_size.1, - execution_metrics_size.0, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD - ); +fn dry_run_upper_bound() { + test_dry_run_upper_bound::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs index 48e1b10de44..6075aea0989 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -1,52 +1,14 @@ -use assert_matches::assert_matches; -use zksync_types::U256; -use zksync_vm2::interface::HeapId; - use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, - versions::vm_fast::tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, + versions::testonly::bootloader::{test_bootloader_out_of_gas, test_dummy_bootloader}, + vm_fast::Vm, }; #[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - - verify_required_memory(&vm.vm.inner, vec![(correct_first_cell, HeapId::FIRST, 0)]); +fn dummy_bootloader() { + test_dummy_bootloader::>(); } #[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_bootloader_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); +fn bootloader_out_of_gas() { + test_bootloader_out_of_gas::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs 
b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs index 3070140c00b..8a662c38827 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs @@ -1,38 +1,6 @@ -use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::bytecode, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; +use crate::{versions::testonly::bytecode_publishing::test_bytecode_publishing, vm_fast::Vm}; #[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); +fn bytecode_publishing() { + test_bytecode_publishing::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs deleted file mode 100644 index c97b38b6afc..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. 
-#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. - let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs index f40e5336eb3..e7521d87c1c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -1,74 +1,6 @@ -use zksync_types::{Address, Execute, U256}; +use crate::{versions::testonly::circuits::test_circuits, vm_fast::Vm}; -use super::tester::VmTesterBuilder; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. 
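// Illustrative restatement (hypothetical helper, not from the original test) of
// the tolerance rule applied by the deleted `test_circuits` body below: a
// measured circuit fraction passes when it lies within 10% of the hardcoded
// expectation, and must match exactly when the expectation is zero.
fn illustrative_within_tolerance(actual: f32, expected: f32) -> bool {
    if expected == 0.0 {
        actual == expected
    } else {
        ((actual - expected) / expected).abs() < 0.1
    }
}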
#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(Address::random()), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed(), "{res:#?}"); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 13] = [ - 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, - 0.0, 0.0, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - (s.secp256k1_verify, "secp256k1_verify"), - (s.transient_storage_checker, "transient_storage_checker"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } +fn circuits() { + test_circuits::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index 34342d7f3b8..4ef86128734 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -1,252 +1,21 @@ -use ethabi::Token; -use zksync_types::{ - get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, - }, - CircuitsTracer, + versions::testonly::code_oracle::{ + test_code_oracle, test_code_oracle_big_bytecode, test_refunds_in_code_oracle, }, + vm_fast::Vm, }; -fn generate_large_bytecode() -> Vec { - // This is the maximal possible size of a zkEVM bytecode - vec![2u8; ((1 << 16) - 1) * 32] -} - #[test] -fn test_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - // Filling the zkevm bytecode - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account 
interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode, - precompiles_contract_address, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // Now, we ask for the same bytecode. We use to partially check whether the memory page with - // the decommitted bytecode gets erased (it shouldn't). - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); -} - -fn find_code_oracle_cost_log( - precompiles_contract_address: Address, - logs: &[StorageLogWithPreviousValue], -) -> &StorageLogWithPreviousValue { - logs.iter() - .find(|log| { - *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() - }) - .expect("no code oracle cost log") +fn code_oracle() { + test_code_oracle::>(); } #[test] -fn test_code_oracle_big_bytecode() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let big_zkevm_bytecode = generate_large_bytecode(); - let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); - let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); - - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&big_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode, - precompiles_contract_address, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.insert_bytecodes([big_zkevm_bytecode.as_slice()]); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); +fn code_oracle_big_bytecode() { + test_code_oracle_big_bytecode::>(); } #[test] fn refunds_in_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - // Execute code oracle twice with identical VM state that only differs in that the queried bytecode - // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas - // for already decommitted codes). 
- let mut oracle_costs = vec![]; - for decommit in [false, true] { - let mut vm = VmTesterBuilder::new() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode.clone(), - precompiles_contract_address, - )]) - .with_storage(storage.clone()) - .build(); - - vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); - - let account = &mut vm.rich_accounts[0]; - if decommit { - let (_, is_fresh) = vm.vm.inner.world_diff_mut().decommit_opcode( - &mut vm.vm.world, - &mut ((), CircuitsTracer::default()), - h256_to_u256(normal_zkevm_bytecode_hash), - ); - assert!(is_fresh); - } - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - let log = - find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); - oracle_costs.push(log.log.value); - } - - // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` - // in `CodeOracle.yul`. - let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); - assert_eq!( - code_oracle_refund, - (4 * (normal_zkevm_bytecode.len() / 32)).into() - ); + test_refunds_in_code_oracle::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs index c2ce02d39fe..c3cfd8b29f3 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs @@ -1,81 +1,6 @@ -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - vm_latest::utils::fee::get_batch_base_fee, -}; +use crate::{versions::testonly::default_aa::test_default_aa_interaction, vm_fast::Vm}; #[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = [ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage( - &expected_slots, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &vm.fee_account, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); +fn default_aa_interaction() { + test_default_aa_interaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs index 3f0a47b980e..6ba55f8e1f8 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -1,39 +1,6 @@ -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Execute}; +use crate::{versions::testonly::gas_limit::test_tx_gas_limit_offset, vm_fast::Vm}; -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_fast::tests::tester::VmTesterBuilder, - vm_latest::constants::{TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Account::default_fee() - }), - ); - - vm.vm.push_transaction(tx); - - assert!(!vm.vm.has_previous_far_calls()); - let gas_limit_from_memory = vm - .vm - .read_word_from_bootloader_heap(TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET); - - assert_eq!(gas_limit_from_memory, gas_limit); +fn tx_gas_limit_offset() { + test_tx_gas_limit_offset::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 62fa82f52f2..5ec30907ed5 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -1,235 +1,22 @@ -use std::{collections::HashSet, iter}; - -use assert_matches::assert_matches; -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{AccountTreeId, Address, Execute, StorageKey, H256, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - use crate::{ - interface::{ - storage::ReadStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, - }, - versions::testonly::ContractToDeploy, - vm_fast::{ - tests::{ - tester::{TxType, VmTester, VmTesterBuilder}, - utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - vm::Vm, + versions::testonly::get_used_contracts::{ + test_get_used_contracts, test_get_used_contracts_with_far_call, + test_get_used_contracts_with_out_of_gas_far_call, }, + vm_fast::Vm, }; #[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_decommitted_hashes()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .decommitted_hashes() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm.decommitted_hashes().collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_decommitted_hashes()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata: 
big_calldata, - value: Default::default(), - factory_deps: vec![vec![1; 32]], - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm).contains(&hash_to_u256)); - assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code(vm: &Vm) -> HashSet { - let mut known_bytecodes_without_aa_code = vm - .world - .bytecode_cache - .keys() - .cloned() - .collect::>(); - known_bytecodes_without_aa_code.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); - known_bytecodes_without_aa_code -} - -/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial -/// decommitment cost (>10,000 gas). -fn inflated_counter_bytecode() -> Vec { - let mut counter_bytecode = read_test_contract(); - counter_bytecode.extend( - iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) - .take(10_000) - .flatten(), - ); - counter_bytecode -} - -#[derive(Debug)] -struct ProxyCounterData { - proxy_counter_address: Address, - counter_bytecode_hash: U256, -} - -fn execute_proxy_counter(gas: u32) -> (VmTester<()>, ProxyCounterData, VmExecutionResultAndLogs) { - let counter_bytecode = inflated_counter_bytecode(); - let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); - let counter_address = Address::repeat_byte(0x23); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_custom_contracts(vec![ContractToDeploy::new( - counter_bytecode, - counter_address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx( - &proxy_counter_bytecode, - Some(&[Token::Address(counter_address)]), - TxType::L2, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - !decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(deploy_tx.address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - let data = ProxyCounterData { - proxy_counter_address: deploy_tx.address, - counter_bytecode_hash, - }; - (vm, data, exec_result) +fn get_used_contracts() { + test_get_used_contracts::>(); } #[test] fn get_used_contracts_with_far_call() { - let (vm, data, exec_result) = execute_proxy_counter(100_000); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - decommitted_hashes.contains(&data.counter_bytecode_hash), - 
"{decommitted_hashes:?}" - ); + test_get_used_contracts_with_far_call::>(); } #[test] fn get_used_contracts_with_out_of_gas_far_call() { - let (mut vm, data, exec_result) = execute_proxy_counter(10_000); - assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - decommitted_hashes.contains(&data.counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - // Execute another transaction with a successful far call and check that it's still charged for decommitment. - let account = &mut vm.rich_accounts[0]; - let (_, proxy_counter_abi) = read_proxy_counter_contract(); - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(data.proxy_counter_address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let proxy_counter_cost_key = StorageKey::new( - AccountTreeId::new(data.proxy_counter_address), - H256::from_low_u64_be(1), - ); - let far_call_cost_log = exec_result - .logs - .storage_logs - .iter() - .find(|log| log.log.key == proxy_counter_cost_key) - .expect("no cost log"); - assert!( - far_call_cost_log.previous_value.is_zero(), - "{far_call_cost_log:?}" - ); - let far_call_cost = h256_to_u256(far_call_cost_log.log.value); - assert!(far_call_cost > 10_000.into(), "{far_call_cost}"); + test_get_used_contracts_with_out_of_gas_far_call::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs deleted file mode 100644 index dde83d8a9f3..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_latest::tests::tester::VmTesterBuilder; -use crate::vm_latest::types::inputs::system_env::TxExecutionMode; -use crate::vm_latest::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs index df8d992f02f..522aa2413f6 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs @@ -1,46 +1,6 @@ -use zksync_types::get_nonce_key; - -use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - vm_fast::tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; +use crate::{versions::testonly::is_write_initial::test_is_write_initial_behaviour, vm_fast::Vm}; #[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); +fn is_write_initial_behaviour() { + test_is_write_initial_behaviour::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 5897ec5f266..0174eeffd7e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -1,198 +1,16 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Execute, ExecuteTransactionCommon, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::StorageWritesDeduplicator, - vm_fast::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - transaction_data::TransactionData, + versions::testonly::l1_tx_execution::{ + test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, }, + vm_fast::Vm, }; #[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 9 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - `gasPerPubdataByte` - // - `basePubdataSpent` - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. - let basic_initial_writes = 5; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - assert!(!res.result.is_failed()); - - for (expected_value, storage_location) in [ - (U256::from(1u32), known_codes_key), - (h256_to_u256(deploy_tx.bytecode_hash), account_code_key), - ] { - assert_eq!( - expected_value, - vm.vm.inner.world_diff().get_storage_state()[&( - *storage_location.address(), - h256_to_u256(*storage_location.key()) - )] - ); - } - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes, basic_initial_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. - // But now the base pubdata spent has changed too. - assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); - assert_eq!(res.repeated_storage_writes, 1); +fn l1_tx_execution() { + test_l1_tx_execution::>(); } #[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
- - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: Some(L1_MESSENGER_ADDRESS), - value: 0.into(), - factory_deps: vec![], - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); +fn l1_tx_execution_high_gas_limit() { + test_l1_tx_execution_high_gas_limit::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs index fde94d9da6c..0823bee6cc9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -1,424 +1,33 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! - -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, L2BlockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, L2BlockNumber, - ProtocolVersionId, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{ - storage::ReadStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterface, VmInterfaceExt, - }, - versions::testonly::default_l1_batch, - vm_fast::{tests::tester::VmTesterBuilder, vm::Vm}, - vm_latest::{ - constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, - utils::l2_blocks::get_l2_block_hash_key, + versions::testonly::l2_blocks::{ + test_l2_block_first_in_batch, test_l2_block_initialization_number_non_zero, + test_l2_block_initialization_timestamp, test_l2_block_new_l2_block, + test_l2_block_same_l2_block, }, + vm_fast::Vm, }; -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: Some(H160::zero()), - calldata: vec![], - value: U256::zero(), - factory_deps: vec![], - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - #[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. 
- - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current L2 block to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); +fn l2_block_initialization_timestamp() { + test_l2_block_initialization_timestamp::>(); } #[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first L2 block number can not be zero. - - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_initialization_number_non_zero() { + test_l2_block_initialization_number_non_zero::>(); } #[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_same_l2_block() { + test_l2_block_same_l2_block::>(); } #[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = 
default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let mut storage_ptr = vm.vm.world.storage.borrow_mut(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr.set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.set_value( - prev_block_hash_position, - L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), - ); - drop(storage_ptr); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_new_l2_block() { + test_l2_block_new_l2_block::>(); } #[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.write_to_bootloader_heap([ - 
(fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ]) +fn l2_block_first_in_batch() { + test_l2_block_first_in_batch::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs index 730c573cdcf..b29ca6ed7f8 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ -1,26 +1,164 @@ -mod block_tip; -mod bootloader; -mod bytecode_publishing; -mod default_aa; -// mod call_tracer; FIXME: requires tracers -mod circuits; -mod code_oracle; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -// mod prestate_tracer; FIXME: is pre-state tracer still relevant? -mod refunds; -mod require_eip712; -mod rollbacks; -mod sekp256r1; -mod simple_execution; -mod storage; -mod tester; -mod tracing_execution_error; -mod transfer; -mod upgrade; -mod utils; +use std::{any::Any, collections::HashSet, fmt, rc::Rc}; + +use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H160, H256, U256}; +use zksync_utils::h256_to_u256; +use zksync_vm2::interface::{Event, HeapId, StateInterface}; +use zksync_vm_interface::{ + pubdata::PubdataBuilder, storage::ReadStorage, CurrentExecutionState, L2BlockEnv, + VmExecutionMode, VmExecutionResultAndLogs, VmInterface, +}; + +use super::Vm; +use crate::{ + interface::storage::{ImmutableStorageView, InMemoryStorage}, + versions::testonly::TestedVm, + vm_fast::CircuitsTracer, +}; + +// mod block_tip; +// mod bootloader; +// mod bytecode_publishing; +// mod circuits; +// mod code_oracle; +// mod default_aa; +// mod gas_limit; +// mod get_used_contracts; +// mod is_write_initial; +// mod l1_tx_execution; +// mod l2_blocks; +// mod nonce_holder; +// mod precompiles; +// mod refunds; +// mod require_eip712; +// mod rollbacks; +// mod secp256r1; +// mod simple_execution; +// mod storage; +// mod tracing_execution_error; +// mod transfer; +// mod upgrade; + +trait ObjectSafeEq: fmt::Debug + AsRef { + fn eq(&self, other: &dyn ObjectSafeEq) -> bool; +} + +#[derive(Debug)] +struct BoxedEq(T); + +impl AsRef for BoxedEq { + fn as_ref(&self) -> &dyn Any { + &self.0 + } +} + +impl ObjectSafeEq for BoxedEq { + fn eq(&self, other: &dyn ObjectSafeEq) -> bool { + let Some(other) = other.as_ref().downcast_ref::() else { + return false; + }; + self.0 == *other + } +} + +// TODO this doesn't include all the state of ModifiedWorld +#[derive(Debug)] +pub(crate) struct VmStateDump { + state: Box, + storage_writes: Vec<((H160, U256), U256)>, + events: Box<[Event]>, +} + +impl PartialEq for VmStateDump { + fn eq(&self, other: &Self) -> bool { + self.state.as_ref().eq(other.state.as_ref()) + && self.storage_writes == other.storage_writes + && self.events == other.events + } +} + +impl TestedVm for Vm> { + type StateDump = VmStateDump; + + fn dump_state(&self) -> Self::StateDump { + VmStateDump { + state: Box::new(BoxedEq(self.inner.dump_state())), + storage_writes: self.inner.get_storage_state().collect(), + events: self.inner.events().collect(), + } + } + + fn gas_remaining(&mut self) -> u32 { + self.gas_remaining() + } + + fn get_current_execution_state(&self) -> CurrentExecutionState { + self.get_current_execution_state() + } + + fn 
decommitted_hashes(&self) -> HashSet { + self.decommitted_hashes().collect() + } + + fn finish_batch_with_state_diffs( + &mut self, + diffs: Vec, + pubdata_builder: Rc, + ) -> VmExecutionResultAndLogs { + self.enforce_state_diffs(diffs); + self.finish_batch(pubdata_builder) + .block_tip_execution_result + } + + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs { + self.inspect_inner(&mut Default::default(), VmExecutionMode::Batch) + } + + fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { + self.insert_bytecodes(bytecodes.iter().copied()) + } + + fn known_bytecode_hashes(&self) -> HashSet { + self.world.bytecode_cache.keys().copied().collect() + } + + fn manually_decommit(&mut self, code_hash: H256) -> bool { + let (_, is_fresh) = self.inner.world_diff_mut().decommit_opcode( + &mut self.world, + &mut ((), CircuitsTracer::default()), + h256_to_u256(code_hash), + ); + is_fresh + } + + fn verify_required_bootloader_heap(&self, required_values: &[(u32, U256)]) { + for &(slot, expected_value) in required_values { + let current_value = self.inner.read_heap_u256(HeapId::FIRST, slot * 32); + assert_eq!(current_value, expected_value); + } + } + + fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) { + self.write_to_bootloader_heap(cells.iter().copied()); + } + + fn read_storage(&mut self, key: StorageKey) -> U256 { + let storage_changes = self.inner.world_diff().get_storage_state(); + let main_storage = &mut self.world.storage; + storage_changes + .get(&(*key.account().address(), h256_to_u256(*key.key()))) + .copied() + .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))) + } + + fn last_l2_block_hash(&self) -> H256 { + self.bootloader_state.last_l2_block().get_hash() + } + + fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) { + self.bootloader_state.push_l2_block(block); + } + + fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { + self.push_transaction_inner(tx, refund, true); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index 6d1e0f016e9..438d6aabe55 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -1,180 +1,6 @@ -use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, - VmRevertReason, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} +use crate::{versions::testonly::nonce_holder::test_nonce_holder, vm_fast::Vm}; #[test] -fn test_nonce_holder() { - let mut account = Account::random(); - let hex_addr = hex::encode(account.address.to_fixed_bytes()); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - 
.with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![ContractToDeploy::account( - read_nonce_holder_tester().to_vec(), - account.address, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. - vm.reset_state(true); - let mut transaction = account.get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: Some(account.address), - calldata: vec![12], - value: Default::default(), - factory_deps: vec![], - }, - None, - Nonce(nonce), - ); - let ExecuteTransactionCommon::L2(tx_data) = &mut transaction.common_data else { - unreachable!(); - }; - tx_data.signature = vec![test_mode.into()]; - vm.vm.push_transaction_inner(transaction, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!(reason.to_string(), expected_error.to_string(), "{comment}"); - } else { - assert!(!result.result.is_failed(), "{comment}: {result:?}"); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some(format!("Error function_selector = 0xe90aded4, data = 
0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), - "Allowed to leave nonce as unused", - ); +fn nonce_holder() { + test_nonce_holder::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index b3ca1596217..ccf1463979c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -1,116 +1,19 @@ -use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config; -use zksync_types::{Address, Execute}; - -use super::{tester::VmTesterBuilder, utils::read_precompiles_contract}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + versions::testonly::precompiles::{test_ecrecover, test_keccak, test_sha256}, + vm_fast::Vm, }; #[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let keccak_count = exec_result.statistics.circuit_statistic.keccak256 - * get_geometry_config().cycles_per_keccak256_circuit as f32; - assert!(keccak_count >= 1000.0, "{keccak_count}"); +fn keccak() { + test_keccak::>(); } #[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. 
- let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(sha1000_calldata).unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let sha_count = exec_result.statistics.circuit_statistic.sha256 - * get_geometry_config().cycles_per_sha256_circuit as f32; - assert!(sha_count >= 1000.0, "{sha_count}"); +fn sha256() { + test_sha256::>(); } #[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account.address), - calldata: vec![], - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover - * get_geometry_config().cycles_per_ecrecover_circuit as f32; - assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}"); +fn ecrecover() { + test_ecrecover::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs deleted file mode 100644 index 63620c7d9ff..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_test_account::TxType; -use zksync_types::{utils::deployed_address_create, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::PrestateTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, -}; - -#[test] -fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - vm.deploy_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm.test_contract.unwrap(), - false, - Default::default(), - true, - TxType::L2, - ); - vm.vm.push_transaction(tx1); - - let contract_address = vm.test_contract.unwrap(); - let prestate_tracer_result 
= Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - assert!(prestate_result.1.contains_key(&contract_address)); -} - -#[test] -fn test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); - vm.test_contract = Some(deployed_address); - - // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce2 = tx2.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); - - let account = &mut vm.rich_accounts[0]; - - //enter ether to contract to see difference in the balance post execution - let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), - calldata: Default::default(), - value: U256::from(100000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); - - let tx1 = Execute { - contract_address: deployed_address2, - calldata: Default::default(), - value: U256::from(200000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx1, None)); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm - .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - //assert that the pre-state contains both deployed contracts with balance zero - assert!(prestate_result.0.contains_key(&deployed_address)); - assert!(prestate_result.0.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.0[&deployed_address].balance, - Some(U256::zero()) - ); - assert_eq!( - prestate_result.0[&deployed_address2].balance, - Some(U256::zero()) - ); - - //assert that the post-state contains both deployed contracts with the correct balance - assert!(prestate_result.1.contains_key(&deployed_address)); - assert!(prestate_result.1.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.1[&deployed_address].balance, - Some(U256::from(100000)) - ); - assert_eq!( - prestate_result.1[&deployed_address2].balance, - Some(U256::from(200000)) - ); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs index 1856995149a..335cb4afb1c 100644 
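The prestate-tracer tests deleted just above assert against a `(pre_state, post_state)` pair keyed by contract address: deployed contracts start with a zero balance and end with the transferred amounts. A simplified, self-contained sketch of that assertion shape, with illustrative types in place of the real tracer output:

```rust
use std::collections::HashMap;

/// Simplified per-account entry; the real tracer records more fields.
#[derive(Debug, PartialEq)]
struct AccountState {
    balance: Option<u128>,
}

/// (pre-state, post-state), keyed by a 20-byte address.
type PrestateResult = (
    HashMap<[u8; 20], AccountState>,
    HashMap<[u8; 20], AccountState>,
);

fn assert_balance_diff(result: &PrestateResult, contract: [u8; 20], transferred: u128) {
    // Before the batch: the contract exists and holds nothing.
    assert_eq!(result.0[&contract].balance, Some(0));
    // After the batch: the contract holds exactly what was sent to it.
    assert_eq!(result.1[&contract].balance, Some(transferred));
}

#[test]
fn balance_diff_sketch() {
    let contract = [0x11; 20];
    let pre = HashMap::from([(contract, AccountState { balance: Some(0) })]);
    let post = HashMap::from([(contract, AccountState { balance: Some(100_000) })]);
    assert_balance_diff(&(pre, post), contract, 100_000);
}
```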
--- a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -1,221 +1,16 @@ -use ethabi::Token; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{read_expensive_contract, read_test_contract}, + versions::testonly::refunds::{ + test_negative_pubdata_for_transaction, test_predetermined_refunded_gas, }, + vm_fast::Vm, }; #[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
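The comments above and below describe the core of the removed refund test: execute the batch once letting the bootloader compute the refund, then again with that refund supplied up front, and require the resulting states to match after sorting `used_contract_hashes`, which is flattened from a map. A compact sketch of that comparison follows; `BatchState` is a deliberately simplified stand-in, not the real `CurrentExecutionState`.

```rust
/// Deliberately simplified stand-in for the batch execution state.
#[derive(Debug, PartialEq)]
struct BatchState {
    events: Vec<String>,
    used_contract_hashes: Vec<u64>,
}

fn normalized(mut state: BatchState) -> BatchState {
    // Hashes come from a map, so their order is not meaningful; sort first.
    state.used_contract_hashes.sort_unstable();
    state
}

/// The invariant being checked: providing the operator refund up front must
/// not change what the batch produces.
fn assert_refund_independent(without_refund: BatchState, with_refund: BatchState) {
    assert_eq!(normalized(without_refund), normalized(with_refund));
}

#[test]
fn refund_independence_sketch() {
    let first_run = BatchState {
        events: vec!["Transfer".to_owned()],
        used_contract_hashes: vec![3, 1, 2],
    };
    let rerun_with_refund = BatchState {
        events: vec!["Transfer".to_owned()],
        used_contract_hashes: vec![1, 2, 3],
    };
    assert_refund_independent(first_run, rerun_with_refund);
}
```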
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - vm.vm - .push_transaction_inner(tx.clone(), result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_transaction_inner(tx, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .deduplicated_storage_logs - .len(), - current_state_without_predefined_refunds - .deduplicated_storage_logs - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); +fn predetermined_refunded_gas() { + 
test_predetermined_refunded_gas::>(); } #[test] fn negative_pubdata_for_transaction() { - let expensive_contract_address = Address::random(); - let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); - let expensive_function = expensive_contract.function("expensive").unwrap(); - let cleanup_function = expensive_contract.function("cleanUp").unwrap(); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - expensive_contract_bytecode, - expensive_contract_address, - )]) - .build(); - - let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: expensive_function - .encode_input(&[Token::Uint(10.into())]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(expensive_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. - let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: cleanup_function.encode_input(&[]).unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(clean_up_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - assert!(result.refunds.operator_suggested_refund > 0); - assert_eq!( - result.refunds.gas_refunded, - result.refunds.operator_suggested_refund - ); + test_negative_pubdata_for_transaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index e119cea0114..22e4ebf258c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -1,176 +1,6 @@ -use ethabi::Token; -use zksync_eth_signer::{EthereumSigner, TransactionParameters}; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; -use zksync_utils::h256_to_u256; +use crate::{versions::testonly::require_eip712::test_require_eip712, vm_fast::Vm}; -use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, -}; - -impl VmTester<()> { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &address, - ); - self.vm - .inner - .world_diff() - .get_storage_state() - .get(&(L2_BASE_TOKEN_ADDRESS, h256_to_u256(*key.key()))) - .copied() - .unwrap_or_else(|| h256_to_u256(self.vm.world.storage.read_value(&key))) - } -} - -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. 
-/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -#[tokio::test] -async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_custom_contracts(vec![ContractToDeploy::account( - bytecode, - account_abstraction.address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account_abstraction.address), - calldata: encoded_input, - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.into(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
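The balance assertions in this deleted test (including the one continuing right below) amount to simple bookkeeping: the value leaves the AA account, arrives at the beneficiary, and the EOA that only signed the message keeps its balance. A self-contained sketch of that bookkeeping with plain integers instead of `U256`; the `Ledger` type and its methods are illustrative only.

```rust
use std::collections::HashMap;

type Address = [u8; 20];

/// Toy ledger standing in for balance lookups against VM storage.
struct Ledger(HashMap<Address, u128>);

impl Ledger {
    fn balance(&self, addr: Address) -> u128 {
        *self.0.get(&addr).unwrap_or(&0)
    }

    /// Value moves from the AA account (the tx initiator) to the beneficiary.
    fn transfer(&mut self, from: Address, to: Address, value: u128) {
        *self.0.entry(from).or_default() -= value;
        *self.0.entry(to).or_default() += value;
    }
}

#[test]
fn aa_transfer_bookkeeping_sketch() {
    let aa_account = [0xaa; 20];
    let signer_eoa = [0x01; 20];
    let beneficiary = [0x02; 20];

    let mut ledger = Ledger(HashMap::from([(aa_account, 1_000_000_000), (signer_eoa, 5)]));
    let signer_balance_before = ledger.balance(signer_eoa);

    ledger.transfer(aa_account, beneficiary, 888_000_088);

    assert_eq!(ledger.balance(beneficiary), 888_000_088);
    // The EOA only signed; its own funds never move.
    assert_eq!(ledger.balance(signer_eoa), signer_balance_before);
}
```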
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - Some(beneficiary.address), - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - vec![], - Default::default(), - ); - - let mut transaction_request: TransactionRequest = tx_712.into(); - transaction_request.chain_id = Some(chain_id.into()); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.into(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); +#[test] +fn require_eip712() { + test_require_eip712::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index 548bf8daadf..e8af23fa1e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -1,173 +1,21 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{Execute, Nonce, U256}; - use crate::{ - interface::TxExecutionMode, - vm_fast::tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, + versions::testonly::rollbacks::{ + test_rollback_in_call_mode, test_vm_loadnext_rollbacks, test_vm_rollbacks, }, + vm_fast::Vm, }; #[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - 
TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), - ), - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - ]); - - pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks); +fn vm_rollbacks() { + test_vm_rollbacks::>(); } #[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused( - loadnext_deploy_tx.initiator_account(), - loadnext_deploy_tx.nonce().unwrap(), - ) - .into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - 
TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused( - loadnext_deploy_tx.initiator_account(), - loadnext_deploy_tx.nonce().unwrap(), - ) - .into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); +fn vm_loadnext_rollbacks() { + test_vm_loadnext_rollbacks::>(); +} - assert_eq!(result_without_rollbacks, result_with_rollbacks); +#[test] +fn rollback_in_call_mode() { + test_rollback_in_call_mode::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs new file mode 100644 index 00000000000..d9661c7f713 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs @@ -0,0 +1,6 @@ +use crate::{versions::testonly::secp256r1::test_secp256r1, vm_fast::Vm}; + +#[test] +fn secp256r1() { + test_secp256r1::>(); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs index 8c916a541e2..4fe33d237e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs @@ -1,80 +1,14 @@ -use assert_matches::assert_matches; - use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::tester::{TxType, VmTesterBuilder}, + versions::testonly::simple_execution::{test_estimate_fee, test_simple_execute}, + vm_fast::Vm, }; #[test] fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); + test_estimate_fee::>(); } #[test] fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. 
}); + test_simple_execute::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs index 2cfadb640e7..54a38814d3b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -1,133 +1,14 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{ - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::tester::VmTesterBuilder, + versions::testonly::storage::{test_storage_behavior, test_transient_storage_behavior}, + vm_fast::Vm, }; -fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 { - let bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let test_contract_address = Address::random(); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata: first_tx_calldata, - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata: second_tx_calldata, - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "First tx failed"); - vm.vm.pop_snapshot_no_rollback(); - - // We rollback once because transient storage and rollbacks are a tricky combination. - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx2.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Second tx failed"); - vm.vm.rollback_to_the_latest_snapshot(); - - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Second tx failed on second run"); - - result.statistics.pubdata_published -} - -fn test_storage_one_tx(second_tx_calldata: Vec) -> u32 { - test_storage(vec![], second_tx_calldata) -} - #[test] -fn test_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - // In all of the tests below we provide the first tx to ensure that the tracers will not include - // the statistics from the start of the bootloader and will only include those for the transaction itself. 
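That baseline comment is the key to how the removed storage test measures pubdata: the pubdata published by a no-op transaction is subtracted from each scenario, so only the scenario's own writes are counted. A minimal sketch of the pattern, with a closure standing in for "push the transaction and return `statistics.pubdata_published`":

```rust
/// Returns how much pubdata the scenario adds on top of the fixed baseline.
/// `run_tx` is a stand-in for executing a single transaction on the tester.
fn pubdata_delta(run_tx: impl Fn(&[u8]) -> u32, calldata: &[u8]) -> u32 {
    let empty: &[u8] = &[];
    let base = run_tx(empty); // no-op transaction: bootloader overhead only
    run_tx(calldata) - base
}

#[test]
fn pubdata_delta_sketch() {
    // Fake cost model: 40 bytes of overhead plus 65 bytes for a non-empty write.
    let run_tx = |calldata: &[u8]| if calldata.is_empty() { 40 } else { 40 + 65 };
    assert_eq!(pubdata_delta(run_tx, b"simpleWrite"), 65);
}
```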
- - let base_pubdata = test_storage_one_tx(vec![]); - let simple_test_pubdata = test_storage_one_tx( - contract - .function("simpleWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_pubdata = test_storage_one_tx( - contract - .function("resettingWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_via_revert_pubdata = test_storage_one_tx( - contract - .function("resettingWriteViaRevert") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - - assert_eq!(simple_test_pubdata - base_pubdata, 65); - assert_eq!(resetting_write_pubdata - base_pubdata, 34); - assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34); +fn storage_behavior() { + test_storage_behavior::>(); } #[test] -fn test_transient_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let first_tstore_test = contract - .function("testTransientStore") - .unwrap() - .encode_input(&[]) - .unwrap(); - // Second transaction checks that, as expected, the transient storage is cleared after the first transaction. - let second_tstore_test = contract - .function("assertTValue") - .unwrap() - .encode_input(&[Token::Uint(U256::zero())]) - .unwrap(); - - test_storage(first_tstore_test, second_tstore_test); +fn transient_storage_behavior() { + test_transient_storage_behavior::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs deleted file mode 100644 index 212e569d510..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{get_empty_storage, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs deleted file mode 100644 index 5cc9ead8b54..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,242 +0,0 @@ -use std::fmt; - -use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; -use zksync_vm2::interface::{Event, StateInterface}; - -use super::VmTester; -use crate::{ - interface::{ - storage::ReadStorage, CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, - VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_fast::Vm, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce(Nonce, Nonce), - NonceReused(H160, Nonce), -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector: vec![144, 240, 73, 
201], - data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector: vec![144, 240, 73, 201], - data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], - }) - - } - TxModifier::WrongNonce(expected, actual) => { - let function_selector = vec![98, 106, 222, 48]; - let expected_nonce_bytes = expected.0.to_be_bytes().to_vec(); - let actual_nonce_bytes = actual.0.to_be_bytes().to_vec(); - // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field - let nonce_padding = vec![0u8; 28]; - let data = [function_selector.clone(), nonce_padding.clone(), expected_nonce_bytes, nonce_padding.clone(), actual_nonce_bytes].concat(); - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector, - data - }) - } - TxModifier::NonceReused(addr, nonce) => { - let function_selector = vec![233, 10, 222, 212]; - let addr = addr.as_bytes().to_vec(); - // padding is 12 because an address takes up 20 bytes and we need it to fill a 32 byte field - let addr_padding = vec![0u8; 12]; - // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field - let nonce_padding = vec![0u8; 28]; - let data = [function_selector.clone(), addr_padding, addr, nonce_padding, nonce.0.to_be_bytes().to_vec()].concat(); - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector, - data, - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce(_, _) => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused(_, _) => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -// TODO this doesn't include all the state of ModifiedWorld -#[derive(Debug)] -struct VmStateDump { - state: S, - storage_writes: Vec<((H160, U256), U256)>, - events: Box<[Event]>, -} - -impl PartialEq for VmStateDump { - fn eq(&self, other: &Self) -> bool { - self.state == other.state - && self.storage_writes == other.storage_writes - && self.events == other.events - } -} - -impl Vm { - fn dump_state(&self) -> VmStateDump { - VmStateDump { - state: self.inner.dump_state(), - storage_writes: self.inner.get_storage_state().collect(), - events: self.inner.events().collect(), - } - } -} - -impl VmTester<()> { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - self.vm.make_snapshot(); - let inner_state_before = self.vm.dump_state(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_state(); - pretty_assertions::assert_eq!( - inner_state_before, - inner_state_after, - "Inner state before and after rollback should be equal" - ); - } else { - self.vm.pop_snapshot_no_rollback(); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs deleted file mode 100644 index 9549b32c4f1..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs +++ /dev/null @@ -1,231 +0,0 @@ -use std::{cell::RefCell, rc::Rc}; - -use zksync_contracts::BaseSystemContracts; -use zksync_test_account::{Account, TxType}; -use zksync_types::{ - block::L2BlockHasher, utils::deployed_address_create, AccountTreeId, Address, L1BatchNumber, - L2BlockNumber, Nonce, StorageKey, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use zksync_vm2::{interface::Tracer, WorldDiff}; - -use crate::{ - interface::{ - storage::{InMemoryStorage, StoragePtr}, - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - versions::{ - testonly::{default_l1_batch, default_system_env, make_account_rich, ContractToDeploy}, - vm_fast::{tests::utils::read_test_contract, vm::Vm}, - }, - vm_latest::utils::l2_blocks::load_last_l2_block, -}; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, Tr>, - pub(crate) 
storage: StoragePtr, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) fee_account: Address, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.inspect(&mut Tr::default(), VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = Rc::new(RefCell::new(get_empty_storage())); - *self.vm.inner.world_diff_mut() = WorldDiff::default(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(&mut self.storage.borrow_mut(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(&mut self.storage.borrow_mut(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let storage = self.storage.clone(); - { - let mut storage = storage.borrow_mut(); - // Commit pending storage changes (old VM versions commit them on successful execution) - for (&(address, slot), &value) in self.vm.inner.world_diff().get_storage_state() { - let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(slot)); - storage.set_value(key, u256_to_h256(value)); - } - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(&storage).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::custom(l1_batch, self.vm.system_env.clone(), storage); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - self.vm = vm; - } -} - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -impl VmTesterBuilder { - pub(crate) fn new() -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: default_system_env(), - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = 
Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester<()> { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage); - let storage_ptr = Rc::new(RefCell::new(raw_storage)); - for account in self.rich_accounts.iter() { - make_account_rich(&mut storage_ptr.borrow_mut(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(&mut storage_ptr.borrow_mut(), deployer); - } - - let fee_account = l1_batch_env.fee_account; - let vm = Vm::custom(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - deployer: self.deployer, - test_contract: None, - fee_account, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs index 89f0fa23620..b3f5b4b33bc 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -1,55 +1,8 @@ -use zksync_types::{Execute, H160}; - use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, + versions::testonly::tracing_execution_error::test_tracing_of_execution_errors, vm_fast::Vm, }; #[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![ContractToDeploy::new( - read_error_contract(), - contract_address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = 
account.get_l2_tx_for_execute( - Execute { - contract_address: Some(contract_address), - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); +fn tracing_of_execution_errors() { + test_tracing_of_execution_errors::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs index ef510546f11..57c2c3e2c34 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -1,215 +1,16 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, U256}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::get_balance, + versions::testonly::transfer::{ + test_reentrancy_protection_send_and_transfer, test_send_and_transfer, }, + vm_fast::Vm, }; -enum TestOptions { - Send(U256), - Transfer(U256), -} - -fn test_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let recipient_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - - let test_contract_address = Address::random(); - let recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - }; - - let mut storage = get_empty_storage(); - storage.set_value( - storage_key_for_eth_balance(&test_contract_address), - u256_to_h256(value), - ); - - let mut vm = VmTesterBuilder::new() - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - ContractToDeploy::new(test_bytecode, test_contract_address), - ContractToDeploy::new(recipient_bytecode, recipient_address), - ]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let tx_result = 
vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx_result.result.is_failed(), - "Transaction wasn't successful" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); - - let new_recipient_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &recipient_address, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - assert_eq!(new_recipient_balance, value); -} - #[test] -fn test_send_and_transfer() { - test_send_or_transfer(TestOptions::Send(U256::zero())); - test_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_send_or_transfer(TestOptions::Transfer(U256::from(10).pow(18.into()))); -} - -fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - - let test_contract_address = Address::random(); - let reentrant_recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipient_address), - Token::Uint(value), - ]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipient_address), - Token::Uint(value), - ]) - .unwrap(), - ), - }; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - ContractToDeploy::new(test_bytecode, test_contract_address), - ContractToDeploy::new(reentrant_recipient_bytecode, reentrant_recipient_address), - ]) - .build(); - - // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. 
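Both removed transfer helpers iterate the same small matrix (`send` and `transfer`, each with a zero value and with 1 ether), and in the reentrancy variant the second transaction is expected to revert rather than succeed. A small self-contained sketch of that matrix and the expected outcomes, using plain `u128` wei values and illustrative names:

```rust
#[derive(Clone, Copy, Debug)]
enum TestOption {
    Send(u128),
    Transfer(u128),
}

const ONE_ETHER: u128 = 10u128.pow(18);

/// The four cases both removed helpers loop over.
fn value_matrix() -> [TestOption; 4] {
    [
        TestOption::Send(0),
        TestOption::Send(ONE_ETHER),
        TestOption::Transfer(0),
        TestOption::Transfer(ONE_ETHER),
    ]
}

/// Expected outcome for one (method, value) case: transfers to a plain
/// recipient go through, transfers to a recipient that re-enters must revert.
fn should_succeed(_case: TestOption, recipient_reenters: bool) -> bool {
    !recipient_reenters
}

#[test]
fn transfer_matrix_sketch() {
    for case in value_matrix() {
        assert!(should_succeed(case, false), "{case:?} against a plain recipient");
        assert!(!should_succeed(case, true), "{case:?} against a reentrant recipient");
    }
}
```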
-    let account = &mut vm.rich_accounts[0];
-    let tx1 = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(reentrant_recipient_address),
-            calldata: reentrant_recipient_abi
-                .function("setX")
-                .unwrap()
-                .encode_input(&[])
-                .unwrap(),
-            value: U256::from(1),
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    vm.vm.push_transaction(tx1);
-    let tx1_result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !tx1_result.result.is_failed(),
-        "Transaction 1 wasn't successful"
-    );
-
-    let tx2 = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(test_contract_address),
-            calldata,
-            value,
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    vm.vm.push_transaction(tx2);
-    let tx2_result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        tx2_result.result.is_failed(),
-        "Transaction 2 should have failed, but it succeeded"
-    );
-
-    let batch_result = vm.vm.execute(VmExecutionMode::Batch);
-    assert!(!batch_result.result.is_failed(), "Batch wasn't successful");
+fn send_and_transfer() {
+    test_send_and_transfer::<Vm<_>>();
}
 
#[test]
-fn test_reentrancy_protection_send_and_transfer() {
-    test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::zero()));
-    test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into())));
-    test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(U256::zero()));
-    test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(
-        U256::from(10).pow(18.into()),
-    ));
+fn reentrancy_protection_send_and_transfer() {
+    test_reentrancy_protection_send_and_transfer::<Vm<_>>();
}
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs
index ba4863f7c45..4e4533c6868 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs
@@ -1,343 +1,21 @@
-use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode};
-use zksync_test_account::TxType;
-use zksync_types::{
-    ethabi::{Contract, Token},
-    get_code_key, get_known_code_key,
-    protocol_upgrade::ProtocolUpgradeTxCommonData,
-    Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS,
-    CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256,
-    REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256,
-};
-use zksync_utils::{bytecode::hash_bytecode, u256_to_h256};
-
 use crate::{
-    interface::{
-        ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt,
-        VmInterfaceHistoryEnabled,
-    },
-    vm_fast::tests::{
-        tester::VmTesterBuilder,
-        utils::{
-            get_complex_upgrade_abi, read_complex_upgrade, read_test_contract,
-            verify_required_storage,
-        },
+    versions::testonly::upgrade::{
+        test_complex_upgrader, test_force_deploy_upgrade, test_protocol_upgrade_is_first,
     },
+    vm_fast::Vm,
 };
 
-/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader:
-/// - This transaction must be the only one in block
-/// - If present, this transaction must be the first one in block
#[test]
-fn test_protocol_upgrade_is_first() {
-    let mut vm = VmTesterBuilder::new()
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let bytecode_hash = hash_bytecode(&read_test_contract());
-    vm.storage
-        .borrow_mut()
-        .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into()));
-
-    // Here we just use some random transaction of protocol upgrade type:
-    
let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); +fn protocol_upgrade_is_first() { + test_protocol_upgrade_is_first::>(); } -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. #[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = [(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage( - &expected_slots, - &mut *vm.storage.borrow_mut(), - vm.vm.inner.world_diff().get_storage_state(), - ); +fn force_deploy_upgrade() { + test_force_deploy_upgrade::>(); } -/// Here we show how the work with the complex upgrader could be done #[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - { - let mut storage = vm.storage.borrow_mut(); - storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage.set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage.set_value(account_code_key, bytecode_hash); - storage.store_factory_dep(bytecode_hash, read_complex_upgrade()); - storage.store_factory_dep(msg_sender_test_hash, read_msg_sender_test()); - } - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = [ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage( - &expected_slots, - &mut *vm.storage.borrow_mut(), - vm.vm.inner.world_diff().get_storage_state(), - ); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - 
- let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: Some(COMPLEX_UPGRADER_ADDRESS), - calldata: complex_upgrader_calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") +fn complex_upgrader() { + test_complex_upgrader::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs deleted file mode 100644 index 5ab5aa0dec9..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ /dev/null @@ -1,137 +0,0 @@ -use std::collections::BTreeMap; - -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H160, H256, - U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -use zksync_vm2::interface::{HeapId, StateInterface}; - -use crate::interface::storage::ReadStorage; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -pub(crate) fn verify_required_memory( - state: &impl StateInterface, - required_values: Vec<(U256, HeapId, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state.read_heap_u256(memory_page, cell * 32); - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn verify_required_storage( - required_values: &[(H256, StorageKey)], - main_storage: &mut impl ReadStorage, - storage_changes: &BTreeMap<(H160, U256), U256>, -) { - for &(required_value, key) in required_values { - let current_value = storage_changes - .get(&(*key.account().address(), h256_to_u256(*key.key()))) - .copied() - .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: &mut impl ReadStorage, - storage_changes: &BTreeMap<(H160, U256), U256>, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - - storage_changes - .get(&(*key.account().address(), h256_to_u256(*key.key()))) - .copied() - .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn load_precompiles_contract() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> 
Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -pub(crate) fn read_expensive_contract() -> (Vec, Contract) { - const PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; - (read_bytecode(PATH), load_contract(PATH)) -} - -pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { - const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; - (read_bytecode(PATH), load_contract(PATH)) -} diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 41e37c0d0ba..435b6529c9e 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -1,6 +1,8 @@ -use std::{collections::HashMap, fmt, mem}; +use std::{collections::HashMap, fmt, mem, rc::Rc}; -use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION; +use zk_evm_1_5_0::{ + aux_structures::LogQuery, zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION, +}; use zksync_contracts::SystemContractCode; use zksync_types::{ l1::is_l1_tx_type, @@ -11,14 +13,15 @@ use zksync_types::{ BYTES_PER_ENUMERATION_INDEX, }, AccountTreeId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, - BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, + Transaction, BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use zksync_vm2::{ interface::{CallframeInterface, HeapId, StateInterface, Tracer}, - ExecutionEnd, FatPointer, Program, Settings, VirtualMachine, + ExecutionEnd, FatPointer, Program, Settings, StorageSlot, VirtualMachine, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use super::{ bootloader_state::{BootloaderState, BootloaderStateSnapshot}, @@ -33,11 +36,12 @@ use crate::{ interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, - TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, - VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, - VmRevertReason, VmTrackingContracts, + ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, PushTransactionResult, + Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, + VmExecutionResultAndLogs, VmExecutionStatistics, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmRevertReason, VmTrackingContracts, }, + is_supported_by_fast_vm, utils::events::extract_l2tol1logs_from_l1_messenger, vm_fast::{ bootloader_state::utils::{apply_l2_block, apply_pubdata_to_memory}, @@ -58,6 +62,31 @@ const VM_VERSION: MultiVMSubversion = MultiVMSubversion::IncreasedBootloaderMemo type FullTracer = (Tr, CircuitsTracer); +#[derive(Debug)] +struct VmRunResult { + execution_result: ExecutionResult, + /// `true` if VM execution has terminated (as opposed to being stopped on a hook, e.g. when executing a single transaction + /// in a batch). Used for `execution_result == Revert { .. }` to understand whether VM logs should be reverted. 
+ execution_ended: bool, + refunds: Refunds, + /// This value is used in stats. It's defined in the old VM as the latest value used when computing refunds (see the refunds tracer for `vm_latest`). + /// This is **not** equal to the pubdata diff before and after VM execution; e.g., when executing a batch tip, + /// `pubdata_published` is always 0 (since no refunds are computed). + pubdata_published: u32, +} + +impl VmRunResult { + fn should_ignore_vm_logs(&self) -> bool { + match &self.execution_result { + ExecutionResult::Success { .. } => false, + ExecutionResult::Halt { .. } => true, + // Logs generated during reverts should only be ignored if the revert has reached the root (bootloader) call frame, + // which is only possible with `TxExecutionMode::EthCall`. + ExecutionResult::Revert { .. } => self.execution_ended, + } + } +} + /// Fast VM wrapper. /// /// The wrapper is parametric by the storage and tracer types. Besides the [`Tracer`] trait, a tracer must have `'static` lifetime @@ -75,8 +104,14 @@ pub struct Vm { enforced_state_diffs: Option>, } -impl Vm { +impl Vm { pub fn custom(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { + assert!( + is_supported_by_fast_vm(system_env.version), + "Protocol version {:?} is not supported by fast VM", + system_env.version + ); + let default_aa_code_hash = system_env .base_system_smart_contracts .default_aa @@ -140,32 +175,35 @@ impl Vm { execution_mode: VmExecutionMode, tracer: &mut (Tr, CircuitsTracer), track_refunds: bool, - ) -> (ExecutionResult, Refunds) { + ) -> VmRunResult { let mut refunds = Refunds { gas_refunded: 0, operator_suggested_refund: 0, }; let mut last_tx_result = None; let mut pubdata_before = self.inner.pubdata() as u32; + let mut pubdata_published = 0; - let result = loop { + let (execution_result, execution_ended) = loop { let hook = match self.inner.run(&mut self.world, tracer) { ExecutionEnd::SuspendedOnHook(hook) => hook, - ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output }, + ExecutionEnd::ProgramFinished(output) => { + break (ExecutionResult::Success { output }, true); + } ExecutionEnd::Reverted(output) => { - break match TxRevertReason::parse_error(&output) { + let result = match TxRevertReason::parse_error(&output) { TxRevertReason::TxReverted(output) => ExecutionResult::Revert { output }, TxRevertReason::Halt(reason) => ExecutionResult::Halt { reason }, - } + }; + break (result, true); } ExecutionEnd::Panicked => { - break ExecutionResult::Halt { - reason: if self.gas_remaining() == 0 { - Halt::BootloaderOutOfGas - } else { - Halt::VMPanic - }, - } + let reason = if self.gas_remaining() == 0 { + Halt::BootloaderOutOfGas + } else { + Halt::VMPanic + }; + break (ExecutionResult::Halt { reason }, true); } }; @@ -175,7 +213,7 @@ impl Vm { } Hook::TxHasEnded => { if let VmExecutionMode::OneTx = execution_mode { - break last_tx_result.take().unwrap(); + break (last_tx_result.take().unwrap(), false); } } Hook::AskOperatorForRefund => { @@ -192,7 +230,8 @@ impl Vm { ) .as_u64(); - let pubdata_published = self.inner.pubdata() as u32; + let pubdata_after = self.inner.pubdata() as u32; + pubdata_published = pubdata_after.saturating_sub(pubdata_before); refunds.operator_suggested_refund = compute_refund( &self.batch_env, @@ -200,7 +239,7 @@ impl Vm { gas_spent_on_pubdata.as_u64(), tx_gas_limit, gas_per_pubdata_byte.low_u32(), - pubdata_published.saturating_sub(pubdata_before), + pubdata_published, self.bootloader_state .last_l2_block() .txs @@ -209,7 +248,7 @@ impl Vm { .hash, 
); - pubdata_before = pubdata_published; + pubdata_before = pubdata_after; let refund_value = refunds.operator_suggested_refund; self.write_to_bootloader_heap([( OPERATOR_REFUNDS_OFFSET + current_tx_index, @@ -305,7 +344,12 @@ impl Vm { } }; - (result, refunds) + VmRunResult { + execution_result, + execution_ended, + refunds, + pubdata_published, + } } fn get_hook_params(&self) -> [U256; 3] { @@ -419,10 +463,10 @@ impl Vm { } // FIXME: restore this function once fast vm is enabled - // #[cfg(test)] - // pub(super) fn enforce_state_diffs(&mut self, diffs: Vec) { - // self.enforced_state_diffs = Some(diffs); - // } + #[cfg(test)] + pub(super) fn enforce_state_diffs(&mut self, diffs: Vec) { + self.enforced_state_diffs = Some(diffs); + } fn compute_state_diffs(&mut self) -> Vec { #[cfg(test)] @@ -431,24 +475,24 @@ impl Vm { } let storage = &mut self.world.storage; - let diffs = self.inner.world_diff().get_storage_changes().map( - move |((address, key), (initial_value, final_value))| { - let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); - StateDiffRecord { - address, - key, - derived_key: - zk_evm_1_5_0::aux_structures::LogQuery::derive_final_address_for_params( - &address, &key, - ), - enumeration_index: storage - .get_enumeration_index(&storage_key) - .unwrap_or_default(), - initial_value: initial_value.unwrap_or_default(), - final_value, - } - }, - ); + let diffs = + self.inner + .world_diff() + .get_storage_changes() + .map(move |((address, key), change)| { + let storage_key = + StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); + StateDiffRecord { + address, + key, + derived_key: LogQuery::derive_final_address_for_params(&address, &key), + enumeration_index: storage + .get_enumeration_index(&storage_key) + .unwrap_or_default(), + initial_value: change.before, + final_value: change.after, + } + }); diffs .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) .collect() @@ -478,9 +522,9 @@ impl Vm { events, deduplicated_storage_logs: world_diff .get_storage_changes() - .map(|((address, key), (_, value))| StorageLog { + .map(|((address, key), change)| StorageLog { key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)), - value: u256_to_h256(value), + value: u256_to_h256(change.after), kind: StorageLogKind::RepeatedWrite, // Initialness doesn't matter here }) .collect(), @@ -491,33 +535,10 @@ impl Vm { pubdata_costs: world_diff.pubdata_costs().to_vec(), } } -} - -impl VmFactory> for Vm, Tr> -where - S: ReadStorage, - Tr: Tracer + Default + 'static, -{ - fn new( - batch_env: L1BatchEnv, - system_env: SystemEnv, - storage: StoragePtr>, - ) -> Self { - let storage = ImmutableStorageView::new(storage); - Self::custom(batch_env, system_env, storage) - } -} -impl VmInterface for Vm { - type TracerDispatcher = Tr; - - fn push_transaction(&mut self, tx: zksync_types::Transaction) { - self.push_transaction_inner(tx, 0, true); - } - - fn inspect( + pub(crate) fn inspect_inner( &mut self, - tracer: &mut Self::TracerDispatcher, + tracer: &mut Tr, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { let mut track_refunds = false; @@ -528,18 +549,19 @@ impl VmInterface for Vm { } let start = self.inner.world_diff().snapshot(); - let pubdata_before = self.inner.pubdata(); let gas_before = self.gas_remaining(); let mut full_tracer = (mem::take(tracer), CircuitsTracer::default()); - let (result, refunds) = self.run(execution_mode, &mut full_tracer, track_refunds); + let result = self.run(execution_mode, &mut full_tracer, track_refunds); 
*tracer = full_tracer.0; // place the tracer back - let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx) - && matches!(result, ExecutionResult::Halt { .. }); + let ignore_world_diff = + matches!(execution_mode, VmExecutionMode::OneTx) && result.should_ignore_vm_logs(); // If the execution is halted, the VM changes are expected to be rolled back by the caller. // Earlier VMs return empty execution logs in this case, so we follow this behavior. + // Likewise, if a revert has reached the bootloader frame (possible with `TxExecutionMode::EthCall`; otherwise, the bootloader catches reverts), + // old VMs revert all logs; the new VM doesn't do that automatically, so we recreate this behavior here. let logs = if ignore_world_diff { VmExecutionLogs::default() } else { @@ -557,7 +579,7 @@ impl VmInterface for Vm { StorageLogKind::RepeatedWrite }, }, - previous_value: u256_to_h256(change.before.unwrap_or_default()), + previous_value: u256_to_h256(change.before), }) .collect(); let events = merge_events( @@ -585,26 +607,65 @@ impl VmInterface for Vm { } }; - let pubdata_after = self.inner.pubdata(); let gas_remaining = self.gas_remaining(); + let gas_used = gas_before - gas_remaining; + VmExecutionResultAndLogs { - result, + result: result.execution_result, logs, // TODO (PLA-936): Fill statistics; investigate whether they should be zeroed on `Halt` statistics: VmExecutionStatistics { + gas_used: gas_used.into(), + gas_remaining, + computational_gas_used: gas_used, // since 1.5.0, this always has the same value as `gas_used` + pubdata_published: result.pubdata_published, + circuit_statistic: full_tracer.1.circuit_statistic(), contracts_used: 0, cycles_used: 0, - gas_used: (gas_before - gas_remaining).into(), - gas_remaining, - computational_gas_used: 0, total_log_queries: 0, - pubdata_published: (pubdata_after - pubdata_before).max(0) as u32, - circuit_statistic: full_tracer.1.circuit_statistic(), }, - refunds, + refunds: result.refunds, + new_known_factory_deps: None, + } + } +} + +impl VmFactory> for Vm, Tr> +where + S: ReadStorage, + Tr: Tracer + Default + 'static, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let storage = ImmutableStorageView::new(storage); + Self::custom(batch_env, system_env, storage) + } +} + +impl VmInterface for Vm { + type TracerDispatcher = Tr; + + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_inner(tx, 0, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), } } + fn inspect( + &mut self, + tracer: &mut Self::TracerDispatcher, + execution_mode: InspectExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(tracer, execution_mode.into()) + } + fn inspect_transaction_with_bytecode_compression( &mut self, tracer: &mut Self::TracerDispatcher, @@ -612,7 +673,7 @@ impl VmInterface for Vm { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_inner(tx, 0, with_compression); - let result = self.inspect(tracer, VmExecutionMode::OneTx); + let result = self.inspect(tracer, InspectExecutionMode::OneTx); let compression_result = if self.has_unpublished_bytecodes() { Err(BytecodeCompressionError::BytecodeCompressionFailed) @@ -629,12 +690,8 @@ impl VmInterface for Vm { self.bootloader_state.start_new_l2_block(l2_block_env) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - todo!("Unused during 
batch execution") - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut Tr::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut Tr::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { @@ -745,20 +802,27 @@ impl World { } impl zksync_vm2::StorageInterface for World { - fn read_storage(&mut self, contract: H160, key: U256) -> Option { + fn read_storage(&mut self, contract: H160, key: U256) -> StorageSlot { let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); - if self.storage.is_write_initial(key) { - None - } else { - Some(self.storage.read_value(key).as_bytes().into()) + let value = U256::from_big_endian(self.storage.read_value(key).as_bytes()); + // `is_write_initial` value can be true even if the slot has previously been written to / has non-zero value! + // This can happen during oneshot execution (i.e., executing a single transaction) since it emulates + // execution starting in the middle of a batch in the general case. Hence, a slot that was first written to in the batch + // must still be considered an initial write by the refund logic. + let is_write_initial = self.storage.is_write_initial(key); + StorageSlot { + value, + is_write_initial, } } - fn cost_of_writing_storage(&mut self, initial_value: Option, new_value: U256) -> u32 { - let is_initial = initial_value.is_none(); - let initial_value = initial_value.unwrap_or_default(); + fn read_storage_value(&mut self, contract: H160, key: U256) -> U256 { + let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); + U256::from_big_endian(self.storage.read_value(key).as_bytes()) + } - if initial_value == new_value { + fn cost_of_writing_storage(&mut self, slot: StorageSlot, new_value: U256) -> u32 { + if slot.value == new_value { return 0; } @@ -772,10 +836,9 @@ impl zksync_vm2::StorageInterface for World { // For value compression, we use a metadata byte which holds the length of the value and the operation from the // previous state to the new state, and the compressed value. The maximum for this is 33 bytes. // Total bytes for initial writes then becomes 65 bytes and repeated writes becomes 38 bytes. 
- let compressed_value_size = - compress_with_best_strategy(initial_value, new_value).len() as u32; + let compressed_value_size = compress_with_best_strategy(slot.value, new_value).len() as u32; - if is_initial { + if slot.is_write_initial { (BYTES_PER_DERIVED_KEY as u32) + compressed_value_size } else { (BYTES_PER_ENUMERATION_INDEX as u32) + compressed_value_size diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index 122704c24b9..2085bbaba31 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -1,14 +1,15 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; -use zksync_types::{commitment::PubdataParams, L2ChainId, U256}; +use zksync_types::{L2ChainId, ProtocolVersionId, U256}; +use zksync_vm_interface::pubdata::PubdataBuilder; -use super::{ - tx::BootloaderTx, - utils::{apply_pubdata_to_memory, get_encoded_pubdata}, -}; +use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, + interface::{ + pubdata::PubdataInput, BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, + TxExecutionMode, + }, vm_latest::{ bootloader_state::{ l2_block::BootloaderL2Block, @@ -16,9 +17,8 @@ use crate::{ utils::{apply_l2_block, apply_tx_to_memory}, }, constants::TX_DESCRIPTION_OFFSET, - types::internals::{PubdataInput, TransactionData}, + types::internals::TransactionData, utils::l2_blocks::assert_next_block, - MultiVMSubversion, }, }; @@ -49,10 +49,8 @@ pub struct BootloaderState { free_tx_offset: usize, /// Information about the the pubdata that will be needed to supply to the L1Messenger pubdata_information: OnceCell, - /// Params related to how the pubdata should be processed by the bootloader in the batch - pubdata_params: PubdataParams, - /// VM subversion - subversion: MultiVMSubversion, + /// Protocol version. 
+ protocol_version: ProtocolVersionId, } impl BootloaderState { @@ -60,8 +58,7 @@ impl BootloaderState { execution_mode: TxExecutionMode, initial_memory: BootloaderMemory, first_l2_block: L2BlockEnv, - pubdata_params: PubdataParams, - subversion: MultiVMSubversion, + protocol_version: ProtocolVersionId, ) -> Self { let l2_block = BootloaderL2Block::new(first_l2_block, 0); Self { @@ -72,8 +69,7 @@ impl BootloaderState { execution_mode, free_tx_offset: 0, pubdata_information: Default::default(), - pubdata_params, - subversion, + protocol_version, } } @@ -154,22 +150,13 @@ impl BootloaderState { .expect("Pubdata information is not set") } - pub(crate) fn get_encoded_pubdata(&self) -> Vec { + pub(crate) fn settlement_layer_pubdata(&self, pubdata_builder: &dyn PubdataBuilder) -> Vec { let pubdata_information = self .pubdata_information .get() - .expect("Pubdata information is not set") - .clone(); + .expect("Pubdata information is not set"); - match self.subversion { - MultiVMSubversion::SmallBootloaderMemory - | MultiVMSubversion::IncreasedBootloaderMemory => { - pubdata_information.build_pubdata_legacy(false) - } - MultiVMSubversion::Gateway => { - get_encoded_pubdata(pubdata_information, self.pubdata_params, false) - } - } + pubdata_builder.settlement_layer_pubdata(pubdata_information, self.protocol_version) } fn last_mut_l2_block(&mut self) -> &mut BootloaderL2Block { @@ -177,7 +164,10 @@ impl BootloaderState { } /// Apply all bootloader transaction to the initial memory - pub(crate) fn bootloader_memory(&self) -> BootloaderMemory { + pub(crate) fn bootloader_memory( + &self, + pubdata_builder: &dyn PubdataBuilder, + ) -> BootloaderMemory { let mut initial_memory = self.initial_memory.clone(); let mut offset = 0; let mut compressed_bytecodes_offset = 0; @@ -205,15 +195,14 @@ impl BootloaderState { let pubdata_information = self .pubdata_information - .clone() - .into_inner() + .get() .expect("Empty pubdata information"); apply_pubdata_to_memory( &mut initial_memory, + pubdata_builder, pubdata_information, - self.pubdata_params, - self.subversion, + self.protocol_version, ); initial_memory } @@ -328,11 +317,7 @@ impl BootloaderState { } } - pub(crate) fn get_pubdata_params(&self) -> PubdataParams { - self.pubdata_params - } - - pub(crate) fn get_vm_subversion(&self) -> MultiVMSubversion { - self.subversion + pub(crate) fn protocol_version(&self) -> ProtocolVersionId { + self.protocol_version } } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index 09085402b0d..c409bda35c1 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -1,12 +1,12 @@ -use zksync_types::{ - commitment::{L1BatchCommitmentMode, PubdataParams}, - ethabi, U256, -}; +use zksync_types::{ethabi, ProtocolVersionId, U256}; use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + interface::{ + pubdata::{PubdataBuilder, PubdataInput}, + BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode, + }, utils::bytecode, vm_latest::{ bootloader_state::l2_block::BootloaderL2Block, @@ -17,11 +17,6 @@ use crate::{ TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, }, - types::internals::{ - pubdata::{PubdataBuilder, RollupPubdataBuilder, 
ValidiumPubdataBuilder}, - PubdataInput, - }, - MultiVMSubversion, }, }; @@ -131,70 +126,60 @@ fn apply_l2_block_inner( ]) } -pub(crate) fn get_encoded_pubdata( - pubdata_information: PubdataInput, - pubdata_params: PubdataParams, - l2_version: bool, +fn bootloader_memory_input( + pubdata_builder: &dyn PubdataBuilder, + input: &PubdataInput, + protocol_version: ProtocolVersionId, ) -> Vec { - let pubdata_bytes: Vec = if pubdata_params.pubdata_type == L1BatchCommitmentMode::Rollup { - RollupPubdataBuilder::new().build_pubdata(pubdata_information, l2_version) - } else { - ValidiumPubdataBuilder::new().build_pubdata(pubdata_information, l2_version) - }; + let l2_da_validator_address = pubdata_builder.l2_da_validator(); + let operator_input = pubdata_builder.l1_messenger_operator_input(input, protocol_version); - if l2_version { - ethabi::encode(&[ - ethabi::Token::Address(pubdata_params.l2_da_validator_address), - ethabi::Token::Bytes(pubdata_bytes), - ]) - .to_vec() - } else { - pubdata_bytes - } + ethabi::encode(&[ + ethabi::Token::Address(l2_da_validator_address), + ethabi::Token::Bytes(operator_input), + ]) } pub(crate) fn apply_pubdata_to_memory( memory: &mut BootloaderMemory, - pubdata_information: PubdataInput, - pubdata_params: PubdataParams, - subversion: MultiVMSubversion, + pubdata_builder: &dyn PubdataBuilder, + pubdata_information: &PubdataInput, + protocol_version: ProtocolVersionId, ) { - let (l1_messenger_pubdata_start_slot, pubdata) = match subversion { - MultiVMSubversion::SmallBootloaderMemory | MultiVMSubversion::IncreasedBootloaderMemory => { - // Skipping two slots as they will be filled by the bootloader itself: - // - One slot is for the selector of the call to the L1Messenger. - // - The other slot is for the 0x20 offset for the calldata. - let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; - - // Need to skip first word as it represents array offset - // while bootloader expects only [len || data] - let pubdata = ethabi::encode(&[ethabi::Token::Bytes( - pubdata_information.build_pubdata_legacy(true), - )])[32..] - .to_vec(); - - assert!( - pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, - "The encoded pubdata is too big" - ); - - (l1_messenger_pubdata_start_slot, pubdata) - } - MultiVMSubversion::Gateway => { - // Skipping the first slot as it will be filled by the bootloader itself: - // It is for the selector of the call to the L1Messenger. - let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 1; - - let pubdata = get_encoded_pubdata(pubdata_information, pubdata_params, true); - - assert!( - // Note that unlike the previous version, the difference is `1`, since now it also includes the offset - pubdata.len() / 32 < OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS, - "The encoded pubdata is too big" - ); - - (l1_messenger_pubdata_start_slot, pubdata) - } + let (l1_messenger_pubdata_start_slot, pubdata) = if protocol_version.is_pre_gateway() { + // Skipping two slots as they will be filled by the bootloader itself: + // - One slot is for the selector of the call to the L1Messenger. + // - The other slot is for the 0x20 offset for the calldata. 
+ let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; + + // Need to skip first word as it represents array offset + // while bootloader expects only [len || data] + let pubdata = ethabi::encode(&[ethabi::Token::Bytes( + pubdata_builder.l1_messenger_operator_input(pubdata_information, protocol_version), + )])[32..] + .to_vec(); + + assert!( + pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, + "The encoded pubdata is too big" + ); + + (l1_messenger_pubdata_start_slot, pubdata) + } else { + // Skipping the first slot as it will be filled by the bootloader itself: + // It is for the selector of the call to the L1Messenger. + let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 1; + + let pubdata = + bootloader_memory_input(pubdata_builder, pubdata_information, protocol_version); + + assert!( + // Note that unlike the previous version, the difference is `1`, since now it also includes the offset + pubdata.len() / 32 < OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS, + "The encoded pubdata is too big" + ); + + (l1_messenger_pubdata_start_slot, pubdata) }; pubdata @@ -227,8 +212,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index 2f23bfb89f0..d9331720ce2 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -14,6 +14,7 @@ use crate::{ circuits_capacity::circuit_statistic_from_cycles, dispatcher::TracerDispatcher, DefaultExecutionTracer, PubdataTracer, RefundsTracer, }, + utils::extract_bytecodes_marked_as_known, vm::Vm, }, HistoryMode, @@ -55,6 +56,10 @@ impl Vm { .then_some(RefundsTracer::new(self.batch_env.clone(), self.subversion)); let mut tx_tracer: DefaultExecutionTracer = DefaultExecutionTracer::new( self.system_env.default_validation_computational_gas_limit, + self.system_env + .base_system_smart_contracts + .evm_emulator + .is_some(), execution_mode, mem::take(dispatcher), self.storage.clone(), @@ -64,7 +69,7 @@ impl Vm { self.batch_env.clone(), execution_mode, self.subversion, - self.system_env.version, + None, )) }), self.subversion, @@ -96,6 +101,8 @@ impl Vm { circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics), ); let result = tx_tracer.result_tracer.into_result(); + let factory_deps_marked_as_known = extract_bytecodes_marked_as_known(&logs.events); + let new_known_factory_deps = self.decommit_bytecodes(&factory_deps_marked_as_known); *dispatcher = tx_tracer.dispatcher; let result = VmExecutionResultAndLogs { @@ -103,6 +110,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: Some(new_known_factory_deps), }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index 34c1e1f81da..c1cf1504356 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs 
@@ -51,7 +51,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs index 98d71efa00f..6dd73866adf 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs @@ -59,7 +59,12 @@ impl Vm { tx: Transaction, with_compression: bool, ) { - let tx: TransactionData = tx.into(); + let use_evm_emulator = self + .system_env + .base_system_smart_contracts + .evm_emulator + .is_some(); + let tx = TransactionData::new(tx, use_evm_emulator); let overhead = tx.overhead_gas(); self.push_raw_transaction(tx, overhead, 0, with_compression); } diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs index 0315aa38327..d91fbfdb24d 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs @@ -5,9 +5,7 @@ use zk_evm_1_5_0::{ aux_structures::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, - zkevm_opcode_defs::{ - ContractCodeSha256, VersionedHashDef, VersionedHashHeader, VersionedHashNormalizedPreimage, - }, + zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, }; use zksync_types::{H256, U256}; use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; @@ -166,8 +164,8 @@ impl DecommittmentProcess _monotonic_cycle_counter: u32, mut partial_query: DecommittmentQuery, ) -> anyhow::Result { - let (stored_hash, length) = stored_hash_from_query(&partial_query); - partial_query.decommitted_length = length; + let versioned_hash = VersionedCodeHash::from_query(&partial_query); + let stored_hash = versioned_hash.to_stored_hash(); if let Some(memory_page) = self .decommitted_code_hashes @@ -178,10 +176,10 @@ impl DecommittmentProcess { partial_query.is_fresh = false; partial_query.memory_page = MemoryPage(memory_page); + partial_query.decommitted_length = versioned_hash.get_preimage_length() as u16; Ok(partial_query) } else { - partial_query.is_fresh = true; if self .decommitted_code_hashes .inner() @@ -190,7 +188,9 @@ impl DecommittmentProcess { self.decommitted_code_hashes .insert(stored_hash, None, partial_query.timestamp); - } + }; + partial_query.is_fresh = true; + partial_query.decommitted_length = versioned_hash.get_preimage_length() as u16; Ok(partial_query) } @@ -204,11 +204,10 @@ impl DecommittmentProcess memory: &mut M, ) -> anyhow::Result>> { assert!(partial_query.is_fresh); - self.decommitment_requests.push((), partial_query.timestamp); - let stored_hash = stored_hash_from_query(&partial_query).0; - + let versioned_hash = VersionedCodeHash::from_query(&partial_query); + let stored_hash = versioned_hash.to_stored_hash(); // We are fetching a fresh bytecode that we didn't read before. 
let values = self.get_bytecode(stored_hash, partial_query.timestamp); let page_to_use = partial_query.memory_page; @@ -251,28 +250,49 @@ impl DecommittmentProcess } } -fn concat_header_and_preimage( - header: VersionedHashHeader, - normalized_preimage: VersionedHashNormalizedPreimage, -) -> [u8; 32] { - let mut buffer = [0u8; 32]; +#[derive(Debug)] +// TODO: consider moving this to the zk-evm crate +enum VersionedCodeHash { + ZkEVM(VersionedHashHeader, VersionedHashNormalizedPreimage), + Evm(VersionedHashHeader, VersionedHashNormalizedPreimage), +} - buffer[0..4].copy_from_slice(&header.0); - buffer[4..32].copy_from_slice(&normalized_preimage.0); +impl VersionedCodeHash { + fn from_query(query: &DecommittmentQuery) -> Self { + match query.header.0[0] { + 1 => Self::ZkEVM(query.header, query.normalized_preimage), + 2 => Self::Evm(query.header, query.normalized_preimage), + _ => panic!("Unsupported hash version"), + } + } - buffer -} + /// Returns the hash in the format it is stored in the DB. + fn to_stored_hash(&self) -> U256 { + let (header, preimage) = match self { + Self::ZkEVM(header, preimage) => (header, preimage), + Self::Evm(header, preimage) => (header, preimage), + }; -/// For a given decommitment query, returns a pair of the stored hash as U256 and the length of the preimage in 32-byte words. -fn stored_hash_from_query(partial_query: &DecommittmentQuery) -> (U256, u16) { - let full_hash = - concat_header_and_preimage(partial_query.header, partial_query.normalized_preimage); + let mut hash = [0u8; 32]; + hash[0..4].copy_from_slice(&header.0); + hash[4..32].copy_from_slice(&preimage.0); - let versioned_hash = - ContractCodeSha256::try_deserialize(full_hash).expect("Invalid ContractCodeSha256 hash"); + // Hash[1] is used in both of the versions to denote whether the bytecode is being constructed. + // We ignore this param. + hash[1] = 0; - let stored_hash = H256(ContractCodeSha256::serialize_to_stored(versioned_hash).unwrap()); - let length = versioned_hash.code_length_in_words; + h256_to_u256(H256(hash)) + } - (h256_to_u256(stored_hash), length) + fn get_preimage_length(&self) -> u32 { + // In zkEVM the hash[2..3] denotes the length of the preimage in words, while + // in EVM the hash[2..3] denotes the length of the preimage in bytes. 
+ match self { + Self::ZkEVM(header, _) => { + let length_in_words = header.0[2] as u32 * 256 + header.0[3] as u32; + length_in_words * 32 + } + Self::Evm(header, _) => header.0[2] as u32 * 256 + header.0[3] as u32, + } + } } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs index e4948f18030..df4a36f2d3d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -1,430 +1,9 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, - l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, ProtocolVersionId, H256, - U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tests::tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTesterBuilder, - }, - tracers::PubdataTracer, - HistoryEnabled, TracerDispatcher, - }, + versions::testonly::block_tip::test_dry_run_upper_bound, + vm_latest::{HistoryEnabled, Vm}, }; -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -const CALLS_PER_TX: usize = 1_000; -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .chunks(CALLS_PER_TX) - .into_iter() - .map(|chunk| { - complex_upgrade - .function("mimicCalls") - .unwrap() - 
.encode_input(&[Token::Array(chunk.collect_vec())]) - .unwrap() - }) - .collect_vec(); - - encoded_calls -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute - // the gas limit - - let batch_env = L1BatchEnv { - fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), - ..default_l1_batch(zksync_types::L1BatchNumber(1)) - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_l1_batch_env(batch_env) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let txs_data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - - for (i, data) in txs_data.into_iter().enumerate() { - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), - calldata: data, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {:#?}", - test_data - ); - } - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - crate::vm_latest::MultiVMSubversion::latest(), - ProtocolVersionId::Version25, - ); - - let result = vm.vm.inspect_inner( - &mut TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used - ); - - TestStatistics { - max_used_gas: ergs_before - ergs_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index 
= if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has odd number of 32 byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} - #[test] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let statistics = vec![ - // max logs - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }, - // max messages - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }, - // long message - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }, - // max bytecodes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
- // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }, - // long bytecode - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![ - vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; - 1 - ], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }, - // lots of small repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }, - // lots of big repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - true, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, - ), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }, - // lots of small initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs( - false, - true, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, - ), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }, - // lots of large initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - false, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, - ), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }, - ]; - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
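// [Editor's aside, not part of the patch; a minimal sketch under stated assumptions.]
// The removed test above measures the worst batch-tip cost across all scenarios, and the
// asserts that follow require each `BOOTLOADER_BATCH_TIP_*` constant to exceed that worst
// case with a 1.5x safety factor (the "2x" in the comment above is the informal intent;
// the code checks `worst_case * 3 / 2 <= constant`). The helper below only restates that
// check; its name and signature are illustrative and not part of the codebase.
fn assert_overhead_covers(worst_case: u64, overhead_constant: u64, tag: &str) {
    // Fails if the constant leaves less than a 1.5x margin over the measured worst case.
    assert!(
        worst_case * 3 / 2 <= overhead_constant,
        "overhead constant too low for scenario `{tag}`: measured {worst_case}, constant {overhead_constant}"
    );
}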
-    let max_used_gas = statistics
-        .iter()
-        .map(|s| (s.statistics.max_used_gas, s.tag.clone()))
-        .max()
-        .unwrap();
-    assert!(
-        max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD,
-        "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}",
-        max_used_gas.1,
-        max_used_gas.0,
-        BOOTLOADER_BATCH_TIP_OVERHEAD
-    );
-
-    let circuit_statistics = statistics
-        .iter()
-        .map(|s| (s.statistics.circuit_statistics, s.tag.clone()))
-        .max()
-        .unwrap();
-    assert!(
-        circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64,
-        "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}",
-        circuit_statistics.1,
-        circuit_statistics.0,
-        BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD
-    );
-
-    let execution_metrics_size = statistics
-        .iter()
-        .map(|s| (s.statistics.execution_metrics_size, s.tag.clone()))
-        .max()
-        .unwrap();
-    assert!(
-        execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64,
-        "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}",
-        execution_metrics_size.1,
-        execution_metrics_size.0,
-        BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD
-    );
+fn dry_run_upper_bound() {
+    test_dry_run_upper_bound::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs
index 9d23f658cb8..22239a6c1e3 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs
@@ -1,57 +1,14 @@
-use assert_matches::assert_matches;
-use zksync_types::U256;
-
 use crate::{
-    interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt},
-    vm_latest::{
-        constants::BOOTLOADER_HEAP_PAGE,
-        tests::{
-            tester::VmTesterBuilder,
-            utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS},
-        },
-        HistoryEnabled,
-    },
+    versions::testonly::bootloader::{test_bootloader_out_of_gas, test_dummy_bootloader},
+    vm_latest::{HistoryEnabled, Vm},
 };

 #[test]
-fn test_dummy_bootloader() {
-    let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone();
-    base_system_contracts.bootloader = get_bootloader("dummy");
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(base_system_contracts)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
-
-    let result = vm.vm.execute(VmExecutionMode::Batch);
-    assert!(!result.result.is_failed());
-
-    let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap();
-    verify_required_memory(
-        &vm.vm.state,
-        vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)],
-    );
+fn dummy_bootloader() {
+    test_dummy_bootloader::<Vm<_, HistoryEnabled>>();
 }

 #[test]
-fn test_bootloader_out_of_gas() {
-    let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone();
-    base_system_contracts.bootloader = get_bootloader("dummy");
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(base_system_contracts)
-        .with_bootloader_gas_limit(10)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
-
-    let res = vm.vm.execute(VmExecutionMode::Batch);
-
-    assert_matches!(
-        res.result,
-        ExecutionResult::Halt {
-            reason: Halt::BootloaderOutOfGas
-        }
-    );
+fn bootloader_out_of_gas() {
+    test_bootloader_out_of_gas::<Vm<_, HistoryEnabled>>();
 }
diff --git
a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs index 2ed9948af81..e0727fbed89 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs @@ -1,41 +1,9 @@ use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::bytecode, - vm_latest::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, + versions::testonly::bytecode_publishing::test_bytecode_publishing, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); +fn bytecode_publishing() { + test_bytecode_publishing::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs index e7f26b7faf8..b502ea50b1a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs @@ -3,17 +3,14 @@ use std::sync::Arc; use once_cell::sync::OnceCell; use zksync_types::{Address, Execute}; +use super::TestedLatestVm; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterface}, tracers::CallTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, + versions::testonly::{ + read_max_depth_contract, read_test_contract, ContractToDeploy, VmTesterBuilder, }, + vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, ToTracerPointer}, }; // This test is ultra slow, so it's ignored by default. 
@@ -22,14 +19,13 @@ use crate::{ fn test_max_depth() { let contarct = read_max_depth_contract(); let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); + .with_custom_contracts(vec![ContractToDeploy::account(contarct, address)]) + .build::(); let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( @@ -47,23 +43,22 @@ fn test_max_depth() { vm.vm.push_transaction(tx); let res = vm .vm - .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); + .inspect(&mut call_tracer.into(), InspectExecutionMode::OneTx); assert!(result.get().is_some()); assert!(res.result.is_failed()); } #[test] fn test_basic_behavior() { - let contarct = read_test_contract(); + let contract = read_test_contract(); let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) + .build::(); let increment_by_6_calldata = "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; @@ -84,7 +79,7 @@ fn test_basic_behavior() { vm.vm.push_transaction(tx); let res = vm .vm - .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); + .inspect(&mut call_tracer.into(), InspectExecutionMode::OneTx); let call_tracer_result = result.get().unwrap(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs index c3c6816cbd8..690af7d2a35 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs @@ -1,76 +1,9 @@ -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, + versions::testonly::circuits::test_circuits, + vm_latest::{HistoryEnabled, Vm}, }; -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. #[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(Address::random()), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. 
- const EXPECTED: [f32; 13] = [ - 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, - 0.0, 0.0, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - (s.secp256k1_verify, "secp256k1_verify"), - (s.transient_storage_checker, "transient_storage_checker"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } +fn circuits() { + test_circuits::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs index b15ef7fde2b..e50e2aafcbf 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs @@ -1,282 +1,21 @@ -use ethabi::Token; -use zk_evm_1_5_0::{ - aux_structures::{MemoryPage, Timestamp}, - zkevm_opcode_defs::{ContractCodeSha256Format, VersionedHashLen32}, -}; -use zksync_types::{ - get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, - }, - HistoryEnabled, + versions::testonly::code_oracle::{ + test_code_oracle, test_code_oracle_big_bytecode, test_refunds_in_code_oracle, }, + vm_latest::{HistoryEnabled, Vm}, }; -fn generate_large_bytecode() -> Vec { - // This is the maximal possible size of a zkEVM bytecode - vec![2u8; ((1 << 16) - 1) * 32] -} - #[test] -fn test_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - // Filling the zkevm bytecode - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode, - precompiles_contract_address, - false, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(normal_zkevm_bytecode_hash), - bytes_to_be_words(normal_zkevm_bytecode), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // Now, we ask for the same bytecode. We use to partially check whether the memory page with - // the decommitted bytecode gets erased (it shouldn't). - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); -} - -fn find_code_oracle_cost_log( - precompiles_contract_address: Address, - logs: &[StorageLogWithPreviousValue], -) -> &StorageLogWithPreviousValue { - logs.iter() - .find(|log| { - *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() - }) - .expect("no code oracle cost log") +fn code_oracle() { + test_code_oracle::>(); } #[test] -fn test_code_oracle_big_bytecode() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let big_zkevm_bytecode = generate_large_bytecode(); - let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); - let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); - - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&big_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
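// [Editor's aside, not part of the patch; a small worked check under stated assumptions.]
// The "big bytecode" used by the removed test below comes from `generate_large_bytecode()`
// above, which builds the largest bytecode a zkEVM contract may have: (2^16 - 1) words of
// 32 bytes each. The constants below only spell out that arithmetic; their names are
// illustrative and not part of the codebase.
const MAX_ZKEVM_BYTECODE_WORDS: usize = (1 << 16) - 1; // 65_535 words
const MAX_ZKEVM_BYTECODE_BYTES: usize = MAX_ZKEVM_BYTECODE_WORDS * 32; // 2_097_120 bytes, 32 bytes short of 2 MiB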
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode, - precompiles_contract_address, - false, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(big_zkevm_bytecode_hash), - bytes_to_be_words(big_zkevm_bytecode), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); +fn code_oracle_big_bytecode() { + test_code_oracle_big_bytecode::>(); } #[test] fn refunds_in_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_words = bytes_to_be_words(normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - // Execute code oracle twice with identical VM state that only differs in that the queried bytecode - // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas - // for already decommitted codes). 
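// [Editor's aside, not part of the patch; a minimal sketch under stated assumptions.]
// Per the comment above and the assertion at the end of the removed test, the first
// `decommit` of a bytecode is charged 4 ergs per 32-byte word (the `gasCost` passed by
// `CodeOracle.yul`), and a repeated decommit of the same bytecode is fully refunded.
// The helper name below is illustrative only.
fn expected_code_oracle_refund(bytecode_len_in_words: usize) -> u64 {
    // The refund observed on the second call equals the cost charged on the first call.
    4 * bytecode_len_in_words as u64
}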
- let mut oracle_costs = vec![]; - for decommit in [false, true] { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode.clone(), - precompiles_contract_address, - false, - )]) - .with_storage(storage.clone()) - .build(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(normal_zkevm_bytecode_hash), - normal_zkevm_bytecode_words.clone(), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - if decommit { - let (header, normalized_preimage) = - ContractCodeSha256Format::normalize_for_decommitment(&normal_zkevm_bytecode_hash.0); - let query = vm - .vm - .state - .prepare_to_decommit( - 0, - header, - normalized_preimage, - MemoryPage(123), - Timestamp(0), - ) - .unwrap(); - - assert!(query.is_fresh); - vm.vm.state.execute_decommit(0, query).unwrap(); - } - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - let log = - find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); - oracle_costs.push(log.log.value); - } - - // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` - // in `CodeOracle.yul`. - let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); - assert_eq!( - code_oracle_refund, - (4 * normal_zkevm_bytecode_words.len()).into() - ); + test_refunds_in_code_oracle::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs index ad00bbb1574..3d0e21c2466 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs @@ -1,86 +1,9 @@ -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - HistoryEnabled, - }, + versions::testonly::default_aa::test_default_aa_interaction, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
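// [Editor's aside, not part of the patch; a minimal sketch under stated assumptions.]
// The fee check in this test (now performed by the shared `test_default_aa_interaction`)
// reduces to "the operator receives the gas the transaction paid for, minus the refunded
// gas, both priced at the batch base fee". The helper name below is illustrative only.
use zksync_types::U256;

fn expected_operator_fee(gas_limit: U256, gas_refunded: u64, batch_base_fee: u64) -> U256 {
    // maximal_fee - refund, both expressed at the batch base fee
    gas_limit * batch_base_fee - U256::from(gas_refunded) * batch_base_fee
}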
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - if batch_result.result.is_failed() { - panic!("Batch execution failed: {:?}", batch_result.result); - } - assert!( - !batch_result.result.is_failed(), - "Transaction wasn't successful" - ); - - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); +fn default_aa_interaction() { + test_default_aa_interaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs new file mode 100644 index 00000000000..4d6e77aed51 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -0,0 +1,507 @@ +use std::collections::HashMap; + +use ethabi::Token; +use test_casing::{test_casing, Product}; +use zksync_contracts::{load_contract, read_bytecode, SystemContractCode}; +use zksync_system_constants::{ + CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, +}; +use zksync_test_account::TxType; +use zksync_types::{ + get_code_key, get_known_code_key, + utils::{key_for_eth_balance, storage_key_for_eth_balance}, + AccountTreeId, Address, Execute, StorageKey, H256, U256, +}; +use zksync_utils::{ + be_words_to_bytes, + bytecode::{hash_bytecode, hash_evm_bytecode}, + bytes_to_be_words, h256_to_u256, +}; + +use super::TestedLatestVm; +use crate::{ + interface::{ + storage::InMemoryStorage, TxExecutionMode, VmExecutionResultAndLogs, VmInterfaceExt, + }, + versions::testonly::{default_system_env, VmTester, VmTesterBuilder}, +}; + +const MOCK_DEPLOYER_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockContractDeployer.json"; +const MOCK_KNOWN_CODE_STORAGE_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockKnownCodeStorage.json"; +const MOCK_EMULATOR_PATH: &str = + 
"etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockEvmEmulator.json"; +const RECURSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/NativeRecursiveContract.json"; +const INCREMENTING_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/IncrementingContract.json"; + +fn override_system_contracts(storage: &mut InMemoryStorage) { + let mock_deployer = read_bytecode(MOCK_DEPLOYER_PATH); + let mock_deployer_hash = hash_bytecode(&mock_deployer); + let mock_known_code_storage = read_bytecode(MOCK_KNOWN_CODE_STORAGE_PATH); + let mock_known_code_storage_hash = hash_bytecode(&mock_known_code_storage); + + storage.set_value(get_code_key(&CONTRACT_DEPLOYER_ADDRESS), mock_deployer_hash); + storage.set_value( + get_known_code_key(&mock_deployer_hash), + H256::from_low_u64_be(1), + ); + storage.set_value( + get_code_key(&KNOWN_CODES_STORAGE_ADDRESS), + mock_known_code_storage_hash, + ); + storage.set_value( + get_known_code_key(&mock_known_code_storage_hash), + H256::from_low_u64_be(1), + ); + storage.store_factory_dep(mock_deployer_hash, mock_deployer); + storage.store_factory_dep(mock_known_code_storage_hash, mock_known_code_storage); +} + +#[derive(Debug)] +struct EvmTestBuilder { + deploy_emulator: bool, + storage: InMemoryStorage, + evm_contract_addresses: Vec
, +} + +impl EvmTestBuilder { + fn new(deploy_emulator: bool, evm_contract_address: Address) -> Self { + Self { + deploy_emulator, + storage: InMemoryStorage::with_system_contracts(hash_bytecode), + evm_contract_addresses: vec![evm_contract_address], + } + } + + fn with_mock_deployer(mut self) -> Self { + override_system_contracts(&mut self.storage); + self + } + + fn with_evm_address(mut self, address: Address) -> Self { + self.evm_contract_addresses.push(address); + self + } + + fn build(self) -> VmTester { + let mock_emulator = read_bytecode(MOCK_EMULATOR_PATH); + let mut storage = self.storage; + let mut system_env = default_system_env(); + if self.deploy_emulator { + let evm_bytecode: Vec<_> = (0..32).collect(); + let evm_bytecode_hash = hash_evm_bytecode(&evm_bytecode); + storage.set_value( + get_known_code_key(&evm_bytecode_hash), + H256::from_low_u64_be(1), + ); + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), evm_bytecode_hash); + } + + system_env.base_system_smart_contracts.evm_emulator = Some(SystemContractCode { + hash: hash_bytecode(&mock_emulator), + code: bytes_to_be_words(mock_emulator), + }); + } else { + let emulator_hash = hash_bytecode(&mock_emulator); + storage.set_value(get_known_code_key(&emulator_hash), H256::from_low_u64_be(1)); + storage.store_factory_dep(emulator_hash, mock_emulator); + + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), emulator_hash); + // Set `isUserSpace` in the emulator storage to `true`, so that it skips emulator-specific checks + storage.set_value( + StorageKey::new(AccountTreeId::new(evm_address), H256::zero()), + H256::from_low_u64_be(1), + ); + } + } + + VmTesterBuilder::new() + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build() + } +} + +#[test] +fn tracing_evm_contract_deployment() { + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + override_system_contracts(&mut storage); + + let mut system_env = default_system_env(); + // The EVM emulator will not be accessed, so we set it to a dummy value. + system_env.base_system_smart_contracts.evm_emulator = + Some(system_env.base_system_smart_contracts.default_aa.clone()); + let mut vm = VmTesterBuilder::new() + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + let account = &mut vm.rich_accounts[0]; + + let args = [Token::Bytes((0..32).collect())]; + let evm_bytecode = ethabi::encode(&args); + let expected_bytecode_hash = hash_evm_bytecode(&evm_bytecode); + let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); + let deploy_tx = account.get_l2_tx_for_execute(execute, None); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Check that the surrogate EVM bytecode was added to the decommitter. 
+ let known_bytecodes = vm.vm.state.decommittment_processor.known_bytecodes.inner(); + let known_evm_bytecode = + be_words_to_bytes(&known_bytecodes[&h256_to_u256(expected_bytecode_hash)]); + assert_eq!(known_evm_bytecode, evm_bytecode); + + let new_known_factory_deps = vm_result.new_known_factory_deps.unwrap(); + assert_eq!(new_known_factory_deps.len(), 2); // the deployed EraVM contract + EVM contract + assert_eq!( + new_known_factory_deps[&expected_bytecode_hash], + evm_bytecode + ); +} + +#[test] +fn mock_emulator_basics() { + let called_address = Address::repeat_byte(0x23); + let mut vm = EvmTestBuilder::new(true, called_address).build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(called_address), + calldata: vec![], + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +const RECIPIENT_ADDRESS: Address = Address::repeat_byte(0x12); + +/// `deploy_emulator = false` here and below tests the mock emulator as an ordinary contract (i.e., sanity-checks its logic). +#[test_casing(2, [false, true])] +#[test] +fn mock_emulator_with_payment(deploy_emulator: bool) { + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let mut vm = EvmTestBuilder::new(deploy_emulator, RECIPIENT_ADDRESS).build(); + + let mut current_balance = U256::zero(); + for i in 1_u64..=5 { + let transferred_value = (1_000_000_000 * i).into(); + let vm_result = test_payment( + &mut vm, + &mock_emulator_abi, + &mut current_balance, + transferred_value, + ); + + let balance_storage_logs = vm_result.logs.storage_logs.iter().filter_map(|log| { + (*log.log.key.address() == L2_BASE_TOKEN_ADDRESS) + .then_some((*log.log.key.key(), h256_to_u256(log.log.value))) + }); + let balances: HashMap<_, _> = balance_storage_logs.collect(); + assert_eq!( + balances[&key_for_eth_balance(&RECIPIENT_ADDRESS)], + current_balance + ); + } +} + +fn test_payment( + vm: &mut VmTester, + mock_emulator_abi: ðabi::Contract, + balance: &mut U256, + transferred_value: U256, +) -> VmExecutionResultAndLogs { + *balance += transferred_value; + let test_payment_fn = mock_emulator_abi.function("testPayment").unwrap(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(RECIPIENT_ADDRESS), + calldata: test_payment_fn + .encode_input(&[Token::Uint(transferred_value), Token::Uint(*balance)]) + .unwrap(), + value: transferred_value, + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + vm_result +} + +#[test_casing(4, Product(([false, true], [false, true])))] +#[test] +fn mock_emulator_with_recursion(deploy_emulator: bool, is_external: bool) { + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(deploy_emulator, recipient_address).build(); + let account = &mut vm.rich_accounts[0]; + + let test_recursion_fn = mock_emulator_abi + .function(if is_external { + "testExternalRecursion" + } else { + "testRecursion" + }) + .unwrap(); + let mut expected_value = U256::one(); + let depth = 50_u32; + for i in 2..=depth { + expected_value *= i; + } + + let factory_deps = if is_external { + 
vec![read_bytecode(RECURSIVE_CONTRACT_PATH)] + } else { + vec![] + }; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(recipient_address), + calldata: test_recursion_fn + .encode_input(&[Token::Uint(depth.into()), Token::Uint(expected_value)]) + .unwrap(), + value: 0.into(), + factory_deps, + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +#[test] +fn calling_to_mock_emulator_from_native_contract() { + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(true, recipient_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(RECURSIVE_CONTRACT_PATH); + let native_contract_abi = load_contract(RECURSIVE_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx( + &native_contract, + Some(&[Token::Address(recipient_address)]), + TxType::L2, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Call from the native contract to the EVM emulator. + let test_fn = native_contract_abi.function("recurse").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(deploy_tx.address), + calldata: test_fn.encode_input(&[Token::Uint(50.into())]).unwrap(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[test] +fn mock_emulator_with_deployment() { + let contract_address = Address::repeat_byte(0xaa); + let mut vm = EvmTestBuilder::new(true, contract_address) + .with_mock_deployer() + .build(); + let account = &mut vm.rich_accounts[0]; + + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let new_evm_bytecode = vec![0xfe; 96]; + let new_evm_bytecode_hash = hash_evm_bytecode(&new_evm_bytecode); + + let test_fn = mock_emulator_abi.function("testDeploymentAndCall").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(contract_address), + calldata: test_fn + .encode_input(&[ + Token::FixedBytes(new_evm_bytecode_hash.0.into()), + Token::Bytes(new_evm_bytecode.clone()), + ]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + + let factory_deps = vm_result.new_known_factory_deps.unwrap(); + assert_eq!( + factory_deps, + HashMap::from([(new_evm_bytecode_hash, new_evm_bytecode)]) + ); +} + +#[test] +fn mock_emulator_with_delegate_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + let mut vm = builder.with_evm_address(other_evm_contract_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. 
+ let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); + let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = native_contract_abi.function("testDelegateCall").unwrap(); + // Delegate to the native contract from EVM. + test_delegate_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address); + // Delegate to EVM from the native contract. + test_delegate_call(&mut vm, test_fn, deploy_tx.address, evm_contract_address); + // Delegate to EVM from EVM. + test_delegate_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + ); +} + +fn test_delegate_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn.encode_input(&[Token::Address(to)]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +#[test] +fn mock_emulator_with_static_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + // Set differing read values for tested contracts. The slot index is defined in the contract. + let value_slot = H256::from_low_u64_be(0x123); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(evm_contract_address), value_slot), + H256::from_low_u64_be(100), + ); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(other_evm_contract_address), value_slot), + H256::from_low_u64_be(200), + ); + let mut vm = builder.with_evm_address(other_evm_contract_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); + let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = native_contract_abi.function("testStaticCall").unwrap(); + // Call to the native contract from EVM. + test_static_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address, 0); + // Call to EVM from the native contract. + test_static_call( + &mut vm, + test_fn, + deploy_tx.address, + evm_contract_address, + 100, + ); + // Call to EVM from EVM. 
+ test_static_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + 200, + ); +} + +fn test_static_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, + expected_value: u64, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn + .encode_input(&[Token::Address(to), Token::Uint(expected_value.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs index cc9aac5bb91..5aa7ab9e9c7 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs @@ -1,46 +1,9 @@ -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Execute}; - use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_latest::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, + versions::testonly::gas_limit::test_tx_gas_limit_offset, + vm_latest::{HistoryEnabled, Vm}, }; -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. #[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Account::default_fee() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); +fn tx_gas_limit_offset() { + test_tx_gas_limit_offset::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index ef19717a627..7f39915f2b6 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -1,247 +1,22 @@ -use std::{ - collections::{HashMap, HashSet}, - iter, - str::FromStr, -}; - -use assert_matches::assert_matches; -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; -use zk_evm_1_5_0::{ - abstractions::DecommittmentProcessor, - aux_structures::{DecommittmentQuery, MemoryPage, Timestamp}, - zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, -}; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Address, Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; -use zksync_vm_interface::VmExecutionResultAndLogs; - use crate::{ - interface::{ - storage::WriteStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceExt, + versions::testonly::get_used_contracts::{ + test_get_used_contracts, test_get_used_contracts_with_far_call, + 
test_get_used_contracts_with_out_of_gas_far_call, }, - vm_latest::{ - tests::{ - tester::{TxType, VmTester, VmTesterBuilder}, - utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata: big_calldata, - value: Default::default(), - factory_deps: vec![vec![1; 32]], - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -#[test] -fn test_contract_is_used_right_after_prepare_to_decommit() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(vm.vm.get_used_contracts().is_empty()); - - let bytecode_hash = - U256::from_str("0x100067ff3124f394104ab03481f7923f0bc4029a2aa9d41cc1d848c81257185") - .unwrap(); - vm.vm - .state - .decommittment_processor - .populate(vec![(bytecode_hash, vec![])], Timestamp(0)); - - let header = hex::decode("0100067f").unwrap(); - let normalized_preimage = - hex::decode("f3124f394104ab03481f7923f0bc4029a2aa9d41cc1d848c81257185").unwrap(); - vm.vm - .state - .decommittment_processor - .prepare_to_decommit( - 0, - DecommittmentQuery { - header: VersionedHashHeader(header.try_into().unwrap()), - normalized_preimage: VersionedHashNormalizedPreimage( - normalized_preimage.try_into().unwrap(), - ), - timestamp: Timestamp(0), - memory_page: MemoryPage(0), - decommitted_length: 0, - is_fresh: false, - }, - ) - .unwrap(); - - assert_eq!(vm.vm.get_used_contracts(), vec![bytecode_hash]); -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - 
.state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - known_bytecodes_without_aa_code -} - -/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial -/// decommitment cost (>10,000 gas). -fn inflated_counter_bytecode() -> Vec { - let mut counter_bytecode = read_test_contract(); - counter_bytecode.extend( - iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) - .take(10_000) - .flatten(), - ); - counter_bytecode -} - -fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) { - let counter_bytecode = inflated_counter_bytecode(); - let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); - let counter_address = Address::repeat_byte(0x23); - - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx( - &proxy_counter_bytecode, - Some(&[Token::Address(counter_address)]), - TxType::L2, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - !decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(deploy_tx.address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - (vm, counter_bytecode_hash, exec_result) +fn get_used_contracts() { + test_get_used_contracts::>(); } #[test] fn get_used_contracts_with_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); + test_get_used_contracts_with_far_call::>(); } #[test] fn get_used_contracts_with_out_of_gas_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000); - assert_matches!(exec_result.result, ExecutionResult::Revert { .. 
}); - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); + test_get_used_contracts_with_out_of_gas_far_call::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs index 8206cfa9be6..193fc586079 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs @@ -1,49 +1,9 @@ -use zksync_types::get_nonce_key; - use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - vm_latest::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, + versions::testonly::is_write_initial::test_is_write_initial_behaviour, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); +fn is_write_initial_behaviour() { + test_is_write_initial_behaviour::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs index f8e4934050b..fcb718c7349 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs @@ -1,158 +1,156 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_types::{web3::keccak256, Execute, L1_MESSENGER_ADDRESS, U256}; -use zksync_utils::{address_to_h256, u256_to_h256}; -use zksync_vm_interface::VmInterfaceExt; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::ZK_SYNC_BYTES_PER_BLOB, - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::{ - pubdata::{PubdataBuilder, RollupPubdataBuilder}, - PubdataInput, - }, - HistoryEnabled, - }, -}; - -pub(crate) const L2_DA_VALIDATOR_OUTPUT_HASH_KEY: usize = 5; -pub(crate) const USED_L2_DA_VALIDATOR_ADDRESS_KEY: usize = 6; - -pub(crate) fn encoded_uncompressed_state_diffs(input: &PubdataInput) -> Vec { - let mut result = vec![]; - for state_diff in input.state_diffs.iter() { - result.extend(state_diff.encode_padded()); - } - result -} - -pub fn compose_header_for_l1_commit_rollup(input: PubdataInput) -> Vec { - // The preimage under the hash `l2DAValidatorOutputHash` is expected to be in the following format: - // - First 32 bytes are the hash of the uncompressed state diff. - // - Then, there is a 32-byte hash of the full pubdata. - // - Then, there is the 1-byte number of blobs published. - // - Then, there are linear hashes of the published blobs, 32 bytes each. - - let mut full_header = vec![]; - - let uncompressed_state_diffs = encoded_uncompressed_state_diffs(&input); - let uncompressed_state_diffs_hash = keccak256(&uncompressed_state_diffs); - full_header.extend(uncompressed_state_diffs_hash); - - let mut full_pubdata = RollupPubdataBuilder::new().build_pubdata(input, false); - let full_pubdata_hash = keccak256(&full_pubdata); - full_header.extend(full_pubdata_hash); - - // Now, we need to calculate the linear hashes of the blobs. - // Firstly, let's pad the pubdata to the size of the blob. - if full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { - let padding = - vec![0u8; ZK_SYNC_BYTES_PER_BLOB - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB]; - full_pubdata.extend(padding); - } - full_header.push((full_pubdata.len() / ZK_SYNC_BYTES_PER_BLOB) as u8); - - full_pubdata - .chunks(ZK_SYNC_BYTES_PER_BLOB) - .for_each(|chunk| { - full_header.extend(keccak256(chunk)); - }); - - full_header -} - -#[test] -fn test_publish_and_clear_state() { - // In this test, we check whether the L2 DA output hash is as expected. - // We will publish 320kb worth of pubdata. - // It should produce 3 blobs. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, deploy tx. It should publish the bytecode of the "test contract" - let counter = read_test_contract(); - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - // We do not use compression here, to have the bytecode published in full. 
- vm.vm.push_transaction_with_compression(tx, false); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - // Then, we call the l1 messenger to also send an L2->L1 message. - let l1_messenger_contract = l1_messenger_contract(); - let encoded_data = l1_messenger_contract - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(vec![])]) - .unwrap(); - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(L1_MESSENGER_ADDRESS), - calldata: encoded_data, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - if batch_result.result.is_failed() { - panic!("Batch execution failed: {:?}", batch_result.result); - } - assert!( - !batch_result.result.is_failed(), - "Transaction wasn't successful" - ); - let pubdata_input = vm.vm.bootloader_state.get_pubdata_information().clone(); - - // Just to double check that the test makes sense. - assert!(!pubdata_input.user_logs.is_empty()); - assert!(!pubdata_input.l2_to_l1_messages.is_empty()); - assert!(!pubdata_input.published_bytecodes.is_empty()); - assert!(!pubdata_input.state_diffs.is_empty()); - - let expected_header: Vec = compose_header_for_l1_commit_rollup(pubdata_input); - - let l2_da_validator_output_hash = batch_result - .logs - .system_l2_to_l1_logs - .iter() - .find(|log| log.0.key == u256_to_h256(L2_DA_VALIDATOR_OUTPUT_HASH_KEY.into())) - .unwrap() - .0 - .value; - - assert_eq!( - l2_da_validator_output_hash, - keccak256(&expected_header).into() - ); - - let l2_used_da_validator_address = batch_result - .logs - .system_l2_to_l1_logs - .iter() - .find(|log| log.0.key == u256_to_h256(USED_L2_DA_VALIDATOR_ADDRESS_KEY.into())) - .unwrap() - .0 - .value; - - assert_eq!( - l2_used_da_validator_address, - address_to_h256(&vm.vm.system_env.pubdata_params.l2_da_validator_address) - ); -} +// // TODO: move to shared tests +// +// use ethabi::Token; +// use zksync_contracts::l1_messenger_contract; +// use zksync_types::{web3::keccak256, Execute, L1_MESSENGER_ADDRESS, U256}; +// use zksync_utils::{address_to_h256, u256_to_h256}; +// +// use crate::{ +// interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, pubdata::{PubdataInput, PubdataBuilder}}, +// vm_latest::{ +// constants::ZK_SYNC_BYTES_PER_BLOB, +// tests::{ +// tester::{DeployContractsTx, TxType, VmTesterBuilder}, +// utils::read_test_contract, +// }, +// HistoryEnabled, +// }, +// pubdata_builders::RollupPubdataBuilder +// }; +// +// pub(crate) const L2_DA_VALIDATOR_OUTPUT_HASH_KEY: usize = 5; +// pub(crate) const USED_L2_DA_VALIDATOR_ADDRESS_KEY: usize = 6; +// +// pub(crate) fn encoded_uncompressed_state_diffs(input: &PubdataInput) -> Vec { +// let mut result = vec![]; +// for state_diff in input.state_diffs.iter() { +// result.extend(state_diff.encode_padded()); +// } +// result +// } +// +// pub fn compose_header_for_l1_commit_rollup(input: PubdataInput) -> Vec { +// // The preimage under the hash `l2DAValidatorOutputHash` is expected to be in the following format: +// // - First 32 bytes are the hash of the uncompressed state diff. +// // - Then, there is a 32-byte hash of the full pubdata. +// // - Then, there is the 1-byte number of blobs published. +// // - Then, there are linear hashes of the published blobs, 32 bytes each. 
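The four bullet points above fully determine the size of the `l2DAValidatorOutputHash` preimage; below is a minimal sketch of that arithmetic, assuming only the fields listed in the comment (the helper name is illustrative, not taken from the contracts):

fn expected_da_output_preimage_len(num_blobs: usize) -> usize {
    // 32-byte state-diff hash + 32-byte full-pubdata hash + 1-byte blob count
    // + one 32-byte linear hash per published blob.
    32 + 32 + 1 + 32 * num_blobs
}

#[test]
fn da_output_preimage_len_for_three_blobs() {
    // The (commented-out) test below publishes enough pubdata for three blobs,
    // which corresponds to a 161-byte preimage.
    assert_eq!(expected_da_output_preimage_len(3), 161);
}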
+// +// let mut full_header = vec![]; +// +// let uncompressed_state_diffs = encoded_uncompressed_state_diffs(&input); +// let uncompressed_state_diffs_hash = keccak256(&uncompressed_state_diffs); +// full_header.extend(uncompressed_state_diffs_hash); +// +// let mut full_pubdata = RollupPubdataBuilder::new().build_pubdata(input, false); +// let full_pubdata_hash = keccak256(&full_pubdata); +// full_header.extend(full_pubdata_hash); +// +// // Now, we need to calculate the linear hashes of the blobs. +// // Firstly, let's pad the pubdata to the size of the blob. +// if full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { +// let padding = +// vec![0u8; ZK_SYNC_BYTES_PER_BLOB - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB]; +// full_pubdata.extend(padding); +// } +// full_header.push((full_pubdata.len() / ZK_SYNC_BYTES_PER_BLOB) as u8); +// +// full_pubdata +// .chunks(ZK_SYNC_BYTES_PER_BLOB) +// .for_each(|chunk| { +// full_header.extend(keccak256(chunk)); +// }); +// +// full_header +// } +// +// #[test] +// fn test_publish_and_clear_state() { +// // In this test, we check whether the L2 DA output hash is as expected. +// // We will publish 320kb worth of pubdata. +// // It should produce 3 blobs. +// +// let mut vm = VmTesterBuilder::new(HistoryEnabled) +// .with_empty_in_memory_storage() +// .with_execution_mode(TxExecutionMode::VerifyExecute) +// .with_random_rich_accounts(1) +// .build(); +// +// let account = &mut vm.rich_accounts[0]; +// +// // Firstly, deploy tx. It should publish the bytecode of the "test contract" +// let counter = read_test_contract(); +// +// let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); +// // We do not use compression here, to have the bytecode published in full. +// vm.vm.push_transaction_with_compression(tx, false); +// let result = vm.vm.execute(VmExecutionMode::OneTx); +// assert!(!result.result.is_failed(), "Transaction wasn't successful"); +// +// // Then, we call the l1 messenger to also send an L2->L1 message. +// let l1_messenger_contract = l1_messenger_contract(); +// let encoded_data = l1_messenger_contract +// .function("sendToL1") +// .unwrap() +// .encode_input(&[Token::Bytes(vec![])]) +// .unwrap(); +// +// let tx = account.get_l2_tx_for_execute( +// Execute { +// contract_address: Some(L1_MESSENGER_ADDRESS), +// calldata: encoded_data, +// value: U256::zero(), +// factory_deps: vec![], +// }, +// None, +// ); +// vm.vm.push_transaction(tx); +// let result = vm.vm.execute(VmExecutionMode::OneTx); +// assert!(!result.result.is_failed(), "Transaction wasn't successful"); +// +// let batch_result = vm.vm.execute(VmExecutionMode::Batch); +// if batch_result.result.is_failed() { +// panic!("Batch execution failed: {:?}", batch_result.result); +// } +// assert!( +// !batch_result.result.is_failed(), +// "Transaction wasn't successful" +// ); +// let pubdata_input = vm.vm.bootloader_state.get_pubdata_information().clone(); +// +// // Just to double check that the test makes sense. 
+// assert!(!pubdata_input.user_logs.is_empty()); +// assert!(!pubdata_input.l2_to_l1_messages.is_empty()); +// assert!(!pubdata_input.published_bytecodes.is_empty()); +// assert!(!pubdata_input.state_diffs.is_empty()); +// +// let expected_header: Vec = compose_header_for_l1_commit_rollup(pubdata_input); +// +// let l2_da_validator_output_hash = batch_result +// .logs +// .system_l2_to_l1_logs +// .iter() +// .find(|log| log.0.key == u256_to_h256(L2_DA_VALIDATOR_OUTPUT_HASH_KEY.into())) +// .unwrap() +// .0 +// .value; +// +// assert_eq!( +// l2_da_validator_output_hash, +// keccak256(&expected_header).into() +// ); +// +// let l2_used_da_validator_address = batch_result +// .logs +// .system_l2_to_l1_logs +// .iter() +// .find(|log| log.0.key == u256_to_h256(USED_L2_DA_VALIDATOR_ADDRESS_KEY.into())) +// .unwrap() +// .0 +// .value; +// +// assert_eq!( +// l2_used_da_validator_address, +// address_to_h256(&vm.vm.system_env.pubdata_params.l2_da_validator_address) +// ); +// } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index b424567aab0..4b7429c2829 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -1,195 +1,16 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_test_account::Account; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Execute, ExecuteTransactionCommon, K256PrivateKey, U256, -}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::StorageWritesDeduplicator, - vm_latest::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, + versions::testonly::l1_tx_execution::{ + test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, }, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 9 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - `gasPerPubdataByte` - // - `basePubdataSpent` - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. 
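The assertions in the removed test below hinge on the split between initial and repeated storage writes after deduplication; here is a simplified, self-contained model of that classification (a hypothetical helper, not the actual `StorageWritesDeduplicator`):

use std::collections::{HashMap, HashSet};

// A slot that did not exist before the batch counts as an initial write;
// a slot that already existed counts as a repeated write. Multiple writes
// to the same slot within the batch are deduplicated first.
fn count_writes(pre_existing: &HashSet<u32>, writes: &[(u32, u64)]) -> (usize, usize) {
    let mut deduplicated = HashMap::new();
    for (key, value) in writes {
        // Later writes to the same slot overwrite earlier ones.
        deduplicated.insert(*key, *value);
    }
    let initial = deduplicated
        .keys()
        .filter(|&key| !pre_existing.contains(key))
        .count();
    (initial, deduplicated.len() - initial)
}

#[test]
fn duplicate_writes_are_deduplicated() {
    let pre_existing = HashSet::from([7]);
    // Slot 1 is written twice (one initial write after deduplication);
    // slot 7 already existed (one repeated write).
    assert_eq!(count_writes(&pre_existing, &[(1, 10), (1, 11), (7, 5)]), (1, 1));
}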
- let basic_initial_writes = 5; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes, basic_initial_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. - // But now the base pubdata spent has changed too. 
- assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); +fn l1_tx_execution() { + test_l1_tx_execution::>(); } #[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![Account::new( - K256PrivateKey::from_bytes([0xad; 32].into()).unwrap(), - )]) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: Some(L1_MESSENGER_ADDRESS), - value: 0.into(), - factory_deps: vec![], - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); +fn l1_tx_execution_high_gas_limit() { + test_l1_tx_execution_high_gas_limit::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs index 1b5c3db59f7..82003b4a6ab 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs @@ -1,433 +1,33 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
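The one-line wrappers above are the shape every migrated test takes in this patch: the test body lives once under `versions::testonly` and is generic over the VM implementation being exercised. A simplified, self-contained sketch of the pattern follows (trait and type names here are illustrative, not the actual `TestedVm` machinery):

// One generic test body, one thin wrapper per VM version.
trait TestedVm: Default {
    fn execute_noop(&mut self) -> bool;
}

#[derive(Default)]
struct LatestVm;

impl TestedVm for LatestVm {
    fn execute_noop(&mut self) -> bool {
        // A real implementation would run the bootloader; the sketch just succeeds.
        true
    }
}

// Written once and shared by every VM version.
fn test_noop_succeeds<VM: TestedVm>() {
    let mut vm = VM::default();
    assert!(vm.execute_noop());
}

// Per-version wrapper, mirroring `fn l1_tx_execution()` above.
#[test]
fn noop_succeeds_latest() {
    test_noop_succeeds::<LatestVm>();
}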
- -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, L2BlockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, L2BlockNumber, - ProtocolVersionId, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{ - storage::WriteStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterface, VmInterfaceExt, - }, - vm_latest::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, + versions::testonly::l2_blocks::{ + test_l2_block_first_in_batch, test_l2_block_initialization_number_non_zero, + test_l2_block_initialization_timestamp, test_l2_block_new_l2_block, + test_l2_block_same_l2_block, }, - HistoryMode, + vm_latest::{HistoryEnabled, Vm}, }; -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute::default(), - received_timestamp_ms: 0, - raw_bytes: None, - } -} - #[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); +fn l2_block_initialization_timestamp() { + test_l2_block_initialization_timestamp::>(); } #[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_initialization_number_non_zero() { + test_l2_block_initialization_number_non_zero::>(); } #[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_same_l2_block() { + test_l2_block_same_l2_block::>(); } #[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let 
mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_new_l2_block() { + test_l2_block_new_l2_block::>(); } #[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let 
fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) +fn l2_block_first_in_batch() { + test_l2_block_first_in_batch::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs b/core/lib/multivm/src/versions/vm_latest/tests/migration.rs deleted file mode 100644 index 5b8da255180..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs +++ /dev/null @@ -1,51 +0,0 @@ -use zksync_types::{get_code_key, H256, SYSTEM_CONTEXT_ADDRESS}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -/// This test checks that the new bootloader will work fine even if the previous system context contract is not -/// compatible with it, i.e. the bootloader will upgrade it before starting any transaction. -#[test] -fn test_migration_for_system_context_aa_interaction() { - let mut storage = get_empty_storage(); - // We will set the system context bytecode to zero. - storage.set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::zero()); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Now, we will just proceed with standard transaction execution. - // The bootloader should be able to update system context regardless of whether - // the upgrade transaction is there or not. - let account = &mut vm.rich_accounts[0]; - let counter = read_test_contract(); - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2);
-
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !result.result.is_failed(),
-        "Transaction wasn't successful {:#?}",
-        result.result
-    );
-
-    let batch_result = vm.vm.execute(VmExecutionMode::Batch);
-    assert!(
-        !batch_result.result.is_failed(),
-        "Batch transaction wasn't successful {:#?}",
-        batch_result.result
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
index cc370f3906e..9d75aba9208 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
@@ -1,3 +1,34 @@
+use std::{
+    collections::{HashMap, HashSet},
+    rc::Rc,
+};
+
+use zk_evm_1_5_0::{
+    aux_structures::{MemoryPage, Timestamp},
+    vm_state::VmLocalState,
+    zkevm_opcode_defs::{ContractCodeSha256Format, VersionedHashLen32},
+};
+use zksync_types::{writes::StateDiffRecord, StorageKey, StorageValue, Transaction, H256, U256};
+use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256};
+use zksync_vm_interface::pubdata::PubdataBuilder;
+
+use super::{HistoryEnabled, Vm};
+use crate::{
+    interface::{
+        storage::{InMemoryStorage, ReadStorage, StorageView, WriteStorage},
+        CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs,
+    },
+    versions::testonly::{filter_out_base_system_contracts, TestedVm},
+    vm_latest::{
+        constants::BOOTLOADER_HEAP_PAGE,
+        old_vm::{event_sink::InMemoryEventSink, history_recorder::HistoryRecorder},
+        tracers::PubdataTracer,
+        types::internals::TransactionData,
+        utils::logs::StorageLogQuery,
+        AppDataFrameManagerWithHistory, HistoryMode, SimpleMemory, TracerDispatcher,
+    },
+};
+
 mod bootloader;
 mod default_aa;
 // TODO - fix this test
@@ -8,6 +39,7 @@ mod call_tracer;
 mod circuits;
 mod code_oracle;
 mod constants;
+mod evm_emulator;
 mod gas_limit;
 mod get_used_contracts;
 mod is_write_initial;
@@ -20,11 +52,254 @@ mod prestate_tracer;
 mod refunds;
 mod require_eip712;
 mod rollbacks;
-mod sekp256r1;
+mod secp256r1;
 mod simple_execution;
 mod storage;
-mod tester;
 mod tracing_execution_error;
 mod transfer;
 mod upgrade;
-mod utils;
+
+type TestedLatestVm = Vm<StorageView<InMemoryStorage>, HistoryEnabled>;
+
+impl TestedVm for TestedLatestVm {
+    type StateDump = VmInstanceInnerState<HistoryEnabled>;
+
+    fn dump_state(&self) -> Self::StateDump {
+        self.dump_inner_state()
+    }
+
+    fn gas_remaining(&mut self) -> u32 {
+        self.state.local_state.callstack.current.ergs_remaining
+    }
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState {
+        self.get_current_execution_state()
+    }
+
+    fn decommitted_hashes(&self) -> HashSet<U256> {
+        self.get_used_contracts().into_iter().collect()
+    }
+
+    fn finish_batch_with_state_diffs(
+        &mut self,
+        diffs: Vec<StateDiffRecord>,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
+    ) -> VmExecutionResultAndLogs {
+        let pubdata_tracer = PubdataTracer::new_with_forced_state_diffs(
+            self.batch_env.clone(),
+            VmExecutionMode::Batch,
+            diffs,
+            crate::vm_latest::MultiVMSubversion::latest(),
+            Some(pubdata_builder),
+        );
+        self.inspect_inner(
+            &mut TracerDispatcher::default(),
+            VmExecutionMode::Batch,
+            Some(pubdata_tracer),
+        )
+    }
+
+    fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs {
+        self.inspect_inner(
+            &mut TracerDispatcher::default(),
+            VmExecutionMode::Batch,
+            None,
+        )
+    }
+
+    fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) {
+        let bytecodes = bytecodes
+            .iter()
+            .map(|&bytecode| {
+                let hash = hash_bytecode(bytecode);
+                let words =
bytes_to_be_words(bytecode.to_vec()); + (h256_to_u256(hash), words) + }) + .collect(); + self.state + .decommittment_processor + .populate(bytecodes, Timestamp(0)); + } + + fn known_bytecode_hashes(&self) -> HashSet { + let mut bytecode_hashes: HashSet<_> = self + .state + .decommittment_processor + .known_bytecodes + .inner() + .keys() + .copied() + .collect(); + filter_out_base_system_contracts(&mut bytecode_hashes); + bytecode_hashes + } + + fn manually_decommit(&mut self, code_hash: H256) -> bool { + let (header, normalized_preimage) = + ContractCodeSha256Format::normalize_for_decommitment(&code_hash.0); + let query = self + .state + .prepare_to_decommit( + 0, + header, + normalized_preimage, + MemoryPage(123), + Timestamp(0), + ) + .unwrap(); + self.state.execute_decommit(0, query).unwrap(); + query.is_fresh + } + + fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]) { + for &(slot, required_value) in cells { + let current_value = self + .state + .memory + .read_slot(BOOTLOADER_HEAP_PAGE as usize, slot as usize) + .value; + assert_eq!(current_value, required_value); + } + } + + fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) { + let timestamp = Timestamp(self.state.local_state.timestamp); + self.state + .memory + .populate_page(BOOTLOADER_HEAP_PAGE as usize, cells.to_vec(), timestamp) + } + + fn read_storage(&mut self, key: StorageKey) -> U256 { + self.state.storage.storage.read_from_storage(&key) + } + + fn last_l2_block_hash(&self) -> H256 { + self.bootloader_state.last_l2_block().get_hash() + } + + fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) { + self.bootloader_state.push_l2_block(block); + } + + fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { + let tx = TransactionData::new(tx, false); + let overhead = tx.overhead_gas(); + self.push_raw_transaction(tx, overhead, refund, true) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct ModifiedKeysMap(HashMap); + +impl ModifiedKeysMap { + fn new(storage: &mut StorageView) -> Self { + let mut modified_keys = storage.modified_storage_keys().clone(); + let inner = storage.inner_mut(); + // Remove modified keys that were set to the same value (e.g., due to a rollback). + modified_keys.retain(|key, value| inner.read_value(key) != *value); + Self(modified_keys) + } +} + +// We consider hashmaps to be equal even if there is a key +// that is not present in one but has zero value in another. +impl PartialEq for ModifiedKeysMap { + fn eq(&self, other: &Self) -> bool { + for (key, value) in &self.0 { + if *value != other.0.get(key).copied().unwrap_or_default() { + return false; + } + } + for (key, value) in &other.0 { + if *value != self.0.get(key).copied().unwrap_or_default() { + return false; + } + } + true + } +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct DecommitterTestInnerState { + /// There is no way to "truly" compare the storage pointer, + /// so we just compare the modified keys. This is reasonable enough. + pub(crate) modified_storage_keys: ModifiedKeysMap, + pub(crate) known_bytecodes: HistoryRecorder>, H>, + pub(crate) decommitted_code_hashes: HistoryRecorder>, HistoryEnabled>, +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct StorageOracleInnerState { + /// There is no way to "truly" compare the storage pointer, + /// so we just compare the modified keys. This is reasonable enough. 
+ pub(crate) modified_storage_keys: ModifiedKeysMap, + pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, + pub(crate) paid_changes: HistoryRecorder, H>, + pub(crate) initial_values: HistoryRecorder, H>, + pub(crate) returned_io_refunds: HistoryRecorder, H>, + pub(crate) returned_pubdata_costs: HistoryRecorder, H>, +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct PrecompileProcessorTestInnerState { + pub(crate) timestamp_history: HistoryRecorder, H>, +} + +/// A struct that encapsulates the state of the VM's oracles +/// The state is to be used in tests. +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct VmInstanceInnerState { + event_sink: InMemoryEventSink, + precompile_processor_state: PrecompileProcessorTestInnerState, + memory: SimpleMemory, + decommitter_state: DecommitterTestInnerState, + storage_oracle_state: StorageOracleInnerState, + local_state: VmLocalState, +} + +impl Vm, H> { + // Dump inner state of the VM. + pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { + let event_sink = self.state.event_sink.clone(); + let precompile_processor_state = PrecompileProcessorTestInnerState { + timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), + }; + let memory = self.state.memory.clone(); + let decommitter_state = DecommitterTestInnerState { + modified_storage_keys: ModifiedKeysMap::new( + &mut self + .state + .decommittment_processor + .get_storage() + .borrow_mut(), + ), + known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), + decommitted_code_hashes: self + .state + .decommittment_processor + .get_decommitted_code_hashes_with_history() + .clone(), + }; + + let storage_oracle_state = StorageOracleInnerState { + modified_storage_keys: ModifiedKeysMap::new( + &mut self.state.storage.storage.get_ptr().borrow_mut(), + ), + frames_stack: self.state.storage.storage_frames_stack.clone(), + paid_changes: self.state.storage.paid_changes.clone(), + initial_values: self.state.storage.initial_values.clone(), + returned_io_refunds: self.state.storage.returned_io_refunds.clone(), + returned_pubdata_costs: self.state.storage.returned_pubdata_costs.clone(), + }; + let local_state = self.state.local_state.clone(); + + VmInstanceInnerState { + event_sink, + precompile_processor_state, + memory, + decommitter_state, + storage_oracle_state, + local_state, + } + } +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 15c678ba953..c7ea3242d4a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -1,191 +1,9 @@ -use zksync_types::{Execute, Nonce}; - use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, - VmRevertReason, - }, - vm_latest::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - HistoryEnabled, - }, + versions::testonly::nonce_holder::test_nonce_holder, + vm_latest::{HistoryEnabled, Vm}, }; -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch 
=> 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - #[test] -fn test_nonce_holder() { - let mut account = Account::random(); - // let hex_addr = hex::encode(account.address.to_fixed_bytes()); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - // TODO - let mut _run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: Some(account.address), - calldata: vec![12], - value: Default::default(), - factory_deps: vec![], - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // TODO reenable. - // // Test 1: trying to set value under non sequential nonce value. 
- // run_nonce_test( - // 1u32, - // NonceHolderTestMode::SetValueUnderNonce, - // Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), - // "Allowed to set value under non sequential value", - // ); - - // // Test 2: increase min nonce by 1 with sequential nonce ordering: - // run_nonce_test( - // 0u32, - // NonceHolderTestMode::IncreaseMinNonceBy1, - // None, - // "Failed to increment nonce by 1 for sequential account", - // ); - - // // Test 3: correctly set value under nonce with sequential nonce ordering: - // run_nonce_test( - // 1u32, - // NonceHolderTestMode::SetValueUnderNonce, - // None, - // "Failed to set value under nonce sequential value", - // ); - - // // Test 5: migrate to the arbitrary nonce ordering: - // run_nonce_test( - // 2u32, - // NonceHolderTestMode::SwitchToArbitraryOrdering, - // None, - // "Failed to switch to arbitrary ordering", - // ); - - // // Test 6: increase min nonce by 5 - // run_nonce_test( - // 6u32, - // NonceHolderTestMode::IncreaseMinNonceBy5, - // None, - // "Failed to increase min nonce by 5", - // ); - - // // Test 7: since the nonces in range [6,10] are no longer allowed, the - // // tx with nonce 10 should not be allowed - // run_nonce_test( - // 10u32, - // NonceHolderTestMode::IncreaseMinNonceBy5, - // Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), - // "Allowed to reuse nonce below the minimal one", - // ); - - // // Test 8: we should be able to use nonce 13 - // run_nonce_test( - // 13u32, - // NonceHolderTestMode::SetValueUnderNonce, - // None, - // "Did not allow to use unused nonce 10", - // ); - - // // Test 9: we should not be able to reuse nonce 13 - // run_nonce_test( - // 13u32, - // NonceHolderTestMode::IncreaseMinNonceBy5, - // Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), - // "Allowed to reuse the same nonce twice", - // ); - - // // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - // run_nonce_test( - // 14u32, - // NonceHolderTestMode::IncreaseMinNonceBy5, - // None, - // "Did not allow to use a bumped nonce", - // ); - - // // Test 11: Do not allow bumping nonce by too much - // run_nonce_test( - // 16u32, - // NonceHolderTestMode::IncreaseMinNonceTooMuch, - // Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), - // "Allowed for incrementing min nonce too much", - // ); - - // // Test 12: Do not allow not setting a nonce as used - // run_nonce_test( - // 16u32, - // NonceHolderTestMode::LeaveNonceUnused, - // Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), - // "Allowed to leave nonce as unused", - // ); +fn nonce_holder() { + test_nonce_holder::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs index 110b14146c7..7ef45721ea5 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs @@ -1,142 +1,19 @@ -use 
zk_evm_1_5_0::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, + versions::testonly::precompiles::{test_ecrecover, test_keccak, test_sha256}, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); +fn keccak() { + test_keccak::>(); } #[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); +fn sha256() { + test_sha256::>(); } #[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account.address), - calldata: Vec::new(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); +fn ecrecover() { + test_ecrecover::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs index 230b1d0ad87..7028f7a8971 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs @@ -4,25 +4,22 @@ use once_cell::sync::OnceCell; use zksync_test_account::TxType; use zksync_types::{utils::deployed_address_create, Execute, U256}; +use super::TestedLatestVm; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterface, VmInterfaceExt}, tracers::PrestateTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, + versions::testonly::{read_simple_transfer_contract, VmTesterBuilder}, + vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, ToTracerPointer}, }; #[test] fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); + .build::(); vm.deploy_test_contract(); let account = &mut vm.rich_accounts[0]; @@ -41,7 +38,7 @@ fn test_prestate_tracer() { let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); let tracer_ptr = prestate_tracer.into_tracer_pointer(); vm.vm - .inspect(&mut tracer_ptr.into(), VmExecutionMode::Batch); + .inspect(&mut tracer_ptr.into(), InspectExecutionMode::OneTx); let prestate_result = Arc::try_unwrap(prestate_tracer_result) .unwrap() @@ -53,37 +50,27 @@ fn test_prestate_tracer() { #[test] fn test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); + .build::(); let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; + let account = &mut vm.rich_accounts[0]; + let tx = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce = tx.nonce().unwrap().0.into(); vm.vm.push_transaction(tx); - 
vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); + vm.vm.execute(InspectExecutionMode::OneTx); + let deployed_address = deployed_address_create(account.address, nonce); vm.test_contract = Some(deployed_address); // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; + let tx2 = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce2 = tx2.nonce().unwrap().0.into(); vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); + vm.vm.execute(InspectExecutionMode::OneTx); + let deployed_address2 = deployed_address_create(account.address, nonce2); let account = &mut vm.rich_accounts[0]; @@ -111,7 +98,7 @@ fn test_prestate_tracer_diff_mode() { let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); let tracer_ptr = prestate_tracer.into_tracer_pointer(); vm.vm - .inspect(&mut tracer_ptr.into(), VmExecutionMode::Bootloader); + .inspect(&mut tracer_ptr.into(), InspectExecutionMode::Bootloader); let prestate_result = Arc::try_unwrap(prestate_tracer_result) .unwrap() diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index e494a45f35b..dfbec170682 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -1,234 +1,16 @@ -use ethabi::Token; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{read_expensive_contract, read_test_contract}, - }, - types::internals::TransactionData, - HistoryEnabled, + versions::testonly::refunds::{ + test_negative_pubdata_for_transaction, test_predetermined_refunded_gas, }, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - // We need to provide the same DA validator to ensure the same logs - let rollup_da_validator = Address::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_rollup_pubdata_params(Some(rollup_da_validator)) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. 
- assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .with_rollup_pubdata_params(Some(rollup_da_validator)) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .with_rollup_pubdata_params(Some(rollup_da_validator)) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .deduplicated_storage_logs - .len(), - current_state_without_predefined_refunds - .deduplicated_storage_logs - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); +fn predetermined_refunded_gas() { + test_predetermined_refunded_gas::>(); } #[test] fn negative_pubdata_for_transaction() { - let expensive_contract_address = Address::random(); - let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); - let expensive_function = expensive_contract.function("expensive").unwrap(); - let cleanup_function = expensive_contract.function("cleanUp").unwrap(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - expensive_contract_bytecode, - expensive_contract_address, - false, - )]) - .build(); - - let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: expensive_function - .encode_input(&[Token::Uint(10.into())]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(expensive_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
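
The refund tests in this file are replaced by thin wrappers: the version-specific body is deleted and the shared, VM-agnostic implementation from `versions::testonly::refunds` is instantiated with the latest VM type. A minimal sketch of that wrapper shape, assuming the shared test is generic over the tested VM and that `Vm<_, HistoryEnabled>` is the concrete type plugged in (the storage parameter is left to inference):

use crate::{
    versions::testonly::refunds::test_predetermined_refunded_gas,
    vm_latest::{HistoryEnabled, Vm},
};

// The wrapper only selects which VM implementation runs the shared test;
// all setup and assertions live in `versions::testonly::refunds`.
#[test]
fn predetermined_refunded_gas() {
    test_predetermined_refunded_gas::<Vm<_, HistoryEnabled>>();
}

The same shape repeats below for `negative_pubdata_for_transaction`, `require_eip712`, `secp256r1`, the `simple_execution`, `storage`, `tracing_execution_error`, and `transfer` tests, and the `upgrade` tests at the end of this section.
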
- let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: cleanup_function.encode_input(&[]).unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(clean_up_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - assert!(result.refunds.operator_suggested_refund > 0); - assert_eq!( - result.refunds.gas_refunded, - result.refunds.operator_suggested_refund - ); + test_negative_pubdata_for_transaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index cdd71354c8d..470ddb28699 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -1,167 +1,9 @@ -use ethabi::Token; -use zksync_eth_signer::{EthereumSigner, TransactionParameters}; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, + versions::testonly::require_eip712::test_require_eip712, + vm_latest::{HistoryEnabled, Vm}, }; -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). 
- let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account_abstraction.address), - calldata: encoded_input, - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.into(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - Some(beneficiary.address), - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - vec![], - Default::default(), - ); - - let mut transaction_request: TransactionRequest = tx_712.into(); - transaction_request.chain_id = Some(chain_id.into()); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.into(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); +#[test] +fn require_eip712() { + test_require_eip712::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index a850053619b..de674498427 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -1,182 +1,36 @@ use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; +use zksync_test_account::{DeployContractsTx, TxType}; use zksync_types::{get_nonce_key, U256}; +use zksync_vm_interface::InspectExecutionMode; +use super::TestedLatestVm; use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + TxExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, tracers::dynamic::vm_1_5_0::DynTracer, + versions::testonly::{ + rollbacks::{test_rollback_in_call_mode, test_vm_loadnext_rollbacks, test_vm_rollbacks}, + VmTesterBuilder, + }, vm_latest::{ - tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, + types::internals::ZkSyncVmState, BootloaderState, HistoryEnabled, HistoryMode, + SimpleMemory, ToTracerPointer, Vm, VmTracer, }, }; -// #[test] -// fn test_vm_rollbacks() { -// let mut vm = VmTesterBuilder::new(HistoryEnabled) -// .with_empty_in_memory_storage() -// .with_execution_mode(TxExecutionMode::VerifyExecute) -// .with_random_rich_accounts(1) -// .build(); - -// let mut account = vm.rich_accounts[0].clone(); -// let counter = read_test_contract(); -// let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; -// let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; -// let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - -// let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ -// 
TransactionTestInfo::new_processed(tx_0.clone(), false), -// TransactionTestInfo::new_processed(tx_1.clone(), false), -// TransactionTestInfo::new_processed(tx_2.clone(), false), -// ]); - -// // reset vm -// vm.reset_with_empty_storage(); - -// let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ -// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), -// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), -// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), -// // The correct nonce is 0, this tx will fail -// TransactionTestInfo::new_rejected( -// tx_2.clone(), -// TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), -// ), -// // This tx will succeed -// TransactionTestInfo::new_processed(tx_0.clone(), false), -// // The correct nonce is 1, this tx will fail -// TransactionTestInfo::new_rejected( -// tx_0.clone(), -// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), -// ), -// // The correct nonce is 1, this tx will fail -// TransactionTestInfo::new_rejected( -// tx_2.clone(), -// TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), -// ), -// // This tx will succeed -// TransactionTestInfo::new_processed(tx_1, false), -// // The correct nonce is 2, this tx will fail -// TransactionTestInfo::new_rejected( -// tx_0.clone(), -// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), -// ), -// // This tx will succeed -// TransactionTestInfo::new_processed(tx_2.clone(), false), -// // This tx will fail -// TransactionTestInfo::new_rejected( -// tx_2.clone(), -// TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), -// ), -// TransactionTestInfo::new_rejected( -// tx_0.clone(), -// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), -// ), -// ]); - -// // assert_eq!(result_without_rollbacks, result_with_rollbacks); -// } - -// #[test] -// fn test_vm_loadnext_rollbacks() { -// let mut vm = VmTesterBuilder::new(HistoryEnabled) -// .with_empty_in_memory_storage() -// .with_execution_mode(TxExecutionMode::VerifyExecute) -// .with_random_rich_accounts(1) -// .build(); -// let mut account = vm.rich_accounts[0].clone(); - -// let loadnext_contract = get_loadnext_contract(); -// let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; -// let DeployContractsTx { -// tx: loadnext_deploy_tx, -// address, -// .. 
-// } = account.get_deploy_tx_with_factory_deps( -// &loadnext_contract.bytecode, -// Some(loadnext_constructor_data), -// loadnext_contract.factory_deps.clone(), -// TxType::L2, -// ); - -// let loadnext_tx_1 = account.get_l2_tx_for_execute( -// Execute { -// contract_address: Some(address), -// calldata: LoadnextContractExecutionParams { -// reads: 100, -// writes: 100, -// events: 100, -// hashes: 500, -// recursive_calls: 10, -// deploys: 60, -// } -// .to_bytes(), -// value: Default::default(), -// factory_deps: vec![], -// }, -// None, -// ); - -// let loadnext_tx_2 = account.get_l2_tx_for_execute( -// Execute { -// contract_address: Some(address), -// calldata: LoadnextContractExecutionParams { -// reads: 100, -// writes: 100, -// events: 100, -// hashes: 500, -// recursive_calls: 10, -// deploys: 60, -// } -// .to_bytes(), -// value: Default::default(), -// factory_deps: vec![], -// }, -// None, -// ); - -// // let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ -// // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), -// // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), -// // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), -// // ]); - -// // TODO: reset vm -// // vm.reset_with_empty_storage(); - -// // let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ -// // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), -// // TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), -// // TransactionTestInfo::new_rejected( -// // loadnext_deploy_tx.clone(), -// // TxModifier::NonceReused( -// // loadnext_deploy_tx.initiator_account(), -// // loadnext_deploy_tx.nonce().unwrap(), -// // ) -// // .into(), -// // ), -// // TransactionTestInfo::new_processed(loadnext_tx_1, false), -// // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), -// // TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), -// // TransactionTestInfo::new_rejected( -// // loadnext_deploy_tx.clone(), -// // TxModifier::NonceReused( -// // loadnext_deploy_tx.initiator_account(), -// // loadnext_deploy_tx.nonce().unwrap(), -// // ) -// // .into(), -// // ), -// // TransactionTestInfo::new_processed(loadnext_tx_2, false), -// // ]); +#[test] +fn vm_rollbacks() { + test_vm_rollbacks::>(); +} -// // assert_eq!(result_without_rollbacks, result_with_rollbacks); -// } +#[test] +fn vm_loadnext_rollbacks() { + test_vm_loadnext_rollbacks::>(); +} // Testing tracer that does not allow the recursion to go deeper than a certain limit struct MaxRecursionTracer { @@ -208,11 +62,11 @@ fn test_layered_rollback() { // This test checks that the layered rollbacks work correctly, i.e. 
// the rollback by the operator will always revert all the changes - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let account = &mut vm.rich_accounts[0]; let loadnext_contract = get_loadnext_contract().bytecode; @@ -227,7 +81,7 @@ fn test_layered_rollback() { TxType::L2, ); vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); + let deployment_res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!deployment_res.result.is_failed(), "transaction failed"); let loadnext_transaction = account.get_loadnext_transaction( @@ -254,7 +108,8 @@ fn test_layered_rollback() { max_recursion_depth: 15, } .into_tracer_pointer(); - vm.vm.inspect(&mut tracer.into(), VmExecutionMode::OneTx); + vm.vm + .inspect(&mut tracer.into(), InspectExecutionMode::OneTx); let nonce_val2 = vm .vm @@ -281,6 +136,11 @@ fn test_layered_rollback() { ); vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "transaction must not fail"); } + +#[test] +fn rollback_in_call_mode() { + test_rollback_in_call_mode::>(); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs new file mode 100644 index 00000000000..11534a26ded --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs @@ -0,0 +1,9 @@ +use crate::{ + versions::testonly::secp256r1::test_secp256r1, + vm_latest::{HistoryEnabled, Vm}, +}; + +#[test] +fn secp256r1() { + test_secp256r1::>(); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs deleted file mode 100644 index 93be9506a3b..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs +++ /dev/null @@ -1,74 +0,0 @@ -use zk_evm_1_5_0::zkevm_opcode_defs::p256; -use zksync_system_constants::P256VERIFY_PRECOMPILE_ADDRESS; -use zksync_types::{web3::keccak256, Execute, H256, U256}; -use zksync_utils::h256_to_u256; - -use crate::{ - interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{tests::tester::VmTesterBuilder, HistoryEnabled}, -}; - -#[test] -fn test_sekp256r1() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
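
Tests that keep a version-specific body, such as `test_layered_rollback` above, are ported to the shared tester instead: the builder no longer takes a history mode, rich accounts are requested by count, the VM flavor is selected at `build` time, and per-transaction execution goes through `InspectExecutionMode`. A condensed sketch of that updated pattern, assuming `TestedLatestVm` is the alias brought in by the new `use super::TestedLatestVm;` import in the rollbacks test module above, and assuming a `read_test_contract` helper is re-exported from `versions::testonly` (that path is a guess, not confirmed by this patch):

use zksync_test_account::TxType;
use zksync_vm_interface::InspectExecutionMode;

use super::TestedLatestVm;
use crate::{
    interface::TxExecutionMode,
    // `read_test_contract` location is assumed; adjust the path if it lives elsewhere.
    versions::testonly::{read_test_contract, VmTesterBuilder},
};

#[test]
fn builder_usage_sketch() {
    // Builder: no history-mode argument; the VM type is chosen by `build`.
    let mut vm = VmTesterBuilder::new()
        .with_empty_in_memory_storage()
        .with_execution_mode(TxExecutionMode::VerifyExecute)
        .with_rich_accounts(1)
        .build::<TestedLatestVm>();

    // Deploy through a rich account; the separate `deployer` account is gone.
    let account = &mut vm.rich_accounts[0];
    let deploy_tx = account
        .get_deploy_tx(&read_test_contract(), None, TxType::L2)
        .tx;
    vm.vm.push_transaction(deploy_tx);

    // Single-transaction execution now uses `InspectExecutionMode::OneTx`.
    let result = vm.vm.execute(InspectExecutionMode::OneTx);
    assert!(!result.result.is_failed(), "deployment failed");
}
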
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_execution_mode(TxExecutionMode::EthCall) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - // The digest, secret key and public key were copied from the following test suit: `https://github.com/hyperledger/besu/blob/b6a6402be90339367d5bcabcd1cfd60df4832465/crypto/algorithms/src/test/java/org/hyperledger/besu/crypto/SECP256R1Test.java#L36` - let sk = p256::SecretKey::from_slice( - &hex::decode("519b423d715f8b581f4fa8ee59f4771a5b44c8130b4e3eacca54a56dda72b464").unwrap(), - ) - .unwrap(); - let sk = p256::ecdsa::SigningKey::from(sk); - - let digest = keccak256(&hex::decode("5905238877c77421f73e43ee3da6f2d9e2ccad5fc942dcec0cbd25482935faaf416983fe165b1a045ee2bcd2e6dca3bdf46c4310a7461f9a37960ca672d3feb5473e253605fb1ddfd28065b53cb5858a8ad28175bf9bd386a5e471ea7a65c17cc934a9d791e91491eb3754d03799790fe2d308d16146d5c9b0d0debd97d79ce8").unwrap()); - let public_key_encoded = hex::decode("1ccbe91c075fc7f4f033bfa248db8fccd3565de94bbfb12f3c59ff46c271bf83ce4014c68811f9a21a1fdb2c0e6113e06db7ca93b7404e78dc7ccd5ca89a4ca9").unwrap(); - - let (sig, _) = sk.sign_prehash_recoverable(&digest).unwrap(); - let (r, s) = sig.split_bytes(); - - let mut encoded_r = [0u8; 32]; - encoded_r.copy_from_slice(&r); - - let mut encoded_s = [0u8; 32]; - encoded_s.copy_from_slice(&s); - - let mut x = [0u8; 32]; - x.copy_from_slice(&public_key_encoded[0..32]); - - let mut y = [0u8; 32]; - y.copy_from_slice(&public_key_encoded[32..64]); - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), - calldata: [digest, encoded_r, encoded_s, x, y].concat(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let execution_result = vm.vm.execute(VmExecutionMode::Batch); - - let ExecutionResult::Success { output } = execution_result.result else { - panic!("batch failed") - }; - - let output = H256::from_slice(&output); - - assert_eq!( - h256_to_u256(output), - U256::from(1u32), - "verification was not successful" - ); -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs index cd020ee9f96..29072e66b1e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs @@ -1,83 +1,14 @@ -use assert_matches::assert_matches; - use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, + versions::testonly::simple_execution::{test_estimate_fee, test_simple_execute}, + vm_latest::{HistoryEnabled, Vm}, }; #[test] fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. 
}); + test_estimate_fee::>(); } #[test] fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); + test_simple_execute::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs index 126d174a646..4cb03875a0f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs @@ -1,188 +1,14 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Address, Execute, U256}; - use crate::{ - interface::{ - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - vm_latest::{tests::tester::VmTesterBuilder, HistoryEnabled}, + versions::testonly::storage::{test_storage_behavior, test_transient_storage_behavior}, + vm_latest::{HistoryEnabled, Vm}, }; -#[derive(Debug, Default)] - -struct TestTxInfo { - calldata: Vec, - fee_overrides: Option, - should_fail: bool, -} - -fn test_storage(txs: Vec) -> u32 { - let bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let test_contract_address = Address::random(); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(txs.len() as u32) - .with_custom_contracts(vec![(bytecode, test_contract_address, false)]) - .build(); - - let mut last_result = None; - - for (id, tx) in txs.into_iter().enumerate() { - let TestTxInfo { - calldata, - fee_overrides, - should_fail, - } = tx; - - let account = &mut vm.rich_accounts[id]; - - vm.vm.make_snapshot(); - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value: 0.into(), - factory_deps: vec![], - }, - fee_overrides, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - if should_fail { - assert!(result.result.is_failed(), "Transaction should fail"); - vm.vm.rollback_to_the_latest_snapshot(); - } else { - assert!(!result.result.is_failed(), "Transaction should not fail"); - vm.vm.pop_snapshot_no_rollback(); - } - - last_result = Some(result); - } - - last_result.unwrap().statistics.pubdata_published -} - -fn test_storage_one_tx(second_tx_calldata: Vec) -> u32 { - test_storage(vec![ - TestTxInfo::default(), - TestTxInfo { - calldata: second_tx_calldata, - fee_overrides: None, - should_fail: false, - }, - ]) -} - -#[test] -fn test_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - // In all of the tests below we provide the first tx to ensure that the tracers will not include - // the statistics from the start of the bootloader and will only include those for the transaction itself. - - let base_pubdata = test_storage_one_tx(vec![]); - let simple_test_pubdata = test_storage_one_tx( - contract - .function("simpleWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_pubdata = test_storage_one_tx( - contract - .function("resettingWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_via_revert_pubdata = test_storage_one_tx( - contract - .function("resettingWriteViaRevert") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - - assert_eq!(simple_test_pubdata - base_pubdata, 65); - assert_eq!(resetting_write_pubdata - base_pubdata, 34); - assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34); -} - #[test] -fn test_transient_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let first_tstore_test = contract - .function("testTransientStore") - .unwrap() - .encode_input(&[]) - .unwrap(); - // Second transaction checks that, as expected, the transient storage is cleared after the first transaction. 
- let second_tstore_test = contract - .function("assertTValue") - .unwrap() - .encode_input(&[Token::Uint(U256::zero())]) - .unwrap(); - - test_storage(vec![ - TestTxInfo { - calldata: first_tstore_test, - ..TestTxInfo::default() - }, - TestTxInfo { - calldata: second_tstore_test, - ..TestTxInfo::default() - }, - ]); +fn storage_behavior() { + test_storage_behavior::>(); } #[test] -fn test_transient_storage_behavior_panic() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let basic_tstore_test = contract - .function("tStoreAndRevert") - .unwrap() - .encode_input(&[Token::Uint(U256::one()), Token::Bool(false)]) - .unwrap(); - - let small_fee = Fee { - // Something very-very small to make the validation fail - gas_limit: 10_000.into(), - ..Account::default_fee() - }; - - test_storage(vec![ - TestTxInfo { - calldata: basic_tstore_test.clone(), - ..TestTxInfo::default() - }, - TestTxInfo { - fee_overrides: Some(small_fee), - should_fail: true, - ..TestTxInfo::default() - }, - TestTxInfo { - calldata: basic_tstore_test, - ..TestTxInfo::default() - }, - ]); +fn transient_storage_behavior() { + test_transient_storage_behavior::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs deleted file mode 100644 index c0ef52afaa5..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_5_0::{aux_structures::Timestamp, vm_state::VmLocalState}; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - interface::storage::WriteStorage, - vm_latest::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder>, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_io_refunds: HistoryRecorder, H>, - pub(crate) returned_pubdata_costs: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.storage_frames_stack.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_io_refunds: self.state.storage.returned_io_refunds.clone(), - returned_pubdata_costs: self.state.storage.returned_pubdata_costs.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs deleted file mode 100644 index d55d1fd6a69..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs deleted file mode 100644 index 0f6e13877bf..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs +++ /dev/null @@ -1,335 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::{l2_rollup_da_validator_bytecode, BaseSystemContracts}; -use zksync_types::{ - block::L2BlockHasher, - 
commitment::{L1BatchCommitmentMode, PubdataParams}, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}, - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, - VmInterface, VmInterfaceExt, - }, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - // pub(crate) fn reset_with_empty_storage(&mut self) { - // self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - // self.reset_state(false); - // } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(&self.storage).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - pubdata_params: Option, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - pubdata_params: self.pubdata_params, - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - pubdata_params: Default::default(), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - pubdata_params: None, - } - } - - pub(crate) fn with_l1_batch_env(mut self, 
l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_custom_pubdata_params(mut self, pubdata_params: PubdataParams) -> Self { - self.pubdata_params = Some(pubdata_params); - self - } - - pub(crate) fn with_rollup_pubdata_params(mut self, fixed_address: Option
) -> Self { - // We choose some random address to put the L2 DA validator to. - let l2_da_validator_address = fixed_address.unwrap_or_else(Address::random); - - let bytecode = l2_rollup_da_validator_bytecode(); - - self.pubdata_params = Some(PubdataParams { - l2_da_validator_address, - pubdata_type: L1BatchCommitmentMode::Rollup, - }); - - self.custom_contracts - .push((bytecode, l2_da_validator_address, false)); - - self - } - - pub(crate) fn with_validium_pubdata_params(self) -> Self { - todo!() - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(mut self) -> VmTester { - if self.pubdata_params.is_none() { - self = self.with_rollup_pubdata_params(None); - } - self.system_env.pubdata_params = self.pubdata_params.unwrap(); - - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. 
Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. -fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs index 2db37881352..a2cd6af6211 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs @@ -1,54 +1,9 @@ -use zksync_types::{Execute, H160}; - use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_latest::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, + versions::testonly::tracing_execution_error::test_tracing_of_execution_errors, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(contract_address), - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); +fn tracing_of_execution_errors() { + test_tracing_of_execution_errors::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs index 2c380623636..f37ebe6a3fb 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs @@ -1,220 +1,16 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, U256}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::get_balance, - 
}, - HistoryEnabled, + versions::testonly::transfer::{ + test_reentrancy_protection_send_and_transfer, test_send_and_transfer, }, + vm_latest::{HistoryEnabled, Vm}, }; -enum TestOptions { - Send(U256), - Transfer(U256), -} - -fn test_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let recipeint_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - - let test_contract_address = Address::random(); - let recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - }; - - let mut storage = get_empty_storage(); - storage.set_value( - storage_key_for_eth_balance(&test_contract_address), - u256_to_h256(value), - ); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - (recipeint_bytecode, recipient_address, false), - ]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let tx_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx_result.result.is_failed(), - "Transaction wasn't successful" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); - - let new_recipient_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &recipient_address, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!(new_recipient_balance, value); -} - #[test] -fn test_send_and_transfer() { - test_send_or_transfer(TestOptions::Send(U256::zero())); - test_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_send_or_transfer(TestOptions::Transfer(U256::from(10).pow(18.into()))); -} - -fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipeint_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - - let test_contract_address = Address::random(); - let reentrant_recipeint_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - 
value, - test_abi - .function("send") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipeint_address), - Token::Uint(value), - ]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipeint_address), - Token::Uint(value), - ]) - .unwrap(), - ), - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - ( - reentrant_recipeint_bytecode, - reentrant_recipeint_address, - false, - ), - ]) - .build(); - - // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. - let account = &mut vm.rich_accounts[0]; - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(reentrant_recipeint_address), - calldata: reentrant_recipient_abi - .function("setX") - .unwrap() - .encode_input(&[]) - .unwrap(), - value: U256::from(1), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let tx1_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx1_result.result.is_failed(), - "Transaction 1 wasn't successful" - ); - - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value, - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx2); - let tx2_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - tx2_result.result.is_failed(), - "Transaction 2 should have failed, but it succeeded" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); +fn send_and_transfer() { + test_send_and_transfer::>(); } #[test] -fn test_reentrancy_protection_send_and_transfer() { - test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::zero())); - test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_reentrancy_protection_send_or_transfer(TestOptions::Transfer( - U256::from(10).pow(18.into()), - )); +fn reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_and_transfer::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index d85a504de40..9889e26e4d2 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -1,354 +1,21 @@ -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_upgrade::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; use crate::{ - interface::{ - storage::WriteStorage, ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, - 
VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - vm_latest::{ - tests::{ - tester::VmTesterBuilder, - utils::{read_complex_upgrade, verify_required_storage}, - }, - HistoryEnabled, + versions::testonly::upgrade::{ + test_complex_upgrader, test_force_deploy_upgrade, test_protocol_upgrade_is_first, }, + vm_latest::{HistoryEnabled, Vm}, }; -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block #[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); +fn protocol_upgrade_is_first() { + 
test_protocol_upgrade_is_first::>(); } -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. #[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); +fn force_deploy_upgrade() { + test_force_deploy_upgrade::>(); } -/// Here we show how the work with the complex upgrader could be done #[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - 
// Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: Some(COMPLEX_UPGRADER_ADDRESS), - calldata: complex_upgrader_calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - 
load_sys_contract("ComplexUpgrader") +fn complex_upgrader() { + test_complex_upgrader::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs deleted file mode 100644 index 9c9d4817588..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ /dev/null @@ -1,150 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::{ - interface::storage::{StoragePtr, WriteStorage}, - vm_latest::{tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode}, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -// pub(crate) fn read_message_root() -> Vec { -// read_bytecode( -// "contracts/l1-contracts/artifacts-zk/contracts/bridgehub/MessageRoot.sol/MessageRoot.json", -// ) -// } - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn read_simple_transfer_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/simple-transfer/simple-transfer.sol/SimpleTransfer.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn 
read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn load_precompiles_contract() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -pub(crate) fn read_expensive_contract() -> (Vec, Contract) { - const PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; - (read_bytecode(PATH), load_contract(PATH)) -} - -pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { - const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; - (read_bytecode(PATH), load_contract(PATH)) -} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs index 6a908c2a73e..2ae5e81a328 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs @@ -13,7 +13,7 @@ use zk_evm_1_5_0::{ zkevm_opcode_defs::{decoding::EncodingModeProduction, Opcode, RetOpcode}, }; -use super::PubdataTracer; +use super::{EvmDeployTracer, PubdataTracer}; use crate::{ glue::GlueInto, interface::{ @@ -38,7 +38,7 @@ use crate::{ }; /// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. -pub(crate) struct DefaultExecutionTracer { +pub struct DefaultExecutionTracer { tx_has_been_processed: bool, execution_mode: VmExecutionMode, @@ -63,14 +63,18 @@ pub(crate) struct DefaultExecutionTracer { // It only takes into account circuits that are generated for actual execution. It doesn't // take into account e.g circuits produced by the initial bootloader memory commitment. pub(crate) circuits_tracer: CircuitsTracer, + // This tracer is responsible for handling EVM deployments and providing the data to the code decommitter. + pub(crate) evm_deploy_tracer: Option>, subversion: MultiVMSubversion, storage: StoragePtr, _phantom: PhantomData, } impl DefaultExecutionTracer { + #[allow(clippy::too_many_arguments)] pub(crate) fn new( computational_gas_limit: u32, + use_evm_emulator: bool, execution_mode: VmExecutionMode, dispatcher: TracerDispatcher, storage: StoragePtr, @@ -92,6 +96,7 @@ impl DefaultExecutionTracer { pubdata_tracer, ret_from_the_bootloader: None, circuits_tracer: CircuitsTracer::new(), + evm_deploy_tracer: use_evm_emulator.then(EvmDeployTracer::new), storage, _phantom: PhantomData, } @@ -172,6 +177,9 @@ macro_rules! 
dispatch_tracers { tracer.$function($( $params ),*); } $self.circuits_tracer.$function($( $params ),*); + if let Some(tracer) = &mut $self.evm_deploy_tracer { + tracer.$function($( $params ),*); + } }; } @@ -289,6 +297,12 @@ impl DefaultExecutionTracer { .finish_cycle(state, bootloader_state) .stricter(&result); + if let Some(evm_deploy_tracer) = &mut self.evm_deploy_tracer { + result = evm_deploy_tracer + .finish_cycle(state, bootloader_state) + .stricter(&result); + } + result.stricter(&self.should_stop_execution()) } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs new file mode 100644 index 00000000000..becc4f22527 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs @@ -0,0 +1,103 @@ +use std::{marker::PhantomData, mem}; + +use zk_evm_1_5_0::{ + aux_structures::Timestamp, + tracing::{AfterExecutionData, VmLocalStateData}, + zkevm_opcode_defs::{ + FarCallOpcode, FatPointer, Opcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, + }, +}; +use zksync_types::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_utils::{bytecode::hash_evm_bytecode, bytes_to_be_words, h256_to_u256}; +use zksync_vm_interface::storage::StoragePtr; + +use super::{traits::VmTracer, utils::read_pointer}; +use crate::{ + interface::{storage::WriteStorage, tracer::TracerExecutionStatus}, + tracers::dynamic::vm_1_5_0::DynTracer, + vm_latest::{BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState}, +}; + +/// Tracer responsible for collecting information about EVM deploys and providing those +/// to the code decommitter. +#[derive(Debug)] +pub(crate) struct EvmDeployTracer { + tracked_signature: [u8; 4], + pending_bytecodes: Vec>, + _phantom: PhantomData, +} + +impl EvmDeployTracer { + pub(crate) fn new() -> Self { + let tracked_signature = + ethabi::short_signature("publishEVMBytecode", &[ethabi::ParamType::Bytes]); + + Self { + tracked_signature, + pending_bytecodes: vec![], + _phantom: PhantomData, + } + } +} + +impl DynTracer> for EvmDeployTracer { + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &SimpleMemory, + _storage: StoragePtr, + ) { + if !matches!( + data.opcode.variant.opcode, + Opcode::FarCall(FarCallOpcode::Normal) + ) { + return; + }; + + let current = state.vm_local_state.callstack.current; + let from = current.msg_sender; + let to = current.this_address; + if from != CONTRACT_DEPLOYER_ADDRESS || to != KNOWN_CODES_STORAGE_ADDRESS { + return; + } + + let calldata_ptr = + state.vm_local_state.registers[usize::from(CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER)]; + let data = read_pointer(memory, FatPointer::from_u256(calldata_ptr.value)); + if data.len() < 4 { + return; + } + let (signature, data) = data.split_at(4); + if signature != self.tracked_signature { + return; + } + + match ethabi::decode(&[ethabi::ParamType::Bytes], data) { + Ok(decoded) => { + let published_bytecode = decoded.into_iter().next().unwrap().into_bytes().unwrap(); + self.pending_bytecodes.push(published_bytecode); + } + Err(err) => tracing::error!("Unable to decode `publishEVMBytecode` call: {err}"), + } + } +} + +impl VmTracer for EvmDeployTracer { + fn finish_cycle( + &mut self, + state: &mut ZkSyncVmState, + _bootloader_state: &mut BootloaderState, + ) -> TracerExecutionStatus { + for published_bytecode in mem::take(&mut self.pending_bytecodes) { + let hash = hash_evm_bytecode(&published_bytecode); + let as_words = 
bytes_to_be_words(published_bytecode); + + state.decommittment_processor.populate( + vec![(h256_to_u256(hash), as_words)], + Timestamp(state.local_state.timestamp), + ); + } + TracerExecutionStatus::Continue + } +} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs index fe916e19e8c..82721a32264 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs @@ -1,11 +1,13 @@ pub(crate) use circuits_tracer::CircuitsTracer; pub(crate) use default_tracers::DefaultExecutionTracer; +pub(crate) use evm_deploy_tracer::EvmDeployTracer; pub(crate) use pubdata_tracer::PubdataTracer; pub(crate) use refunds::RefundsTracer; pub(crate) use result_tracer::ResultTracer; pub(crate) mod circuits_tracer; pub(crate) mod default_tracers; +pub(crate) mod evm_deploy_tracer; pub(crate) mod pubdata_tracer; pub(crate) mod refunds; pub(crate) mod result_tracer; diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs index 9e620f96af2..998e8a13ad2 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs @@ -1,18 +1,17 @@ -use std::marker::PhantomData; +use std::{marker::PhantomData, rc::Rc}; use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zk_evm_1_5_0::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{ - l2_to_l1_log::l2_to_l1_logs_tree_size, writes::StateDiffRecord, AccountTreeId, - ProtocolVersionId, StorageKey, L1_MESSENGER_ADDRESS, -}; +use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; +use zksync_vm_interface::pubdata::PubdataBuilder; use crate::{ interface::{ + pubdata::{L1MessengerL2ToL1Log, PubdataInput}, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -20,14 +19,14 @@ use crate::{ tracers::dynamic::vm_1_5_0::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_latest::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, constants::BOOTLOADER_HEAP_PAGE, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, tracers::{traits::VmTracer, utils::VmHook}, - types::internals::{PubdataInput, ZkSyncVmState}, + types::internals::ZkSyncVmState, utils::logs::collect_events_and_l1_system_logs_after_timestamp, vm::MultiVMSubversion, StorageOracle, @@ -44,7 +43,7 @@ pub(crate) struct PubdataTracer { // to the L1Messenger. 
enforced_state_diffs: Option>, subversion: MultiVMSubversion, - protocol_version: ProtocolVersionId, + pubdata_builder: Option>, _phantom_data: PhantomData, } @@ -53,7 +52,7 @@ impl PubdataTracer { l1_batch_env: L1BatchEnv, execution_mode: VmExecutionMode, subversion: MultiVMSubversion, - protocol_version: ProtocolVersionId, + pubdata_builder: Option>, ) -> Self { Self { l1_batch_env, @@ -61,7 +60,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: None, subversion, - protocol_version, + pubdata_builder, _phantom_data: Default::default(), } } @@ -74,7 +73,7 @@ impl PubdataTracer { execution_mode: VmExecutionMode, forced_state_diffs: Vec, subversion: MultiVMSubversion, - protocol_version: ProtocolVersionId, + pubdata_builder: Option>, ) -> Self { Self { l1_batch_env, @@ -82,7 +81,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: Some(forced_state_diffs), subversion, - protocol_version, + pubdata_builder, _phantom_data: Default::default(), } } @@ -192,7 +191,6 @@ impl PubdataTracer { l2_to_l1_messages: self.get_total_l1_messenger_messages(state), published_bytecodes: self.get_total_published_bytecodes(state), state_diffs: self.get_state_diffs(&state.storage), - l2_to_l1_logs_tree_size: l2_to_l1_logs_tree_size(self.protocol_version), } } } @@ -230,18 +228,22 @@ impl VmTracer for PubdataTracer { if self.pubdata_info_requested { let pubdata_input = self.build_pubdata_input(state); - // Save the pubdata for the future initial bootloader memory building - bootloader_state.set_pubdata_input(pubdata_input.clone()); - // Apply the pubdata to the current memory let mut memory_to_apply = vec![]; apply_pubdata_to_memory( &mut memory_to_apply, - pubdata_input, - bootloader_state.get_pubdata_params(), - bootloader_state.get_vm_subversion(), + self.pubdata_builder + .as_ref() + .expect("`pubdata_builder` is required to finish batch") + .as_ref(), + &pubdata_input, + bootloader_state.protocol_version(), ); + + // Save the pubdata for the future initial bootloader memory building + bootloader_state.set_pubdata_input(pubdata_input); + state.memory.populate_page( BOOTLOADER_HEAP_PAGE as usize, memory_to_apply, diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs index 0b9f704e8db..601b7b8bd01 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs @@ -1,9 +1,7 @@ -pub(crate) use pubdata::PubdataInput; pub(crate) use snapshot::VmSnapshot; pub(crate) use transaction_data::TransactionData; pub(crate) use vm_state::new_vm_state; pub use vm_state::ZkSyncVmState; -pub(crate) mod pubdata; mod snapshot; mod transaction_data; mod vm_state; diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs deleted file mode 100644 index c0684624bd8..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs +++ /dev/null @@ -1,339 +0,0 @@ -use ethabi::Token; -use zksync_contracts::load_sys_contract_interface; -use zksync_mini_merkle_tree::MiniMerkleTree; -use zksync_types::{ - ethabi, - web3::keccak256, - writes::{compress_state_diffs, StateDiffRecord}, -}; -use zksync_utils::bytecode::hash_bytecode; - -use crate::utils::events::L1MessengerL2ToL1Log; -/// Struct based on which the pubdata blob is formed -#[derive(Debug, Clone, Default)] -pub(crate) struct PubdataInput { - pub(crate) user_logs: Vec, - pub(crate) 
l2_to_l1_messages: Vec>, - pub(crate) published_bytecodes: Vec>, - pub(crate) state_diffs: Vec, - pub(crate) l2_to_l1_logs_tree_size: usize, -} - -impl PubdataInput { - pub(crate) fn build_pubdata_legacy(self, with_uncompressed_state_diffs: bool) -> Vec { - let mut l1_messenger_pubdata = vec![]; - - let PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - .. - } = self; - - // Encoding user L2->L1 logs. - // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... || l2tol1logs[n]]` - l1_messenger_pubdata.extend((user_logs.len() as u32).to_be_bytes()); - for l2tol1log in user_logs { - l1_messenger_pubdata.extend(l2tol1log.packed_encoding()); - } - - // Encoding L2->L1 messages - // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` - l1_messenger_pubdata.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); - for message in l2_to_l1_messages { - l1_messenger_pubdata.extend((message.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(message); - } - // Encoding bytecodes - // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` - l1_messenger_pubdata.extend((published_bytecodes.len() as u32).to_be_bytes()); - for bytecode in published_bytecodes { - l1_messenger_pubdata.extend((bytecode.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(bytecode); - } - // Encoding state diffs - // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: intial + repeated) as u32 || sorted state diffs by ]` - let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); - l1_messenger_pubdata.extend(state_diffs_compressed); - - if with_uncompressed_state_diffs { - l1_messenger_pubdata.extend((state_diffs.len() as u32).to_be_bytes()); - for state_diff in state_diffs { - l1_messenger_pubdata.extend(state_diff.encode_padded()); - } - } - - l1_messenger_pubdata - } -} - -pub trait PubdataBuilder { - // when `l2_version` is true it will return the data to be sent to the L1_MESSENGER - // otherwise it returns the array of bytes to be sent to L1 inside the operator input. - fn build_pubdata(&self, input: PubdataInput, l2_version: bool) -> Vec; -} - -pub struct RollupPubdataBuilder { - // l2_handler_address: Address, -} - -impl RollupPubdataBuilder { - pub fn new() -> Self { - Self { - // l2_handler_address: l2_handler_address, - } - } -} - -fn encode_user_logs(user_logs: Vec) -> Vec { - // Encoding user L2->L1 logs. - // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... 
|| l2tol1logs[n]]` - let mut result = vec![]; - result.extend((user_logs.len() as u32).to_be_bytes()); - for l2tol1log in user_logs { - result.extend(l2tol1log.packed_encoding()); - } - result -} - -impl PubdataBuilder for RollupPubdataBuilder { - fn build_pubdata(&self, input: PubdataInput, l2_version: bool) -> Vec { - let mut l1_messenger_pubdata = vec![]; - let mut l2_da_header = vec![]; - - let PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - l2_to_l1_logs_tree_size, - } = input; - - if l2_version { - let chained_log_hash = build_chained_log_hash(user_logs.clone()); - let log_root_hash = build_logs_root(user_logs.clone(), l2_to_l1_logs_tree_size); - let chained_msg_hash = build_chained_message_hash(l2_to_l1_messages.clone()); - let chained_bytecodes_hash = build_chained_bytecode_hash(published_bytecodes.clone()); - - l2_da_header.push(Token::FixedBytes(chained_log_hash)); - l2_da_header.push(Token::FixedBytes(log_root_hash)); - l2_da_header.push(Token::FixedBytes(chained_msg_hash)); - l2_da_header.push(Token::FixedBytes(chained_bytecodes_hash)); - } - - l1_messenger_pubdata.extend(encode_user_logs(user_logs)); - - // Encoding L2->L1 messages - // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` - l1_messenger_pubdata.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); - for message in l2_to_l1_messages { - l1_messenger_pubdata.extend((message.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(message); - } - - // Encoding bytecodes - // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` - l1_messenger_pubdata.extend((published_bytecodes.len() as u32).to_be_bytes()); - for bytecode in published_bytecodes { - l1_messenger_pubdata.extend((bytecode.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(bytecode); - } - - // Encoding state diffs - // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: intial + repeated) as u32 || sorted state diffs by ]` - let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); - l1_messenger_pubdata.extend(state_diffs_compressed); - - if l2_version { - l1_messenger_pubdata.extend((state_diffs.len() as u32).to_be_bytes()); - for state_diff in state_diffs { - l1_messenger_pubdata.extend(state_diff.encode_padded()); - } - - let func_selector = load_sys_contract_interface("IL2DAValidator") - .function("validatePubdata") - .expect("validatePubdata Function does not exist on IL2DAValidator") - .short_signature() - .to_vec(); - - l2_da_header.push(ethabi::Token::Bytes(l1_messenger_pubdata)); - - l1_messenger_pubdata = [func_selector, ethabi::encode(&l2_da_header)] - .concat() - .to_vec(); - } - - l1_messenger_pubdata - } -} - -pub struct ValidiumPubdataBuilder {} - -impl ValidiumPubdataBuilder { - pub fn new() -> Self { - Self {} - } -} - -impl PubdataBuilder for ValidiumPubdataBuilder { - fn build_pubdata(&self, input: PubdataInput, l2_version: bool) -> Vec { - let mut l1_messenger_pubdata = vec![]; - let mut l2_da_header = vec![]; - - let PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - l2_to_l1_logs_tree_size, - } = input; - - if l2_version { - let chained_log_hash = build_chained_log_hash(user_logs.clone()); - let log_root_hash = build_logs_root(user_logs.clone(), l2_to_l1_logs_tree_size); - let chained_msg_hash = 
build_chained_message_hash(l2_to_l1_messages.clone()); - let chained_bytecodes_hash = build_chained_bytecode_hash(published_bytecodes.clone()); - - l2_da_header.push(Token::FixedBytes(chained_log_hash)); - l2_da_header.push(Token::FixedBytes(log_root_hash)); - l2_da_header.push(Token::FixedBytes(chained_msg_hash)); - l2_da_header.push(Token::FixedBytes(chained_bytecodes_hash)); - } - - l1_messenger_pubdata.extend(encode_user_logs(user_logs)); - - if l2_version { - let func_selector = load_sys_contract_interface("IL2DAValidator") - .function("validatePubdata") - .expect("validatePubdata Function does not exist on IL2DAValidator") - .short_signature() - .to_vec(); - - l2_da_header.push(ethabi::Token::Bytes(l1_messenger_pubdata)); - - [func_selector, ethabi::encode(&l2_da_header)] - .concat() - .to_vec() - } else { - let state_diffs_packed = state_diffs - .into_iter() - .flat_map(|diff| diff.encode_padded()) - .collect::>(); - - keccak256(&state_diffs_packed).to_vec() - } - } -} - -fn build_chained_log_hash(user_logs: Vec) -> Vec { - let mut chained_log_hash = vec![0u8; 32]; - - for log in user_logs { - let log_bytes = log.packed_encoding(); - let hash = keccak256(&log_bytes); - - chained_log_hash = keccak256(&[chained_log_hash, hash.to_vec()].concat()).to_vec(); - } - - chained_log_hash -} - -fn build_logs_root( - user_logs: Vec, - l2_to_l1_logs_tree_size: usize, -) -> Vec { - let logs = user_logs.iter().map(|log| { - let encoded = log.packed_encoding(); - let mut slice = [0u8; 88]; - slice.copy_from_slice(&encoded); - slice - }); - MiniMerkleTree::new(logs, Some(l2_to_l1_logs_tree_size)) - .merkle_root() - .as_bytes() - .to_vec() -} - -fn build_chained_message_hash(l2_to_l1_messages: Vec>) -> Vec { - let mut chained_msg_hash = vec![0u8; 32]; - - for msg in l2_to_l1_messages { - let hash = keccak256(&msg); - - chained_msg_hash = keccak256(&[chained_msg_hash, hash.to_vec()].concat()).to_vec(); - } - - chained_msg_hash -} - -fn build_chained_bytecode_hash(published_bytecodes: Vec>) -> Vec { - let mut chained_bytecode_hash = vec![0u8; 32]; - - for bytecode in published_bytecodes { - let hash = hash_bytecode(&bytecode).to_fixed_bytes(); - - chained_bytecode_hash = - keccak256(&[chained_bytecode_hash, hash.to_vec()].concat()).to_vec(); - } - - chained_bytecode_hash -} - -#[cfg(test)] -mod tests { - - // FIXME: restore this test - // #[test] - // fn test_basic_pubdata_building() { - // // Just using some constant addresses for tests - // let addr1 = BOOTLOADER_ADDRESS; - // let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; - - // let user_logs = vec![L1MessengerL2ToL1Log { - // l2_shard_id: 0, - // is_service: false, - // tx_number_in_block: 0, - // sender: addr1, - // key: 1.into(), - // value: 128.into(), - // }]; - - // let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; - - // let published_bytecodes = vec![hex::decode("aaaabbbb").unwrap()]; - - // // For covering more cases, we have two state diffs: - // // One with enumeration index present (and so it is a repeated write) and the one without it. 
- // let state_diffs = vec![ - // StateDiffRecord { - // address: addr2, - // key: 155.into(), - // derived_key: u256_to_h256(125.into()).0, - // enumeration_index: 12, - // initial_value: 11.into(), - // final_value: 12.into(), - // }, - // StateDiffRecord { - // address: addr2, - // key: 156.into(), - // derived_key: u256_to_h256(126.into()).0, - // enumeration_index: 0, - // initial_value: 0.into(), - // final_value: 14.into(), - // }, - // ]; - - // let input = PubdataInput { - // user_logs, - // l2_to_l1_messages, - // published_bytecodes, - // state_diffs, - // }; - - // let pubdata = - // ethabi::encode(&[ethabi::Token::Bytes(input.build_pubdata(true))])[32..].to_vec(); - - // assert_eq!(hex::encode(pubdata), "00000000000000000000000000000000000000000000000000000000000002c700000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000004aaaabbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); - // } -} diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 2ec86eb3cea..90948f2f89f 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -46,8 +46,8 @@ pub(crate) struct TransactionData { pub(crate) raw_bytes: Option>, } -impl From for TransactionData { - fn from(execute_tx: Transaction) -> Self { +impl TransactionData { + pub(crate) fn new(execute_tx: Transaction, use_evm_emulator: bool) -> Self { match execute_tx.common_data { ExecuteTransactionCommon::L2(common_data) => { let nonce = U256::from_big_endian(&common_data.nonce.to_be_bytes()); @@ -62,6 +62,19 @@ impl From for TransactionData { U256::zero() }; + let should_deploy_contract = if execute_tx.execute.contract_address.is_none() { + // Transactions with no `contract_address` should be filtered out by the API server, + // so this is more of a sanity check. 
+ assert!( + use_evm_emulator, + "`execute.contract_address` not set for transaction {:?} with EVM emulation disabled", + common_data.hash() + ); + U256([1, 0, 0, 0]) + } else { + U256::zero() + }; + // Ethereum transactions do not sign gas per pubdata limit, and so for them we need to use // some default value. We use the maximum possible value that is allowed by the bootloader // (i.e. we can not use u64::MAX, because the bootloader requires gas per pubdata for such @@ -85,7 +98,7 @@ impl From for TransactionData { value: execute_tx.execute.value, reserved: [ should_check_chain_id, - U256::zero(), + should_deploy_contract, U256::zero(), U256::zero(), ], diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs index b6e5e127c85..d25f66361f1 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs @@ -33,7 +33,6 @@ use crate::{ oracles::storage::StorageOracle, types::l1_batch::bootloader_initial_memory, utils::l2_blocks::{assert_next_block, load_last_l2_block}, - MultiVMSubversion, }, }; @@ -65,7 +64,6 @@ pub(crate) fn new_vm_state( storage: StoragePtr, system_env: &SystemEnv, l1_batch_env: &L1BatchEnv, - subversion: MultiVMSubversion, ) -> (ZkSyncVmState, BootloaderState) { let last_l2_block = if let Some(last_l2_block) = load_last_l2_block(&storage) { last_l2_block @@ -100,6 +98,13 @@ pub(crate) fn new_vm_state( Timestamp(0), ); + if let Some(evm_emulator) = &system_env.base_system_smart_contracts.evm_emulator { + decommittment_processor.populate( + vec![(h256_to_u256(evm_emulator.hash), evm_emulator.code.clone())], + Timestamp(0), + ); + } + memory.populate( vec![( BOOTLOADER_CODE_PAGE, @@ -119,6 +124,13 @@ pub(crate) fn new_vm_state( Timestamp(0), ); + // By convention, default AA is used as a fallback if the EVM emulator is not available. + let evm_emulator_code_hash = system_env + .base_system_smart_contracts + .evm_emulator + .as_ref() + .unwrap_or(&system_env.base_system_smart_contracts.default_aa) + .hash; let mut vm = VmState::empty_state( storage_oracle, memory, @@ -130,11 +142,7 @@ pub(crate) fn new_vm_state( default_aa_code_hash: h256_to_u256( system_env.base_system_smart_contracts.default_aa.hash, ), - // For now, the default account hash is used as the code hash for the EVM simulator. - // In the 1.5.0 version, it is not possible to instantiate EVM bytecode. - evm_simulator_code_hash: h256_to_u256( - system_env.base_system_smart_contracts.default_aa.hash, - ), + evm_simulator_code_hash: h256_to_u256(evm_emulator_code_hash), zkporter_is_available: system_env.zk_porter_available, }, ); @@ -183,8 +191,7 @@ pub(crate) fn new_vm_state( system_env.execution_mode, bootloader_initial_memory, first_l2_block, - system_env.pubdata_params, - subversion, + system_env.version, ); (vm, bootloader_state) diff --git a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs index 0fb803de5d4..aeb66755f51 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs @@ -1,6 +1,37 @@ -/// Utility functions for the VM. +//! Utility functions for the VM. 
+ +use once_cell::sync::Lazy; +use zk_evm_1_5_0::aux_structures::MemoryPage; +use zksync_types::{H256, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_vm_interface::VmEvent; + pub mod fee; pub mod l2_blocks; pub(crate) mod logs; pub mod overhead; pub mod transaction_encoding; + +pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 2) +} + +/// Extracts all bytecodes marked as known on the system contracts. +pub fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { + static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { + ethabi::long_signature( + "MarkedAsKnown", + &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], + ) + }); + + all_generated_events + .iter() + .filter(|event| { + // Filter events from the deployer contract that match the expected signature. + event.address == KNOWN_CODES_STORAGE_ADDRESS + && event.indexed_topics.len() == 3 + && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE + }) + .map(|event| event.indexed_topics[1]) + .collect() +} diff --git a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs index 86c49a3eb15..ed532f89dbc 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs @@ -10,7 +10,9 @@ pub trait TransactionVmExt { impl TransactionVmExt for Transaction { fn bootloader_encoding_size(&self) -> usize { - let transaction_data: TransactionData = self.clone().into(); + // Since we want to just measure the encoding size, `use_evm_emulator` arg doesn't matter here, + // so we use a more lenient option. + let transaction_data = TransactionData::new(self.clone(), true); transaction_data.into_tokens().len() } } diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 63ec5f7451c..c6573d64200 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -1,25 +1,28 @@ +use std::{collections::HashMap, rc::Rc}; + use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, vm::VmVersion, Transaction, H256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{be_words_to_bytes, h256_to_u256, u256_to_h256}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, VmTrackingContracts, + VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_latest::{ bootloader_state::BootloaderState, old_vm::{events::merge_events, history_recorder::HistoryEnabled}, - tracers::dispatcher::TracerDispatcher, + tracers::{dispatcher::TracerDispatcher, PubdataTracer}, types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}, }, HistoryMode, @@ -36,7 +39,7 @@ pub(crate) enum MultiVMSubversion { SmallBootloaderMemory, /// The final correct version of v1.5.0 IncreasedBootloaderMemory, - /// Version for protocol v25 + /// VM for post-gateway versions. 
Gateway, } @@ -82,6 +85,20 @@ impl Vm { self.state.local_state.callstack.current.ergs_remaining } + pub(crate) fn decommit_bytecodes(&self, hashes: &[H256]) -> HashMap> { + let bytecodes = hashes.iter().map(|&hash| { + let bytecode_words = self + .state + .decommittment_processor + .known_bytecodes + .inner() + .get(&h256_to_u256(hash)) + .unwrap_or_else(|| panic!("Bytecode with hash {hash:?} not found")); + (hash, be_words_to_bytes(bytecode_words)) + }); + bytecodes.collect() + } + // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); @@ -121,18 +138,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(tracer, execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -164,19 +186,31 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + let pubdata_tracer = Some(PubdataTracer::new( + self.batch_env.clone(), + VmExecutionMode::Batch, + self.subversion, + Some(pubdata_builder.clone()), + )); - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + let result = self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + pubdata_tracer, + ); let execution_state = self.get_current_execution_state(); - let bootloader_memory = self.bootloader_state.bootloader_memory(); + let bootloader_memory = self + .bootloader_state + .bootloader_memory(pubdata_builder.as_ref()); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, final_bootloader_memory: Some(bootloader_memory), - pubdata_input: Some(self.bootloader_state.get_encoded_pubdata()), + pubdata_input: Some( + self.bootloader_state + .settlement_layer_pubdata(pubdata_builder.as_ref()), + ), state_diffs: Some( self.bootloader_state .get_pubdata_information() @@ -206,8 +240,7 @@ impl Vm { storage: StoragePtr, subversion: MultiVMSubversion, ) -> Self { - let (state, bootloader_state) = - new_vm_state(storage.clone(), &system_env, &batch_env, subversion); + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); Self { bootloader_state, state, diff --git a/core/lib/multivm/src/versions/vm_m5/utils.rs b/core/lib/multivm/src/versions/vm_m5/utils.rs index 8c5bca674c6..a38618395b1 100644 --- a/core/lib/multivm/src/versions/vm_m5/utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/utils.rs @@ -5,7 +5,7 @@ use zk_evm_1_3_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_contracts::BaseSystemContracts; use 
zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; @@ -253,13 +253,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { ) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - /// Log query, which handle initial and repeated writes to the storage #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct StorageLogQuery { diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 40f66659f29..55afeed17cd 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -1,11 +1,14 @@ +use std::rc::Rc; + use zksync_types::{vm::VmVersion, Transaction}; use zksync_utils::h256_to_u256; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_m5::{ @@ -50,27 +53,34 @@ impl Vm { _phantom: Default::default(), } } + + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics::default() + } } impl VmInterface for Vm { /// Tracers are not supported for here we use `()` as a placeholder type TracerDispatcher = (); - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { crate::vm_m5::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, self.system_env.execution_mode.glue_into(), - ) + ); + PushTransactionResult { + compressed_bytecodes: (&[]).into(), // bytecode compression isn't supported + } } fn inspect( &mut self, _tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { match execution_mode { - VmExecutionMode::OneTx => match self.system_env.execution_mode { + InspectExecutionMode::OneTx => match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => self.vm.execute_next_tx().glue_into(), TxExecutionMode::EstimateFee | TxExecutionMode::EthCall => self .vm @@ -79,8 +89,7 @@ impl VmInterface for Vm { ) .glue_into(), }, - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -102,24 +111,11 @@ impl VmInterface for Vm { // Bytecode compression isn't supported ( Ok(vec![].into()), - self.inspect(&mut (), VmExecutionMode::OneTx), + self.inspect(&mut (), InspectExecutionMode::OneTx), ) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: 0, - event_sink_history: 0, - memory_inner: 0, - memory_history: 0, - decommittment_processor_inner: 0, - decommittment_processor_history: 0, - storage_inner: 0, - storage_history: 0, - } - } - - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( 
crate::vm_m5::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_m6/utils.rs b/core/lib/multivm/src/versions/vm_m6/utils.rs index d9709022fe3..912a30a4eaf 100644 --- a/core/lib/multivm/src/versions/vm_m6/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/utils.rs @@ -5,7 +5,7 @@ use zk_evm_1_3_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; @@ -256,13 +256,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { ) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - pub(crate) fn calculate_computational_gas_used< S: Storage, T: PubdataSpentTracer, diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 627687a5524..4c67a218418 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -1,13 +1,14 @@ -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use zksync_types::{vm::VmVersion, Transaction}; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, @@ -50,24 +51,45 @@ impl Vm { system_env, } } + + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics { + event_sink_inner: self.vm.state.event_sink.get_size(), + event_sink_history: self.vm.state.event_sink.get_history_size(), + memory_inner: self.vm.state.memory.get_size(), + memory_history: self.vm.state.memory.get_history_size(), + decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), + decommittment_processor_history: self + .vm + .state + .decommittment_processor + .get_history_size(), + storage_inner: self.vm.state.storage.get_size(), + storage_history: self.vm.state.storage.get_history_size(), + } + } } impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { - crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( - &mut self.vm, - &tx, - self.system_env.execution_mode.glue_into(), - None, - ) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { + let compressed_bytecodes = + crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( + &mut self.vm, + &tx, + self.system_env.execution_mode.glue_into(), + None, + ); + PushTransactionResult { + compressed_bytecodes: compressed_bytecodes.into(), + } } fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { if let Some(storage_invocations) = tracer.storage_invocations { self.vm @@ -76,7 +98,7 @@ impl VmInterface for Vm { } match 
execution_mode { - VmExecutionMode::OneTx => match self.system_env.execution_mode { + InspectExecutionMode::OneTx => match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => { let enable_call_tracer = tracer.call_tracer.is_some(); let result = self.vm.execute_next_tx( @@ -95,8 +117,7 @@ impl VmInterface for Vm { ) .glue_into(), }, - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -186,24 +207,7 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: self.vm.state.event_sink.get_size(), - event_sink_history: self.vm.state.event_sink.get_history_size(), - memory_inner: self.vm.state.memory.get_size(), - memory_history: self.vm.state.memory.get_history_size(), - decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), - decommittment_processor_history: self - .vm - .state - .decommittment_processor - .get_history_size(), - storage_inner: self.vm.state.storage.get_size(), - storage_history: self.vm.state.storage.get_history_size(), - } - } - - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_m6::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs index 7a9fbb73fe4..ae44e721b0d 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs @@ -491,7 +491,7 @@ fn get_bootloader_memory_v1( predefined_refunds[tx_index_in_block], block_gas_price_per_pubdata as u32, previous_compressed, - compressed_bytecodes, + &compressed_bytecodes, ); previous_compressed += total_compressed_len; @@ -536,7 +536,7 @@ fn get_bootloader_memory_v2( predefined_refunds[tx_index_in_block], block_gas_price_per_pubdata as u32, previous_compressed, - compressed_bytecodes, + &compressed_bytecodes, ); previous_compressed += total_compressed_len_words; @@ -554,7 +554,7 @@ pub fn push_transaction_to_bootloader_memory( tx: &Transaction, execution_mode: TxExecutionMode, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx: TransactionData = tx.clone().into(); let block_gas_per_pubdata_byte = vm.block_context.context.block_gas_price_per_pubdata(); let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); @@ -564,7 +564,7 @@ pub fn push_transaction_to_bootloader_memory( execution_mode, overhead, explicit_compressed_bytecodes, - ); + ) } pub fn push_raw_transaction_to_bootloader_memory( @@ -573,7 +573,7 @@ pub fn push_raw_transaction_to_bootloader_memory( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { match vm.vm_subversion { MultiVMSubversion::V1 => push_raw_transaction_to_bootloader_memory_v1( vm, @@ -599,7 +599,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -651,7 +651,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( predefined_overhead, trusted_ergs_limit, 
previous_bytecodes, - compressed_bytecodes, + &compressed_bytecodes, ); vm.state.memory.populate_page( @@ -661,6 +661,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( ); vm.bootloader_state.add_tx_data(encoded_tx_size); vm.bootloader_state.add_compressed_bytecode(compressed_len); + compressed_bytecodes } // Bytecode compression bug fixed @@ -670,7 +671,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -730,7 +731,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( predefined_overhead, trusted_ergs_limit, previous_bytecodes, - compressed_bytecodes, + &compressed_bytecodes, ); vm.state.memory.populate_page( @@ -741,6 +742,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( vm.bootloader_state.add_tx_data(encoded_tx_size); vm.bootloader_state .add_compressed_bytecode(compressed_bytecodes_encoding_len_words); + compressed_bytecodes } #[allow(clippy::too_many_arguments)] @@ -752,7 +754,7 @@ fn get_bootloader_memory_for_tx( predefined_refund: u32, block_gas_per_pubdata: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let overhead_gas = tx.overhead_gas(block_gas_per_pubdata); let trusted_gas_limit = tx.trusted_gas_limit(block_gas_per_pubdata); @@ -779,7 +781,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( predefined_overhead: u32, trusted_gas_limit: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let mut memory: Vec<(usize, U256)> = Vec::default(); let bootloader_description_offset = @@ -815,8 +817,8 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; let memory_addition: Vec<_> = compressed_bytecodes - .into_iter() - .flat_map(|x| bytecode::encode_call(&x)) + .iter() + .flat_map(bytecode::encode_call) .collect(); let memory_addition = bytes_to_be_words(memory_addition); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs index 7bd488f90a9..14c895d7a0b 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs @@ -133,8 +133,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. 
} => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs index 8196760a621..9462a89be2a 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs @@ -90,6 +90,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs index dcda1457b76..a73c212db29 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs @@ -56,7 +56,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs deleted file mode 100644 index 23b250d485b..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::U256; - -use crate::interface::{Halt, TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, -}; - -use crate::interface::ExecutionResult; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs deleted file mode 100644 index b2c126dea00..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs +++ /dev/null @@ -1,37 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs deleted file mode 100644 index fb2d3389407..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::BLOCK_GAS_LIMIT; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_refunds_enhancement::{CallTracer, HistoryEnabled}; -use once_cell::sync::OnceCell; -use std::sync::Arc; -use zksync_types::{Address, Execute}; - -// This test is ultra slow, so it's ignored by default. 
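The deleted test below is marked `#[ignore]` because it is too slow for the regular suite; ignored tests still compile and can be run explicitly with `cargo test -- --ignored`. A minimal, repo-independent sketch of the mechanism (names are made up for illustration):

```rust
#[test]
#[ignore] // skipped by a plain `cargo test`; run with `cargo test -- --ignored`
fn slow_but_correct() {
    let sum: u64 = (1u64..=1_000_000).sum();
    assert_eq!(sum, 500_000_500_000);
}
```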
-#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. - let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs deleted file mode 100644 index 92e043ae96f..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs +++ /dev/null @@ -1,70 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{ - get_balance, read_test_contract, verify_required_storage, -}; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs deleted file mode 100644 index 1ff6ce12557..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; - -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; - -use crate::interface::TxExecutionMode; -use crate::vm_refunds_enhancement::HistoryDisabled; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
-#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs deleted file mode 100644 index 8c121db3e43..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs +++ /dev/null @@ -1,104 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; - -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_refunds_enhancement::{HistoryDisabled, HistoryMode, Vm}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = 
h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs deleted file mode 100644 index 88ed141630a..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::types::inputs::system_env::TxExecutionMode; -use crate::vm_refunds_enhancement::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
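These cases pin down the bytecode-hash format the bootloader accepts. A hedged sketch of the validity rules, inferred only from the H256 vectors in this deleted test rather than taken from `zksync_utils`: byte 0 is a version byte that must be 1, byte 1 must be 0, bytes 2..4 hold the big-endian length in 32-byte words, and that length must be odd.

```rust
// Illustrative only -- rules inferred from the test vectors above; the error
// strings mirror the expected revert reasons.
fn looks_like_valid_bytecode_hash(raw: [u8; 32]) -> Result<(), &'static str> {
    if raw[0] != 1 || raw[1] != 0 {
        return Err("Incorrectly formatted bytecodeHash");
    }
    let len_in_words = u16::from_be_bytes([raw[2], raw[3]]);
    if len_in_words % 2 == 0 {
        return Err("Code length in words must be odd");
    }
    Ok(())
}
```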
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs deleted file mode 100644 index d7b96133000..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::HistoryDisabled; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
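The property asserted here is that "initial" is judged against committed storage only, so a repeated write to the same key inside the open batch must not change the answer. A repo-independent sketch of that behaviour (a toy struct, not the crate's storage view):

```rust
use std::collections::HashMap;

struct BatchStorage {
    committed: HashMap<u32, u64>, // state persisted by earlier batches
    in_batch: HashMap<u32, u64>,  // pending writes of the open batch
}

impl BatchStorage {
    fn write(&mut self, key: u32, value: u64) {
        self.in_batch.insert(key, value);
    }

    fn is_write_initial(&self, key: &u32) -> bool {
        // Deliberately ignores `in_batch`: a second write within the same
        // batch is still reported as initial.
        !self.committed.contains_key(key)
    }
}
```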
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs deleted file mode 100644 index 138879cd7ed..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs +++ /dev/null @@ -1,125 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 3 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rollout hash - - let basic_initial_writes = 1; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }]; - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs deleted file mode 100644 index 269b6cf396c..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs +++ /dev/null @@ -1,498 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
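For the `initial_storage_writes` / `repeated_storage_writes` assertions in the deleted `l1_tx_execution.rs` test above: writes to the same key within one batch are deduplicated, and a surviving write counts as initial only if the key has no committed value. A simplified sketch over plain `(key, value)` pairs (the real `StorageWritesDeduplicator` works on `StorageLog` entries):

```rust
use std::collections::{HashMap, HashSet};

// Returns (initial_storage_writes, repeated_storage_writes) for one batch.
fn dedup_writes(committed_keys: &HashSet<u32>, logs: &[(u32, u64)]) -> (usize, usize) {
    let mut last_value: HashMap<u32, u64> = HashMap::new();
    for &(key, value) in logs {
        last_value.insert(key, value); // later writes to the same key collapse
    }
    let initial = last_value
        .keys()
        .copied()
        .filter(|key| !committed_keys.contains(key))
        .count();
    (initial, last_value.len() - initial)
}
```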
- -use crate::interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_refunds_enhancement::tests::tester::default_l1_batch; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_refunds_enhancement::{HistoryEnabled, HistoryMode, Vm}; -use zk_evm_1_3_3::aux_structures::Timestamp; -use crate::interface::storage::{ReadStorage, WriteStorage}; -use zksync_system_constants::{ - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -}; -use zksync_types::block::{pack_block_info, unpack_block_info}; -use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, - get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
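Together with the timestamp test above, the check being exercised can be summarised in a small sketch with plain types instead of `L2BlockEnv`; the error strings mirror the `Halt::FailedToSetL2Block` messages asserted in these tests.

```rust
struct BlockInfo {
    number: u32,
    timestamp: u64,
}

fn check_first_l2_block(block: &BlockInfo, batch_timestamp: u64) -> Result<(), String> {
    if block.number == 0 {
        return Err("L2 block number is never expected to be zero".to_string());
    }
    if block.timestamp < batch_timestamp {
        return Err(
            "The timestamp of the L2 block must be greater than or equal to \
             the timestamp of the current batch"
                .to_string(),
        );
    }
    Ok(())
}
```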
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number 
+= 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash: miniblock_hash( - MiniblockNumber(1), - 1, - legacy_miniblock_hash(MiniblockNumber(0)), - H256::zero(), - ), - max_virtual_blocks_to_create: 1, - }, - None, - ); - - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), - max_virtual_blocks_to_create: 1 - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -#[test] -fn test_l2_block_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - vm.vm - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - let l1_tx = get_l1_noop(); - // Firstly we execute the first transaction - 
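The `pack_block_info` / `unpack_block_info` helpers used by these deleted tests fold the block number and timestamp into one storage word. Assuming the `SystemContext` layout of `number * 2^128 + timestamp` (an assumption, not something this diff shows), a standalone sketch using the `primitive-types` crate:

```rust
use primitive_types::U256;

// Assumed layout: upper 128 bits = block number, lower 128 bits = timestamp.
fn pack_block_info_sketch(number: u64, timestamp: u64) -> U256 {
    let shift = U256::from(2u8).pow(U256::from(128u32));
    U256::from(number) * shift + U256::from(timestamp)
}

fn unpack_block_info_sketch(info: U256) -> (u64, u64) {
    let shift = U256::from(2u8).pow(U256::from(128u32));
    ((info / shift).as_u64(), (info % shift).as_u64())
}
```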
vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -#[test] -fn test_l2_block_upgrade_ending() { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch.clone()) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - let storage = vm.storage.clone(); - - storage - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - vm.vm.push_transaction(l1_tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed(), "No revert reason expected"); - - let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, - )); - - let (virtual_block_number, virtual_block_timestamp) = - unpack_block_info(h256_to_u256(virtual_block_info)); - - assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); - assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); - vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs deleted file mode 100644 index ffb38dd3725..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs deleted file mode 100644 index 21959461906..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs +++ /dev/null @@ -1,181 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::interface::TxExecutionMode; -use crate::interface::VmRevertReason; -use crate::interface::{ExecutionResult, Halt, TxRevertReason, 
VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_nonce_holder_tester; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. 
- run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs deleted file mode 100644 index 03a704841b0..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs +++ /dev/null @@ -1,163 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; - -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_eth_signer::EthereumSigner; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::TransactionRequest; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{ - AccountTreeId, Address, Eip712Domain, Execute, L2ChainId, Nonce, Transaction, U256, -}; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, VmTester, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_many_owners_custom_account_contract; -use crate::vm_refunds_enhancement::HistoryDisabled; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut 
self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. 
- l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs deleted file mode 100644 index 8107ddcdabf..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs +++ /dev/null @@ -1,259 +0,0 @@ -use ethabi::Token; - -use zksync_contracts::get_loadnext_contract; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; - -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{ - DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, -}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::{ - BootloaderState, DynTracer, HistoryEnabled, HistoryMode, TracerExecutionStatus, - TracerExecutionStopReason, VmTracer, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - 
TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. 
- } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for calculating the number of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. 
- } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - vec![Box::new(MaxRecursionTracer { - max_recursion_depth: 15, - })], - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.inspect(vec![], VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs deleted file mode 100644 index eb5e3879837..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::interface::{ExecutionResult, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::HistoryDisabled; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. 
}); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs deleted file mode 100644 index 3158fc49444..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs +++ /dev/null @@ -1,127 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_3_3::aux_structures::Timestamp; -use zk_evm_1_3_3::vm_state::VmLocalState; -use crate::interface::storage::WriteStorage; - -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::vm_refunds_enhancement::old_vm::event_sink::InMemoryEventSink; -use crate::vm_refunds_enhancement::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HistoryRecorder, -}; -use crate::vm_refunds_enhancement::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e0..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs deleted file mode 100644 index 8f7ecc0a733..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::interface::VmRevertReason; -use 
crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, -}; -use crate::vm_refunds_enhancement::tests::tester::vm_tester::VmTester; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - 
ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. 
} => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs deleted file mode 100644 index 800af517ed3..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs +++ /dev/null @@ -1,300 +0,0 @@ -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; - -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; -use zksync_types::{ - get_code_key, get_is_account_key, Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, - ProtocolVersionId, U256, -}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::u256_to_h256; - -use crate::vm_refunds_enhancement::constants::BLOCK_GAS_LIMIT; - -use crate::interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, -}; -use crate::vm_refunds_enhancement::tests::tester::Account; -use crate::vm_refunds_enhancement::tests::tester::TxType; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::utils::l2_blocks::load_last_l2_block; -use crate::vm_refunds_enhancement::{HistoryMode, Vm}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - history_mode: H, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: legacy_miniblock_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new( - l1_batch, - self.vm.system_env.clone(), - self.storage.clone(), - self.history_mode.clone(), - ); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - history_mode: H, - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - history_mode: self.history_mode.clone(), - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(history_mode: H) -> Self { - Self { - history_mode, - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, 
system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new( - l1_batch_env, - self.system_env, - storage_ptr.clone(), - self.history_mode.clone(), - ); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - history_mode: self.history_mode, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs deleted file mode 100644 index a839f4708ad..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs +++ /dev/null @@ -1,53 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::interface::TxExecutionMode; -use crate::interface::{TxRevertReason, VmRevertReason}; -use crate::vm_refunds_enhancement::tests::tester::{ - ExpectedError, TransactionTestInfo, VmTesterBuilder, -}; -use crate::vm_refunds_enhancement::tests::utils::{ - get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs deleted file mode 100644 index cbbec9a83d5..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs +++ /dev/null @@ -1,342 +0,0 @@ -use zk_evm_1_3_3::aux_structures::Timestamp; - -use zksync_types::{ - ethabi::Contract, - Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, - {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, - {get_code_key, get_known_code_key, H160}, -}; - -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; - -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, 
VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::verify_required_storage; -use crate::vm_refunds_enhancement::HistoryEnabled; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; - -use super::utils::read_test_contract; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. -#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = 
deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implenentaiton itself -// For the explanatation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs deleted file mode 100644 index ffbb9d89260..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs +++ 
/dev/null @@ -1,106 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; - -use crate::vm_refunds_enhancement::tests::tester::InMemoryStorageView; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::HistoryMode; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 735bd29c3b0..81b0c52cce5 
100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -1,14 +1,16 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_refunds_enhancement::{ @@ -75,18 +77,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(dispatcher, execution_mode) + self.inspect_inner(dispatcher, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -101,7 +108,7 @@ impl VmInterface for Vm { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); - let result = self.inspect(dispatcher, VmExecutionMode::OneTx); + let result = self.inspect(dispatcher, InspectExecutionMode::OneTx); if self.has_unpublished_bytecodes() { ( Err(BytecodeCompressionError::BytecodeCompressionFailed), @@ -118,12 +125,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs index 2ccedcc6aa9..3e2474835fa 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs @@ -133,8 +133,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. 
} => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs index c48d48edd3b..b1ad4d257b7 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs @@ -88,6 +88,7 @@ impl Vm { .refund_tracer .map(|r| r.get_refunds()) .unwrap_or_default(), + new_known_factory_deps: None, }; tx_tracer.dispatcher.save_results(&mut result); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs index d082085a155..dbd8813035e 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs @@ -56,7 +56,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs deleted file mode 100644 index a30b5a58f63..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs +++ /dev/null @@ -1,53 +0,0 @@ -use zksync_types::U256; - -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, -}; - -use crate::vm_latest::HistoryEnabled; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs deleted file mode 100644 index 
773aa77e150..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs +++ /dev/null @@ -1,37 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs deleted file mode 100644 index 7ee647ee1f7..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::tracers::CallTracer; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::constants::BLOCK_GAS_LIMIT; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_virtual_blocks::tracers::traits::ToTracerPointer; -use once_cell::sync::OnceCell; -use std::sync::Arc; -use zksync_types::{Address, Execute}; - -// This test is ultra slow, so it's ignored by default. 
-#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect( - call_tracer.into_tracer_pointer().into(), - VmExecutionMode::OneTx, - ); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. - let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs deleted file mode 100644 index 02a69a6a5d2..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs +++ /dev/null @@ -1,70 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{ - get_balance, read_test_contract, verify_required_storage, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs deleted file mode 100644 index e51b8cab570..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; - -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; - -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
-#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs deleted file mode 100644 index 06d8191310b..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; - -use crate::HistoryMode; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_virtual_blocks::Vm; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let 
hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs deleted file mode 100644 index f8074c1db10..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::interface::TxExecutionMode; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs deleted file mode 100644 index 2c7ef4a8d11..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs deleted file mode 100644 index 64d9f98ddb3..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs +++ /dev/null @@ -1,125 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 3 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rollout hash - - let basic_initial_writes = 1; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }]; - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs deleted file mode 100644 index cba534deeaf..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs +++ /dev/null @@ -1,502 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
- -use crate::interface::{ - ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface, -}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_virtual_blocks::tests::tester::default_l1_batch; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_virtual_blocks::Vm; -use crate::HistoryMode; -use zk_evm_1_3_3::aux_structures::Timestamp; -use crate::interface::storage::{ReadStorage, WriteStorage}; -use zksync_system_constants::{ - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -}; -use zksync_types::block::{pack_block_info, unpack_block_info}; -use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, - get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option<Halt>, - override_timestamp: Option<u64>, - override_prev_block_hash: Option<H256>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block.
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option<u32>, - overriden_second_block_timestamp: Option<u64>, - overriden_second_block_prev_block_hash: Option<H256>, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number
+= 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash: miniblock_hash( - MiniblockNumber(1), - 1, - legacy_miniblock_hash(MiniblockNumber(0)), - H256::zero(), - ), - max_virtual_blocks_to_create: 1, - }, - None, - ); - - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), - max_virtual_blocks_to_create: 1 - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -#[test] -fn test_l2_block_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - vm.vm - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - let l1_tx = get_l1_noop(); - // Firstly we execute the first transaction - 
vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -#[test] -fn test_l2_block_upgrade_ending() { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch.clone()) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - let storage = vm.storage.clone(); - - storage - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - vm.vm.push_transaction(l1_tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed(), "No revert reason expected"); - - let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, - )); - - let (virtual_block_number, virtual_block_timestamp) = - unpack_block_info(h256_to_u256(virtual_block_info)); - - assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); - assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); - vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs deleted file mode 100644 index ffb38dd3725..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs deleted file mode 100644 index 162a3f46cb1..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs +++ /dev/null @@ -1,182 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, -}; -use crate::vm_latest::HistoryEnabled; -use 
crate::vm_virtual_blocks::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_nonce_holder_tester; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From<NonceHolderTestMode> for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option<String>, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value.
- run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs deleted file mode 100644 index d0b3b7cbee3..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = 
account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.l2_to_l1_logs, - current_state_without_predefined_refunds.l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.l2_to_l1_logs, - current_state_without_predefined_refunds.l2_to_l1_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs deleted file mode 100644 index 988841e90ce..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; - -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_eth_signer::EthereumSigner; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::TransactionRequest; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, Eip712Domain, Execute, Nonce, Transaction, U256}; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{Account, VmTester, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_many_owners_custom_account_contract; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, 270.into()).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(chain_id.into()); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, chain_id.into()); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, chain_id.into()).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs deleted file mode 100644 index c4eac73499f..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::interface::{ExecutionResult, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. 
}); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs deleted file mode 100644 index a5c0db9468b..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs +++ /dev/null @@ -1,119 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_3_3::aux_structures::Timestamp; -use zk_evm_1_3_3::vm_state::VmLocalState; -use crate::interface::storage::WriteStorage; - -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::vm_virtual_blocks::old_vm::event_sink::InMemoryEventSink; -use crate::vm_virtual_blocks::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HistoryRecorder, -}; -use crate::vm_virtual_blocks::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; -use crate::HistoryMode as CommonHistoryMode; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. 
- pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e0..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs deleted file mode 100644 index 15d3d98ab1d..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,216 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, -}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::vm_tester::VmTester; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
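The long byte vectors in the `ExpectedError` conversion above are standard Solidity `Error(string)` revert payloads: a 4-byte selector `0x08c379a0` followed by the ABI encoding of the message (a 32-byte offset word, a 32-byte length word, and the message padded to a 32-byte boundary). The sketch below rebuilds such a payload in plain `std` so the magic numbers are easier to read; the hard-coded selector value is the only assumption.

```rust
/// Build the ABI encoding of `Error(string)` as produced by a Solidity `revert(reason)`.
fn encode_error_string(msg: &str) -> Vec<u8> {
    // Selector: first 4 bytes of keccak256("Error(string)").
    let mut out = vec![0x08, 0xc3, 0x79, 0xa0];
    // Head: offset of the string data relative to the start of the arguments (0x20 here).
    let mut offset = [0u8; 32];
    offset[31] = 0x20;
    out.extend_from_slice(&offset);
    // Length of the UTF-8 message as a 32-byte big-endian word.
    let mut len = [0u8; 32];
    len[24..].copy_from_slice(&(msg.len() as u64).to_be_bytes());
    out.extend_from_slice(&len);
    // Message bytes, zero-padded to a multiple of 32 bytes.
    let mut data = msg.as_bytes().to_vec();
    data.resize(data.len().div_ceil(32) * 32, 0);
    out.extend_from_slice(&data);
    out
}

fn main() {
    let payload = encode_error_string("Incorrect nonce");
    // 4 (selector) + 32 (offset) + 32 (length) + 32 (padded message) = 100 bytes,
    // matching the `WrongNonce` vector in the deleted test helper above.
    assert_eq!(payload.len(), 100);
    assert_eq!(&payload[..4], &[8, 195, 121, 160]);
    assert_eq!(payload[67], 15); // "Incorrect nonce" is 15 bytes long
}
```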
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs deleted file mode 100644 index 9fe0635eba3..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs +++ /dev/null @@ -1,291 +0,0 @@ -use std::marker::PhantomData; -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; - -use crate::HistoryMode; -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; -use zksync_types::{ - get_code_key, get_is_account_key, Address, L1BatchNumber, MiniblockNumber, Nonce, - ProtocolVersionId, U256, -}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::u256_to_h256; - -use crate::vm_virtual_blocks::constants::BLOCK_GAS_LIMIT; - -use crate::interface::{L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, VmExecutionMode}; -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::tests::tester::Account; -use crate::vm_virtual_blocks::tests::tester::TxType; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; -use crate::vm_virtual_blocks::utils::l2_blocks::load_last_l2_block; -use crate::vm_virtual_blocks::Vm; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
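`execute_tx_and_verify` above captures the essence of the rollback check: dump the oracle state, take a snapshot, execute, and, if the transaction is expected to be rolled back, restore the snapshot and assert that the dumped state is identical to what it was before. Here is a minimal, self-contained sketch of that pattern with a toy VM (the `ToyVm` and `apply` names are illustrative, not part of the codebase).

```rust
/// Toy stand-in for a VM whose whole state is a single counter plus a snapshot slot.
#[derive(Default)]
struct ToyVm {
    state: u64,
    snapshot: Option<u64>,
}

impl ToyVm {
    fn dump_state(&self) -> u64 {
        self.state
    }
    fn make_snapshot(&mut self) {
        self.snapshot = Some(self.state);
    }
    fn rollback_to_snapshot(&mut self) {
        self.state = self.snapshot.take().expect("no snapshot taken");
    }
    /// "Execute" a transaction: mutate the state and report success or failure.
    fn apply(&mut self, delta: u64, should_fail: bool) -> Result<(), ()> {
        self.state += delta;
        if should_fail { Err(()) } else { Ok(()) }
    }
}

fn execute_and_maybe_rollback(vm: &mut ToyVm, delta: u64, should_fail: bool) {
    let before = vm.dump_state();
    vm.make_snapshot();
    let result = vm.apply(delta, should_fail);
    if result.is_err() {
        // Rejected transactions must leave no trace: state after rollback == state before.
        vm.rollback_to_snapshot();
        assert_eq!(vm.dump_state(), before, "rollback must fully restore the state");
    }
}

fn main() {
    let mut vm = ToyVm::default();
    execute_and_maybe_rollback(&mut vm, 5, false); // accepted, state advances
    execute_and_maybe_rollback(&mut vm, 7, true); // rejected, state restored
    assert_eq!(vm.dump_state(), 5);
}
```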
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: legacy_miniblock_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - _phantom: PhantomData, - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - _phantom: PhantomData, - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - _phantom: PhantomData, - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: 270.into(), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - self.system_env = system_env; - 
self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
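The deleted `VmTesterBuilder` uses a chaining builder where every `with_*` method consumes `self` and returns it, and the history mode is carried purely at the type level via `PhantomData<H>`. A compact sketch of that construction style (toy types and field names, not the real tester):

```rust
use std::marker::PhantomData;

/// Marker type standing in for a history mode (hypothetical name).
struct HistoryOn;

/// Toy builder mirroring the chaining style of the deleted `VmTesterBuilder`.
struct TesterBuilder<H> {
    rich_accounts: u32,
    gas_limit: u32,
    _phantom: PhantomData<H>,
}

impl<H> TesterBuilder<H> {
    fn new() -> Self {
        Self { rich_accounts: 0, gas_limit: 30_000_000, _phantom: PhantomData }
    }
    fn with_random_rich_accounts(mut self, number: u32) -> Self {
        self.rich_accounts += number;
        self
    }
    fn with_gas_limit(mut self, gas_limit: u32) -> Self {
        self.gas_limit = gas_limit;
        self
    }
    fn build(self) -> (u32, u32) {
        (self.rich_accounts, self.gas_limit)
    }
}

fn main() {
    let (accounts, gas_limit) = TesterBuilder::<HistoryOn>::new()
        .with_random_rich_accounts(1)
        .with_gas_limit(50_000_000)
        .build();
    assert_eq!((accounts, gas_limit), (1, 50_000_000));
}
```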
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs deleted file mode 100644 index 8258abe0685..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::interface::{TxExecutionMode, TxRevertReason, VmRevertReason}; -use zksync_types::{Execute, H160}; - -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{ - ExpectedError, TransactionTestInfo, VmTesterBuilder, -}; -use crate::vm_virtual_blocks::tests::utils::{ - get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs deleted file mode 100644 index 8b3fa0ea291..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs +++ /dev/null @@ -1,344 +0,0 @@ -use zk_evm_1_3_3::aux_structures::Timestamp; - -use zksync_types::{ - ethabi::Contract, - Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, - {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, - {get_code_key, get_known_code_key, H160}, -}; - -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; - -use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, -}; -use 
crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::verify_required_storage; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; - -use super::utils::read_test_contract; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. -#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
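The first upgrade test above encodes two bootloader rules: a protocol-upgrade transaction may appear at most once per batch, and if it appears it must be the very first transaction. A standalone sketch of that validation logic over a toy transaction list (the `TxKind`/`validate_batch` names and error strings are simplified stand-ins for the bootloader's actual assertion):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum TxKind {
    ProtocolUpgrade,
    Normal,
}

/// Errors unless an upgrade transaction, if present, is the unique first entry.
fn validate_batch(txs: &[TxKind]) -> Result<(), &'static str> {
    let upgrades = txs.iter().filter(|tx| **tx == TxKind::ProtocolUpgrade).count();
    match upgrades {
        0 => Ok(()),
        1 if txs.first() == Some(&TxKind::ProtocolUpgrade) => Ok(()),
        1 => Err("Protocol upgrade tx not first"),
        _ => Err("More than one protocol upgrade tx in a batch"),
    }
}

fn main() {
    use TxKind::*;
    assert!(validate_batch(&[ProtocolUpgrade, Normal]).is_ok());
    assert!(validate_batch(&[Normal, ProtocolUpgrade]).is_err());
    assert!(validate_batch(&[ProtocolUpgrade, Normal, ProtocolUpgrade]).is_err());
    assert!(validate_batch(&[Normal, Normal]).is_ok());
}
```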
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = 
deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implenentaiton itself -// For the explanatation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs deleted file mode 100644 index e3db232ffce..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs +++ /dev/null @@ -1,106 
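`get_forced_deploy_tx` above builds the `forceDeployOnAddresses` calldata by ABI-encoding an array of five-field tuples. As an illustration of that token layout, the sketch below encodes the same shape with the standalone `ethabi` crate (an assumption; the patch itself uses the ABI loaded via `zksync_contracts::deployer_contract`). The real calldata additionally prepends the function's 4-byte selector, which is omitted here.

```rust
use ethabi::{Address, Token, Uint};

fn main() {
    // One force deployment, mirroring the `ForceDeployment` struct in the deleted test:
    // (bytecodeHash, newAddress, callConstructor, value, input).
    let deployment = Token::Tuple(vec![
        Token::FixedBytes(vec![0u8; 32]), // bytecode hash placeholder
        Token::Address(Address::zero()),  // address to deploy to
        Token::Bool(false),               // do not run the constructor
        Token::Uint(Uint::zero()),        // no value
        Token::Bytes(Vec::new()),         // empty constructor calldata
    ]);

    // `forceDeployOnAddresses` takes an array of such tuples.
    let encoded_args = ethabi::encode(&[Token::Array(vec![deployment])]);

    println!("{} bytes of encoded arguments", encoded_args.len());
}
```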
+0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; - -use crate::vm_virtual_blocks::tests::tester::InMemoryStorageView; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_virtual_blocks::types::internals::ZkSyncVmState; -use crate::vm_virtual_blocks::HistoryMode; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 2a9d6eed6c7..a2d18e10de4 100644 --- 
a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -1,14 +1,16 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_virtual_blocks::{ @@ -75,18 +77,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, tracer: &mut TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -118,12 +125,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index bfb121a740e..5ff27046377 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,16 +1,17 @@ -use std::mem; +use std::{mem, rc::Rc}; -use zksync_types::{vm::VmVersion, Transaction}; +use zksync_types::{vm::VmVersion, ProtocolVersionId, Transaction}; use zksync_vm2::interface::Tracer; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::history_mode::HistoryMode, interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, utils::ShadowVm, - BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, - VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, + SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::TracerDispatcher, vm_latest::HistoryEnabled, @@ -55,8 +56,7 @@ macro_rules! 
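The `vm.rs` hunk above changes `push_transaction` to return a `PushTransactionResult<'_>` whose compressed bytecodes borrow from the VM's bootloader state instead of being copied out. A minimal sketch of that "return a borrowed-or-owned view from a mutating call" pattern using `Cow` (toy types only, not the actual interface):

```rust
use std::borrow::Cow;

type Bytecode = Vec<u8>;

/// Result of pushing a transaction: a view of the bytecodes compressed for it.
struct PushResult<'a> {
    compressed_bytecodes: Cow<'a, [Bytecode]>,
}

#[derive(Default)]
struct ToyVm {
    last_tx_compressed_bytecodes: Vec<Bytecode>,
}

impl ToyVm {
    fn push_transaction(&mut self, raw_bytecode: Bytecode) -> PushResult<'_> {
        // Pretend "compression" just stores the bytecode; the caller gets a borrowed
        // slice, so no copy is made unless the caller decides to take ownership.
        self.last_tx_compressed_bytecodes = vec![raw_bytecode];
        PushResult {
            compressed_bytecodes: Cow::Borrowed(&self.last_tx_compressed_bytecodes),
        }
    }
}

fn main() {
    let mut vm = ToyVm::default();
    let result = vm.push_transaction(vec![1, 2, 3]);
    assert_eq!(result.compressed_bytecodes.len(), 1);
}
```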
dispatch_legacy_vm { impl VmInterface for LegacyVmInstance { type TracerDispatcher = TracerDispatcher, H>; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { dispatch_legacy_vm!(self.push_transaction(tx)) } @@ -64,7 +64,7 @@ impl VmInterface for LegacyVmInstance { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { dispatch_legacy_vm!(self.inspect(&mut mem::take(dispatcher).into(), execution_mode)) } @@ -87,13 +87,9 @@ impl VmInterface for LegacyVmInstance { )) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - dispatch_legacy_vm!(self.record_vm_memory_metrics()) - } - /// Return the results of execution of all batch - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_legacy_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_legacy_vm!(self.finish_batch(pubdata_builder)) } } @@ -222,6 +218,11 @@ impl LegacyVmInstance { } } } + + /// Returns memory-related oracle metrics. + pub fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + dispatch_legacy_vm!(self.record_vm_memory_metrics()) + } } /// Fast VM shadowed by the latest legacy VM. @@ -255,14 +256,14 @@ impl VmInterface for FastVmInsta Tr, ); - fn push_transaction(&mut self, tx: Transaction) { - dispatch_fast_vm!(self.push_transaction(tx)); + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + dispatch_fast_vm!(self.push_transaction(tx)) } fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { match self { Self::Fast(vm) => vm.inspect(&mut tracer.1, execution_mode), @@ -292,12 +293,8 @@ impl VmInterface for FastVmInsta } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - dispatch_fast_vm!(self.record_vm_memory_metrics()) - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_fast_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_fast_vm!(self.finish_batch(pubdata_builder)) } } @@ -340,3 +337,11 @@ impl FastVmInstance { Self::Shadowed(ShadowedFastVm::new(l1_batch_env, system_env, storage_view)) } } + +/// Checks whether the protocol version is supported by the fast VM. 
+pub fn is_supported_by_fast_vm(protocol_version: ProtocolVersionId) -> bool { + matches!( + protocol_version.into(), + VmVersion::Vm1_5_0IncreasedBootloaderMemory + ) +} diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index 308cd65427f..3484f2dad34 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -42,7 +42,6 @@ impl FileBackedObjectStore { Bucket::SchedulerWitnessJobsFri, Bucket::ProofsFri, Bucket::StorageSnapshot, - Bucket::TeeVerifierInput, Bucket::VmDumps, ] { let bucket_path = format!("{base_dir}/{bucket}"); diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 740e8d76e24..0859d58d04b 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -19,7 +19,6 @@ pub enum Bucket { ProofsTee, StorageSnapshot, DataAvailability, - TeeVerifierInput, VmDumps, } @@ -39,7 +38,6 @@ impl Bucket { Self::ProofsTee => "proofs_tee", Self::StorageSnapshot => "storage_logs_snapshots", Self::DataAvailability => "data_availability", - Self::TeeVerifierInput => "tee_verifier_inputs", Self::VmDumps => "vm_dumps", } } diff --git a/core/lib/protobuf_config/Cargo.toml b/core/lib/protobuf_config/Cargo.toml index 92d9bd53978..87a0a63567b 100644 --- a/core/lib/protobuf_config/Cargo.toml +++ b/core/lib/protobuf_config/Cargo.toml @@ -26,6 +26,7 @@ rand.workspace = true hex.workspace = true secrecy.workspace = true tracing.workspace = true +time.workspace = true [build-dependencies] zksync_protobuf_build.workspace = true diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs index bb9189fd649..9cfa73c28ac 100644 --- a/core/lib/protobuf_config/src/api.rs +++ b/core/lib/protobuf_config/src/api.rs @@ -1,4 +1,4 @@ -use std::num::NonZeroUsize; +use std::num::{NonZeroU32, NonZeroUsize}; use anyhow::Context as _; use zksync_config::configs::{api, ApiConfig}; @@ -113,6 +113,11 @@ impl ProtoRepr for proto::Web3JsonRpc { .map(|x| x.try_into()) .transpose() .context("latest_values_cache_size_mb")?, + latest_values_max_block_lag: self + .latest_values_max_block_lag + .map(|x| x.try_into()) + .transpose() + .context("latest_values_max_block_lag")?, fee_history_limit: self.fee_history_limit, max_batch_request_size: self .max_batch_request_size @@ -184,6 +189,7 @@ impl ProtoRepr for proto::Web3JsonRpc { latest_values_cache_size_mb: this .latest_values_cache_size_mb .map(|x| x.try_into().unwrap()), + latest_values_max_block_lag: this.latest_values_max_block_lag.map(NonZeroU32::get), fee_history_limit: this.fee_history_limit, max_batch_request_size: this.max_batch_request_size.map(|x| x.try_into().unwrap()), max_response_body_size_mb: this diff --git a/core/lib/protobuf_config/src/chain.rs b/core/lib/protobuf_config/src/chain.rs index f91bf07e43f..2f8ac8df07e 100644 --- a/core/lib/protobuf_config/src/chain.rs +++ b/core/lib/protobuf_config/src/chain.rs @@ -86,6 +86,7 @@ impl ProtoRepr for proto::StateKeeper { // needed during the initialization from files bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, fee_account_addr: None, l1_batch_commit_data_generator_mode: Default::default(), }) diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index 81cad437fe4..2219b6a82ea 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -148,6 +148,7 @@ impl ProtoRepr for proto::Config { }; Ok(Self::Type { + port: self.port.and_then(|x| x.try_into().ok()), 
server_addr: required(&self.server_addr) .and_then(|x| Ok(x.parse()?)) .context("server_addr")?, @@ -182,6 +183,7 @@ impl ProtoRepr for proto::Config { fn build(this: &Self::Type) -> Self { Self { + port: this.port.map(|x| x.into()), server_addr: Some(this.server_addr.to_string()), public_addr: Some(this.public_addr.0.clone()), max_payload_size: Some(this.max_payload_size.try_into().unwrap()), diff --git a/core/lib/protobuf_config/src/contracts.rs b/core/lib/protobuf_config/src/contracts.rs index 3141c7149ec..dc5b1c567e8 100644 --- a/core/lib/protobuf_config/src/contracts.rs +++ b/core/lib/protobuf_config/src/contracts.rs @@ -119,12 +119,6 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("base_token_addr")?, - l2_da_validator_addr: l2 - .da_validator_addr - .as_ref() - .map(|x| parse_h160(x)) - .transpose() - .context("l2_da_validator_addr")?, chain_admin_addr: l1 .chain_admin_addr .as_ref() @@ -132,6 +126,12 @@ impl ProtoRepr for proto::Contracts { .transpose() .context("chain_admin_addr")?, settlement_layer: self.settlement_layer, + l2_da_validator_addr: l2 + .da_validator_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_da_validator_addr")?, }) } diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs index 1499e88efb4..a17a8711a27 100644 --- a/core/lib/protobuf_config/src/da_client.rs +++ b/core/lib/protobuf_config/src/da_client.rs @@ -1,10 +1,10 @@ use anyhow::Context; -use zksync_config::{ - configs::{ - da_client::DAClientConfig::{Avail, ObjectStore}, - {self}, +use zksync_config::configs::{ + self, + da_client::{ + avail::{AvailClientConfig, AvailConfig, AvailDefaultConfig, AvailGasRelayConfig}, + DAClientConfig::{Avail, ObjectStore}, }, - AvailConfig, }; use zksync_protobuf::{required, ProtoRepr}; @@ -18,15 +18,31 @@ impl ProtoRepr for proto::DataAvailabilityClient { let client = match config { proto::data_availability_client::Config::Avail(conf) => Avail(AvailConfig { - api_node_url: required(&conf.api_node_url) - .context("api_node_url")? - .clone(), bridge_api_url: required(&conf.bridge_api_url) .context("bridge_api_url")? .clone(), - app_id: *required(&conf.app_id).context("app_id")?, timeout: *required(&conf.timeout).context("timeout")? as usize, - max_retries: *required(&conf.max_retries).context("max_retries")? as usize, + config: match conf.config.as_ref() { + Some(proto::avail_config::Config::FullClient(full_client_conf)) => { + AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: required(&full_client_conf.api_node_url) + .context("api_node_url")? + .clone(), + app_id: *required(&full_client_conf.app_id).context("app_id")?, + }) + } + Some(proto::avail_config::Config::GasRelay(gas_relay_conf)) => { + AvailClientConfig::GasRelay(AvailGasRelayConfig { + gas_relay_api_url: required(&gas_relay_conf.gas_relay_api_url) + .context("gas_relay_api_url")? + .clone(), + max_retries: *required(&gas_relay_conf.max_retries) + .context("max_retries")? + as usize, + }) + } + None => return Err(anyhow::anyhow!("Invalid Avail DA configuration")), + }, }), proto::data_availability_client::Config::ObjectStore(conf) => { ObjectStore(object_store_proto::ObjectStore::read(conf)?) 
@@ -41,11 +57,22 @@ impl ProtoRepr for proto::DataAvailabilityClient { Avail(config) => Self { config: Some(proto::data_availability_client::Config::Avail( proto::AvailConfig { - api_node_url: Some(config.api_node_url.clone()), bridge_api_url: Some(config.bridge_api_url.clone()), - app_id: Some(config.app_id), timeout: Some(config.timeout as u64), - max_retries: Some(config.max_retries as u64), + config: match &config.config { + AvailClientConfig::FullClient(conf) => Some( + proto::avail_config::Config::FullClient(proto::AvailClientConfig { + api_node_url: Some(conf.api_node_url.clone()), + app_id: Some(conf.app_id), + }), + ), + AvailClientConfig::GasRelay(conf) => Some( + proto::avail_config::Config::GasRelay(proto::AvailGasRelayConfig { + gas_relay_api_url: Some(conf.gas_relay_api_url.clone()), + max_retries: Some(conf.max_retries as u64), + }), + ), + }, }, )), }, diff --git a/core/lib/protobuf_config/src/en.rs b/core/lib/protobuf_config/src/en.rs index 9c07d1d3929..9d1a3931060 100644 --- a/core/lib/protobuf_config/src/en.rs +++ b/core/lib/protobuf_config/src/en.rs @@ -1,4 +1,7 @@ -use std::{num::NonZeroUsize, str::FromStr}; +use std::{ + num::{NonZeroU64, NonZeroUsize}, + str::FromStr, +}; use anyhow::Context; use zksync_basic_types::{url::SensitiveUrl, L1ChainId, L2ChainId}; @@ -36,6 +39,9 @@ impl ProtoRepr for proto::ExternalNode { .as_ref() .map(|a| a.parse().context("gateway_url")) .transpose()?, + bridge_addresses_refresh_interval_sec: self + .bridge_addresses_refresh_interval_sec + .and_then(NonZeroU64::new), }) } @@ -55,6 +61,9 @@ impl ProtoRepr for proto::ExternalNode { .gateway_url .as_ref() .map(|a| a.expose_str().to_string()), + bridge_addresses_refresh_interval_sec: this + .bridge_addresses_refresh_interval_sec + .map(|a| a.get()), } } } diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index 4d41ee5036d..2f5ac5c35cf 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -1,7 +1,7 @@ use anyhow::Context as _; use zksync_config::configs::{self}; use zksync_protobuf::{required, ProtoRepr}; -use zksync_types::settlement::SettlementMode; +use zksync_types::{pubdata_da::PubdataSendingMode, settlement::SettlementMode}; use crate::{proto::eth as proto, read_optional_repr}; @@ -26,23 +26,21 @@ impl proto::ProofSendingMode { } impl proto::PubdataSendingMode { - fn new(x: &configs::eth_sender::PubdataSendingMode) -> Self { - use configs::eth_sender::PubdataSendingMode as From; + fn new(x: &PubdataSendingMode) -> Self { match x { - From::Calldata => Self::Calldata, - From::Blobs => Self::Blobs, - From::Custom => Self::Custom, - From::RelayedL2Calldata => Self::RelayedL2Calldata, + PubdataSendingMode::Calldata => Self::Calldata, + PubdataSendingMode::Blobs => Self::Blobs, + PubdataSendingMode::Custom => Self::Custom, + PubdataSendingMode::RelayedL2Calldata => Self::RelayedL2Calldata, } } - fn parse(&self) -> configs::eth_sender::PubdataSendingMode { - use configs::eth_sender::PubdataSendingMode as To; + fn parse(&self) -> PubdataSendingMode { match self { - Self::Calldata => To::Calldata, - Self::Blobs => To::Blobs, - Self::Custom => To::Custom, - Self::RelayedL2Calldata => To::RelayedL2Calldata, + Self::Calldata => PubdataSendingMode::Calldata, + Self::Blobs => PubdataSendingMode::Blobs, + Self::Custom => PubdataSendingMode::Custom, + Self::RelayedL2Calldata => PubdataSendingMode::RelayedL2Calldata, } } } @@ -136,6 +134,9 @@ impl ProtoRepr for proto::Sender { tx_aggregation_paused: 
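The `da_client` changes above replace the flat Avail settings with a choice between a full client and a gas-relay client (a protobuf `oneof`, see the `da_client.proto` hunk further below). The same shape can be illustrated with a small serde enum deserialized from YAML; the type and field names here are simplified stand-ins, not the actual zksync config structs.

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
struct AvailConfig {
    bridge_api_url: String,
    timeout: u64,
    client: AvailClient,
}

/// Exactly one of the two variants must be present, mirroring the proto `oneof`.
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "snake_case")]
enum AvailClient {
    FullClient { api_node_url: String, app_id: u32 },
    GasRelay { gas_relay_api_url: String, max_retries: u64 },
}

fn main() {
    let yaml = r#"
bridge_api_url: https://bridge.example.com
timeout: 30
client:
  full_client:
    api_node_url: wss://node.example.com
    app_id: 1
"#;
    let config: AvailConfig = serde_yaml::from_str(yaml).expect("valid config");
    match config.client {
        AvailClient::FullClient { app_id, .. } => assert_eq!(app_id, 1),
        AvailClient::GasRelay { .. } => unreachable!(),
    }
}
```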
self.tx_aggregation_only_prove_and_execute.unwrap_or(false), ignore_db_nonce: None, priority_tree_start_index: self.priority_op_start_index.map(|x| x as usize), + time_in_mempool_in_l1_blocks_cap: self + .time_in_mempool_in_l1_blocks_cap + .unwrap_or(Self::Type::default_time_in_mempool_in_l1_blocks_cap()), }) } @@ -169,6 +170,7 @@ impl ProtoRepr for proto::Sender { tx_aggregation_only_prove_and_execute: Some(this.tx_aggregation_only_prove_and_execute), tx_aggregation_paused: Some(this.tx_aggregation_paused), priority_op_start_index: this.priority_tree_start_index.map(|x| x as u64), + time_in_mempool_in_l1_blocks_cap: Some(this.time_in_mempool_in_l1_blocks_cap), } } } @@ -183,9 +185,9 @@ impl ProtoRepr for proto::GasAdjuster { .and_then(|x| Ok((*x).try_into()?)) .context("max_base_fee_samples")?, pricing_formula_parameter_a: *required(&self.pricing_formula_parameter_a) - .context("pricing_formula_parameter_a")?, + .unwrap_or(&Self::Type::default_pricing_formula_parameter_a()), pricing_formula_parameter_b: *required(&self.pricing_formula_parameter_b) - .context("pricing_formula_parameter_b")?, + .unwrap_or(&Self::Type::default_pricing_formula_parameter_b()), internal_l1_pricing_multiplier: *required(&self.internal_l1_pricing_multiplier) .context("internal_l1_pricing_multiplier")?, internal_enforced_l1_gas_price: self.internal_enforced_l1_gas_price, diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs index 58d0448d83e..d2695f54dbf 100644 --- a/core/lib/protobuf_config/src/genesis.rs +++ b/core/lib/protobuf_config/src/genesis.rs @@ -76,6 +76,12 @@ impl ProtoRepr for proto::Genesis { .and_then(|x| parse_h256(x)) .context("default_aa_hash")?, ), + evm_emulator_hash: self + .evm_emulator_hash + .as_deref() + .map(parse_h256) + .transpose() + .context("evm_emulator_hash")?, l1_chain_id: required(&self.l1_chain_id) .map(|x| L1ChainId(*x)) .context("l1_chain_id")?, @@ -106,6 +112,7 @@ impl ProtoRepr for proto::Genesis { genesis_protocol_semantic_version: this.protocol_version.map(|x| x.to_string()), default_aa_hash: this.default_aa_hash.map(|x| format!("{:?}", x)), bootloader_hash: this.bootloader_hash.map(|x| format!("{:?}", x)), + evm_emulator_hash: this.evm_emulator_hash.map(|x| format!("{:?}", x)), fee_account: Some(format!("{:?}", this.fee_account)), l1_chain_id: Some(this.l1_chain_id.0), l2_chain_id: Some(this.l2_chain_id.as_u64()), diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index 059df4ebc19..885dd16e770 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -12,11 +12,14 @@ mod commitment_generator; mod consensus; mod contract_verifier; mod contracts; +mod da_client; mod da_dispatcher; mod database; mod en; mod eth; mod experimental; +mod external_price_api_client; +mod external_proof_integration_api; mod gateway; mod general; mod genesis; @@ -26,15 +29,12 @@ mod observability; mod proof_data_handler; pub mod proto; mod prover; +mod prover_autoscaler; +mod prover_job_monitor; mod pruning; mod secrets; -mod snapshots_creator; - -mod da_client; -mod external_price_api_client; -mod external_proof_integration_api; -mod prover_job_monitor; mod snapshot_recovery; +mod snapshots_creator; #[cfg(test)] mod tests; mod utils; @@ -65,24 +65,23 @@ pub fn read_optional_repr(field: &Option

) -> Option { .transpose() // This error will printed, only if the config partially filled, allows to debug config issues easier .map_err(|err| { - tracing::error!("Failed to serialize config: {err}"); + tracing::error!("Failed to parse config: {err:#}"); err }) .ok() .flatten() } -pub fn decode_yaml_repr( +/// Reads a yaml file. +pub fn read_yaml_repr( path: &PathBuf, deny_unknown_fields: bool, ) -> anyhow::Result { let yaml = std::fs::read_to_string(path).with_context(|| path.display().to_string())?; - let d = serde_yaml::Deserializer::from_str(&yaml); - let this: T = zksync_protobuf::serde::Deserialize { + zksync_protobuf::serde::Deserialize { deny_unknown_fields, } - .proto(d)?; - this.read() + .proto_repr_from_yaml::(&yaml) } pub fn encode_yaml_repr(value: &T::Type) -> anyhow::Result> { diff --git a/core/lib/protobuf_config/src/proof_data_handler.rs b/core/lib/protobuf_config/src/proof_data_handler.rs index 4b7bd2fd7c3..a587c702633 100644 --- a/core/lib/protobuf_config/src/proof_data_handler.rs +++ b/core/lib/protobuf_config/src/proof_data_handler.rs @@ -1,6 +1,7 @@ use anyhow::Context as _; use zksync_config::configs; use zksync_protobuf::{repr::ProtoRepr, required}; +use zksync_types::L1BatchNumber; use crate::proto::prover as proto; @@ -14,9 +15,15 @@ impl ProtoRepr for proto::ProofDataHandler { proof_generation_timeout_in_secs: required(&self.proof_generation_timeout_in_secs) .and_then(|x| Ok((*x).try_into()?)) .context("proof_generation_timeout_in_secs")?, - tee_support: required(&self.tee_support) - .copied() - .context("tee_support")?, + tee_config: configs::TeeConfig { + tee_support: self + .tee_support + .unwrap_or_else(configs::TeeConfig::default_tee_support), + first_tee_processed_batch: self + .first_tee_processed_batch + .map(|x| L1BatchNumber(x as u32)) + .unwrap_or_else(configs::TeeConfig::default_first_tee_processed_batch), + }, }) } @@ -24,7 +31,8 @@ impl ProtoRepr for proto::ProofDataHandler { Self { http_port: Some(this.http_port.into()), proof_generation_timeout_in_secs: Some(this.proof_generation_timeout_in_secs.into()), - tee_support: Some(this.tee_support), + tee_support: Some(this.tee_config.tee_support), + first_tee_processed_batch: Some(this.tee_config.first_tee_processed_batch.0 as u64), } } } diff --git a/core/lib/protobuf_config/src/proto/config/api.proto b/core/lib/protobuf_config/src/proto/config/api.proto index c0d03ea7818..c97c4f3fbe2 100644 --- a/core/lib/protobuf_config/src/proto/config/api.proto +++ b/core/lib/protobuf_config/src/proto/config/api.proto @@ -41,7 +41,8 @@ message Web3JsonRpc { repeated string api_namespaces = 32; // Optional, if empty all namespaces are available optional bool extended_api_tracing = 33; // optional, default false optional bool estimate_gas_optimize_search = 34; // optional, default false - optional string settlement_layer_url = 35; // optional + optional uint32 latest_values_max_block_lag = 35; // optional + optional string settlement_layer_url = 36; // optional reserved 15; reserved "l1_to_l2_transactions_compatibility_mode"; reserved 11; reserved "request_timeout"; diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto index d01bda2c847..73fa2435996 100644 --- a/core/lib/protobuf_config/src/proto/config/da_client.proto +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -5,12 +5,26 @@ package zksync.config.da_client; import "zksync/config/object_store.proto"; message AvailConfig { - optional string api_node_url = 1; optional 
string bridge_api_url = 2; - optional uint32 app_id = 4; optional uint64 timeout = 5; - optional uint64 max_retries = 6; + oneof config { + AvailClientConfig full_client = 7; + AvailGasRelayConfig gas_relay = 8; + } + reserved 1; reserved "api_node_url"; reserved 3; reserved "seed"; + reserved 4; reserved "app_id"; + reserved 6; reserved "max_retries"; +} + +message AvailClientConfig { + optional string api_node_url = 1; + optional uint32 app_id = 2; +} + +message AvailGasRelayConfig { + optional string gas_relay_api_url = 1; + optional uint64 max_retries = 2; } message DataAvailabilityClient { diff --git a/core/lib/protobuf_config/src/proto/config/en.proto b/core/lib/protobuf_config/src/proto/config/en.proto index d8a13d31d4b..69412704ea0 100644 --- a/core/lib/protobuf_config/src/proto/config/en.proto +++ b/core/lib/protobuf_config/src/proto/config/en.proto @@ -10,4 +10,5 @@ message ExternalNode { optional uint64 main_node_rate_limit_rps = 6; // optional optional config.genesis.L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 7; // optional, default to rollup optional string gateway_url = 8; // optional + optional uint64 bridge_addresses_refresh_interval_sec = 9; // optional } diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto index 33270efd1f2..24d30bc6187 100644 --- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto +++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto @@ -53,7 +53,8 @@ message Sender { reserved 19; reserved "proof_loading_mode"; optional bool tx_aggregation_paused = 20; // required optional bool tx_aggregation_only_prove_and_execute = 21; // required - optional uint64 priority_op_start_index = 22; // optional + optional uint32 time_in_mempool_in_l1_blocks_cap = 22; // optional + optional uint64 priority_op_start_index = 23; // optional } message GasAdjuster { diff --git a/core/lib/protobuf_config/src/proto/config/genesis.proto b/core/lib/protobuf_config/src/proto/config/genesis.proto index 54af7a459f9..6559595ae61 100644 --- a/core/lib/protobuf_config/src/proto/config/genesis.proto +++ b/core/lib/protobuf_config/src/proto/config/genesis.proto @@ -28,6 +28,7 @@ message Genesis { optional Prover prover = 10; optional L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 29; // optional, default to rollup optional string genesis_protocol_semantic_version = 12; // optional; - optional uint64 sl_chain_id = 13; // required; + optional string evm_emulator_hash = 13; // optional; h256 + optional uint64 sl_chain_id = 14; // required; reserved 11; reserved "shared_bridge"; } diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index 4fe3861183b..92ba770a756 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -107,5 +107,6 @@ message WitnessVectorGenerator { message ProofDataHandler { optional uint32 http_port = 1; // required; u16 optional uint32 proof_generation_timeout_in_secs = 2; // required; s - optional bool tee_support = 3; // required + optional bool tee_support = 3; // optional + optional uint64 first_tee_processed_batch = 4; // optional } diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto new file mode 100644 index 00000000000..9b7f201e9b7 --- /dev/null +++ 
b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package zksync.config.prover_autoscaler; + +import "zksync/std.proto"; +import "zksync/config/observability.proto"; + +message ProverAutoscalerConfig { + optional std.Duration graceful_shutdown_timeout = 1; // optional + optional ProverAutoscalerAgentConfig agent_config = 2; // optional + optional ProverAutoscalerScalerConfig scaler_config = 3; // optional + optional observability.Observability observability = 4; // optional +} + +message ProverAutoscalerAgentConfig { + optional uint32 prometheus_port = 1; // required + optional uint32 http_port = 2; // required + repeated string namespaces = 3; // optional + optional string cluster_name = 4; // optional + optional bool dry_run = 5; // optional +} + +message ProtocolVersion { + optional string namespace = 1; // required + optional string protocol_version = 2; // required +} + +message ClusterPriority { + optional string cluster = 1; // required + optional uint32 priority = 2; // required +} + +message ProverSpeed { + optional string gpu = 1; // required + optional uint32 speed = 2; // required +} + +message MaxProver { + optional string cluster_and_gpu = 1; // required, format: / + optional uint32 max = 2; // required +} + +message MinProver { + optional string namespace = 1; // required + optional uint32 min = 2; // required +} + +message ProverAutoscalerScalerConfig { + optional uint32 prometheus_port = 1; // required + optional std.Duration scaler_run_interval = 2; // optional + optional string prover_job_monitor_url = 3; // required + repeated string agents = 4; // required at least one + repeated ProtocolVersion protocol_versions = 5; // repeated at least one + repeated ClusterPriority cluster_priorities = 6; // optional + repeated ProverSpeed prover_speed = 7; // optional + optional uint32 long_pending_duration_s = 8; // optional + repeated MaxProver max_provers = 9; // optional + repeated MinProver min_provers = 10; // optional +} diff --git a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto index a4e8c1d60dd..74f468627f8 100644 --- a/core/lib/protobuf_config/src/proto/config/secrets.proto +++ b/core/lib/protobuf_config/src/proto/config/secrets.proto @@ -22,6 +22,7 @@ message ConsensusSecrets { message AvailSecret { optional string seed_phrase = 1; + optional string gas_relay_api_key = 2; } message DataAvailabilitySecrets { diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index 92527df739a..9b0d69e7270 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -70,6 +70,9 @@ message Config { reserved 3; reserved "validators"; + // Port to listen on, for incoming TCP connections. + optional uint32 port = 12; + // IP:port to listen on, for incoming TCP connections. // Use `0.0.0.0:` to listen on all network interfaces (i.e. on all IPs exposed by this VM). 
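As a reading aid for the new `MaxProver` message above (keyed as `<cluster>/<gpu>`) and the reader added later in `prover_autoscaler.rs`, here is a minimal, self-contained sketch of how such entries can be folded into a nested map. Types are simplified: the real code parses the GPU part into a `Gpu` enum and uses different error handling, so treat names and values here as illustrative only.

```rust
use std::collections::HashMap;

// Fold "<cluster>/<gpu>" keys into cluster -> gpu -> max, mirroring the shape
// of the `max_provers` config map (simplified: GPU stays a plain string here).
fn fold_max_provers(entries: &[(&str, u32)]) -> HashMap<String, HashMap<String, u32>> {
    entries.iter().fold(HashMap::new(), |mut acc, (key, max)| {
        if let Some((cluster, gpu)) = key.split_once('/') {
            acc.entry(cluster.to_string())
                .or_default()
                .insert(gpu.to_string(), *max);
        }
        acc
    })
}

fn main() {
    let max_provers =
        fold_max_provers(&[("cluster-a/l4", 10), ("cluster-a/t4", 5), ("cluster-b/l4", 2)]);
    assert_eq!(max_provers["cluster-a"]["l4"], 10);
    assert_eq!(max_provers["cluster-b"].len(), 1);
}
```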
optional string server_addr = 1; // required; IpAddr diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs b/core/lib/protobuf_config/src/prover_autoscaler.rs new file mode 100644 index 00000000000..51f1b162d4c --- /dev/null +++ b/core/lib/protobuf_config/src/prover_autoscaler.rs @@ -0,0 +1,240 @@ +use std::collections::HashMap; + +use anyhow::Context; +use time::Duration; +use zksync_config::configs::{self, prover_autoscaler::Gpu}; +use zksync_protobuf::{read_optional, repr::ProtoRepr, required, ProtoFmt}; + +use crate::{proto::prover_autoscaler as proto, read_optional_repr}; + +impl ProtoRepr for proto::ProverAutoscalerConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + graceful_shutdown_timeout: read_optional(&self.graceful_shutdown_timeout) + .context("graceful_shutdown_timeout")? + .unwrap_or(Self::Type::default_graceful_shutdown_timeout()), + agent_config: read_optional_repr(&self.agent_config), + scaler_config: read_optional_repr(&self.scaler_config), + observability: read_optional_repr(&self.observability), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + graceful_shutdown_timeout: Some(ProtoFmt::build(&this.graceful_shutdown_timeout)), + agent_config: this.agent_config.as_ref().map(ProtoRepr::build), + scaler_config: this.scaler_config.as_ref().map(ProtoRepr::build), + observability: this.observability.as_ref().map(ProtoRepr::build), + } + } +} + +impl ProtoRepr for proto::ProverAutoscalerAgentConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerAgentConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + http_port: required(&self.http_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_port")?, + namespaces: self.namespaces.to_vec(), + cluster_name: Some("".to_string()), + dry_run: self.dry_run.unwrap_or(Self::Type::default_dry_run()), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + http_port: Some(this.http_port.into()), + namespaces: this.namespaces.clone(), + cluster_name: this.cluster_name.clone(), + dry_run: Some(this.dry_run), + } + } +} + +impl ProtoRepr for proto::ProverAutoscalerScalerConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerScalerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + scaler_run_interval: read_optional(&self.scaler_run_interval) + .context("scaler_run_interval")? + .unwrap_or(Self::Type::default_scaler_run_interval()), + prover_job_monitor_url: required(&self.prover_job_monitor_url) + .context("prover_job_monitor_url")? 
+ .clone(), + agents: self.agents.to_vec(), + protocol_versions: self + .protocol_versions + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("protocol_versions")?, + cluster_priorities: self + .cluster_priorities + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("cluster_priorities")?, + prover_speed: self + .prover_speed + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("prover_speed")?, + long_pending_duration: match self.long_pending_duration_s { + Some(s) => Duration::seconds(s.into()), + None => Self::Type::default_long_pending_duration(), + }, + max_provers: self.max_provers.iter().fold(HashMap::new(), |mut acc, e| { + let (cluster_and_gpu, max) = e.read().expect("max_provers"); + if let Some((cluster, gpu)) = cluster_and_gpu.split_once('/') { + acc.entry(cluster.to_string()) + .or_default() + .insert(gpu.parse().expect("max_provers/gpu"), max); + } + acc + }), + min_provers: self + .min_provers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("min_provers")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + scaler_run_interval: Some(ProtoFmt::build(&this.scaler_run_interval)), + prover_job_monitor_url: Some(this.prover_job_monitor_url.clone()), + agents: this.agents.clone(), + protocol_versions: this + .protocol_versions + .iter() + .map(|(k, v)| proto::ProtocolVersion::build(&(k.clone(), v.clone()))) + .collect(), + cluster_priorities: this + .cluster_priorities + .iter() + .map(|(k, v)| proto::ClusterPriority::build(&(k.clone(), *v))) + .collect(), + prover_speed: this + .prover_speed + .iter() + .map(|(k, v)| proto::ProverSpeed::build(&(*k, *v))) + .collect(), + long_pending_duration_s: Some(this.long_pending_duration.whole_seconds() as u32), + max_provers: this + .max_provers + .iter() + .flat_map(|(cluster, inner_map)| { + inner_map.iter().map(move |(gpu, max)| { + proto::MaxProver::build(&(format!("{}/{}", cluster, gpu), *max)) + }) + }) + .collect(), + min_provers: this + .min_provers + .iter() + .map(|(k, v)| proto::MinProver::build(&(k.clone(), *v))) + .collect(), + } + } +} + +impl ProtoRepr for proto::ProtocolVersion { + type Type = (String, String); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.namespace).context("namespace")?.clone(), + required(&self.protocol_version) + .context("protocol_version")? + .clone(), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + namespace: Some(this.0.clone()), + protocol_version: Some(this.1.clone()), + } + } +} + +impl ProtoRepr for proto::ClusterPriority { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.cluster).context("cluster")?.clone(), + *required(&self.priority).context("priority")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + cluster: Some(this.0.clone()), + priority: Some(this.1), + } + } +} + +impl ProtoRepr for proto::ProverSpeed { + type Type = (Gpu, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.gpu).context("gpu")?.parse()?, + *required(&self.speed).context("speed")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + gpu: Some(this.0.to_string()), + speed: Some(this.1), + } + } +} + +impl ProtoRepr for proto::MaxProver { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.cluster_and_gpu) + .context("cluster_and_gpu")? 
+ .parse()?, + *required(&self.max).context("max")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + cluster_and_gpu: Some(this.0.to_string()), + max: Some(this.1), + } + } +} + +impl ProtoRepr for proto::MinProver { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.namespace).context("namespace")?.clone(), + *required(&self.min).context("min")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + namespace: Some(this.0.to_string()), + min: Some(this.1), + } + } +} diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs index 5af7901a36a..b7e300ad910 100644 --- a/core/lib/protobuf_config/src/secrets.rs +++ b/core/lib/protobuf_config/src/secrets.rs @@ -2,7 +2,7 @@ use std::str::FromStr; use anyhow::Context; use secrecy::ExposeSecret; -use zksync_basic_types::{seed_phrase::SeedPhrase, url::SensitiveUrl}; +use zksync_basic_types::{api_key::APIKey, seed_phrase::SeedPhrase, url::SensitiveUrl}; use zksync_config::configs::{ consensus::{AttesterSecretKey, ConsensusSecrets, NodeSecretKey, ValidatorSecretKey}, da_client::avail::AvailSecrets, @@ -112,14 +112,31 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { let secrets = required(&self.da_secrets).context("config")?; let client = match secrets { - DaSecrets::Avail(avail_secret) => DataAvailabilitySecrets::Avail(AvailSecrets { - seed_phrase: Some( - SeedPhrase::from_str( - required(&avail_secret.seed_phrase).context("seed_phrase")?, - ) - .unwrap(), - ), - }), + DaSecrets::Avail(avail_secret) => { + let seed_phrase = match avail_secret.seed_phrase.as_ref() { + Some(seed) => match SeedPhrase::from_str(seed) { + Ok(seed) => Some(seed), + Err(_) => None, + }, + None => None, + }; + let gas_relay_api_key = match avail_secret.gas_relay_api_key.as_ref() { + Some(api_key) => match APIKey::from_str(api_key) { + Ok(api_key) => Some(api_key), + Err(_) => None, + }, + None => None, + }; + if seed_phrase.is_none() && gas_relay_api_key.is_none() { + return Err(anyhow::anyhow!( + "At least one of seed_phrase or gas_relay_api_key must be provided" + )); + } + DataAvailabilitySecrets::Avail(AvailSecrets { + seed_phrase, + gas_relay_api_key, + }) + } }; Ok(client) @@ -142,7 +159,24 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { None }; - Some(DaSecrets::Avail(AvailSecret { seed_phrase })) + let gas_relay_api_key = if config.gas_relay_api_key.is_some() { + Some( + config + .clone() + .gas_relay_api_key + .unwrap() + .0 + .expose_secret() + .to_string(), + ) + } else { + None + }; + + Some(DaSecrets::Avail(AvailSecret { + seed_phrase, + gas_relay_api_key, + })) } }; diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs index d653b9b92bf..c72bce0bf9a 100644 --- a/core/lib/protobuf_config/src/tests.rs +++ b/core/lib/protobuf_config/src/tests.rs @@ -2,7 +2,7 @@ use std::{path::PathBuf, str::FromStr}; use zksync_protobuf::testonly::{test_encode_all_formats, ReprConv}; -use crate::{decode_yaml_repr, proto}; +use crate::{proto, read_yaml_repr}; /// Tests config <-> proto (boilerplate) conversions. #[test] @@ -60,14 +60,11 @@ fn test_encoding() { #[test] fn verify_file_parsing() { let base_path = PathBuf::from_str("../../../etc/env/file_based/").unwrap(); - decode_yaml_repr::(&base_path.join("general.yaml"), true) - .unwrap(); + read_yaml_repr::(&base_path.join("general.yaml"), true).unwrap(); // It's allowed to have unknown fields in wallets, e.g. 
we keep private key for fee account - decode_yaml_repr::(&base_path.join("wallets.yaml"), false).unwrap(); - decode_yaml_repr::(&base_path.join("genesis.yaml"), true).unwrap(); - decode_yaml_repr::(&base_path.join("contracts.yaml"), true) - .unwrap(); - decode_yaml_repr::(&base_path.join("secrets.yaml"), true).unwrap(); - decode_yaml_repr::(&base_path.join("external_node.yaml"), true) - .unwrap(); + read_yaml_repr::(&base_path.join("wallets.yaml"), false).unwrap(); + read_yaml_repr::(&base_path.join("genesis.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("contracts.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("secrets.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("external_node.yaml"), true).unwrap(); } diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index 776cd3141cb..acf104cc4c6 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -31,7 +31,7 @@ pub enum ProofGenerationDataResponse { } #[derive(Debug, Serialize, Deserialize)] -pub struct TeeProofGenerationDataResponse(pub Option>); +pub struct TeeProofGenerationDataResponse(pub Box); #[derive(Debug, Serialize, Deserialize)] pub enum SubmitProofResponse { diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 22a20223c8b..cfc1d4a0d55 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -3,9 +3,9 @@ use std::{collections::HashMap, convert::TryInto, fmt::Debug}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; +use zksync_object_store::{_reexports::BoxedError, serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ - basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, + basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, commitment::PubdataParams, witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, }; @@ -136,8 +136,25 @@ impl WitnessInputMerklePaths { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct VMRunWitnessInputData { + pub l1_batch_number: L1BatchNumber, + pub used_bytecodes: HashMap>, + pub initial_heap_content: Vec<(usize, U256)>, + pub protocol_version: ProtocolVersionId, + pub bootloader_code: Vec<[u8; 32]>, + pub default_account_code_hash: U256, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub evm_emulator_code_hash: Option, + pub storage_refunds: Vec, + pub pubdata_costs: Vec, + pub witness_block_state: WitnessStorageState, +} + +// skip_serializing_if for field evm_emulator_code_hash doesn't work fine with bincode, +// so we are implementing custom deserialization for it +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct VMRunWitnessInputDataLegacy { pub l1_batch_number: L1BatchNumber, pub used_bytecodes: HashMap>, pub initial_heap_content: Vec<(usize, U256)>, @@ -149,6 +166,23 @@ pub struct VMRunWitnessInputData { pub witness_block_state: WitnessStorageState, } +impl From for VMRunWitnessInputData { + fn from(value: VMRunWitnessInputDataLegacy) -> Self { + Self { + l1_batch_number: value.l1_batch_number, + used_bytecodes: value.used_bytecodes, + initial_heap_content: value.initial_heap_content, + protocol_version: value.protocol_version, + bootloader_code: value.bootloader_code, + 
default_account_code_hash: value.default_account_code_hash, + evm_emulator_code_hash: None, + storage_refunds: value.storage_refunds, + pubdata_costs: value.pubdata_costs, + witness_block_state: value.witness_block_state, + } + } +} + impl StoredObject for VMRunWitnessInputData { const BUCKET: Bucket = Bucket::WitnessInput; @@ -158,10 +192,20 @@ impl StoredObject for VMRunWitnessInputData { format!("vm_run_data_{key}.bin") } - serialize_using_bincode!(); + fn serialize(&self) -> Result, BoxedError> { + zksync_object_store::bincode::serialize(self).map_err(Into::into) + } + + fn deserialize(bytes: Vec) -> Result { + zksync_object_store::bincode::deserialize::(&bytes).or_else(|_| { + zksync_object_store::bincode::deserialize::(&bytes) + .map(Into::into) + .map_err(Into::into) + }) + } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, pub merkle_paths: WitnessInputMerklePaths, @@ -169,6 +213,25 @@ pub struct WitnessInputData { pub eip_4844_blobs: Eip4844Blobs, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WitnessInputDataLegacy { + pub vm_run_data: VMRunWitnessInputDataLegacy, + pub merkle_paths: WitnessInputMerklePaths, + pub previous_batch_metadata: L1BatchMetadataHashes, + pub eip_4844_blobs: Eip4844Blobs, +} + +impl From for WitnessInputData { + fn from(value: WitnessInputDataLegacy) -> Self { + Self { + vm_run_data: value.vm_run_data.into(), + merkle_paths: value.merkle_paths, + previous_batch_metadata: value.previous_batch_metadata, + eip_4844_blobs: value.eip_4844_blobs, + } + } +} + impl StoredObject for WitnessInputData { const BUCKET: Bucket = Bucket::WitnessInput; @@ -178,10 +241,20 @@ impl StoredObject for WitnessInputData { format!("witness_inputs_{key}.bin") } - serialize_using_bincode!(); + fn serialize(&self) -> Result, BoxedError> { + zksync_object_store::bincode::serialize(self).map_err(Into::into) + } + + fn deserialize(bytes: Vec) -> Result { + zksync_object_store::bincode::deserialize::(&bytes).or_else(|_| { + zksync_object_store::bincode::deserialize::(&bytes) + .map(Into::into) + .map_err(Into::into) + }) + } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct L1BatchMetadataHashes { pub root_hash: H256, pub meta_hash: H256, @@ -191,27 +264,30 @@ pub struct L1BatchMetadataHashes { /// Version 1 of the data used as input for the TEE verifier. 
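The custom `StoredObject::serialize`/`deserialize` implementations above exist because bincode is not self-describing: once `skip_serializing_if` drops the optional EVM-emulator field, the encoded bytes look exactly like the legacy layout, so decoding them as the new struct fails. Below is a minimal sketch of that fallback pattern with hypothetical stand-in types (not the crate's actual payloads), assuming bincode 1.x.

```rust
use serde::{Deserialize, Serialize};

// Illustrative stand-ins for the real payload types.
#[derive(Serialize, Deserialize)]
struct Current {
    value: u64,
    // Mirrors the attribute used above: when `None`, the field is absent from the
    // byte stream, making it indistinguishable from the legacy encoding.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    extra: Option<u64>,
}

#[derive(Serialize, Deserialize)]
struct Legacy {
    value: u64,
}

impl From<Legacy> for Current {
    fn from(legacy: Legacy) -> Self {
        Self { value: legacy.value, extra: None }
    }
}

// Try the current layout first, then fall back to the legacy layout,
// as the new `deserialize` implementations do.
fn decode(bytes: &[u8]) -> Result<Current, bincode::Error> {
    bincode::deserialize::<Current>(bytes)
        .or_else(|_| bincode::deserialize::<Legacy>(bytes).map(Current::from))
}

fn main() -> Result<(), bincode::Error> {
    let legacy_bytes = bincode::serialize(&Legacy { value: 7 })?;
    assert_eq!(decode(&legacy_bytes)?.value, 7);
    Ok(())
}
```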
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct V1TeeVerifierInput { - pub witness_input_merkle_paths: WitnessInputMerklePaths, + pub vm_run_data: VMRunWitnessInputData, + pub merkle_paths: WitnessInputMerklePaths, pub l2_blocks_execution_data: Vec, pub l1_batch_env: L1BatchEnv, pub system_env: SystemEnv, - pub used_contracts: Vec<(H256, Vec)>, + pub pubdata_params: PubdataParams, } impl V1TeeVerifierInput { pub fn new( - witness_input_merkle_paths: WitnessInputMerklePaths, + vm_run_data: VMRunWitnessInputData, + merkle_paths: WitnessInputMerklePaths, l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, - used_contracts: Vec<(H256, Vec)>, + pubdata_params: PubdataParams, ) -> Self { V1TeeVerifierInput { - witness_input_merkle_paths, + vm_run_data, + merkle_paths, l2_blocks_execution_data, l1_batch_env, system_env, - used_contracts, + pubdata_params, } } } @@ -232,17 +308,6 @@ impl TeeVerifierInput { } } -impl StoredObject for TeeVerifierInput { - const BUCKET: Bucket = Bucket::TeeVerifierInput; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("tee_verifier_input_for_l1_batch_{key}.bin") - } - - serialize_using_bincode!(); -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs index 7c569f73a5a..cf68d2e181a 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -174,7 +174,6 @@ pub(super) fn mock_l2_block_header(l2_block_number: L2BlockNumber) -> L2BlockHea l1_tx_count: 0, l2_tx_count: 0, fee_account_address: Address::repeat_byte(1), - pubdata_params: Default::default(), base_fee_per_gas: 0, gas_per_pubdata_limit: 0, batch_fee_input: Default::default(), @@ -183,6 +182,7 @@ pub(super) fn mock_l2_block_header(l2_block_number: L2BlockNumber) -> L2BlockHea virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), } } diff --git a/core/lib/state/src/cache/lru_cache.rs b/core/lib/state/src/cache/lru_cache.rs index fa37bdb3e22..55b037bbb8c 100644 --- a/core/lib/state/src/cache/lru_cache.rs +++ b/core/lib/state/src/cache/lru_cache.rs @@ -46,6 +46,13 @@ where Self { name, cache } } + /// Returns the capacity of this cache in bytes. + pub fn capacity(&self) -> u64 { + self.cache + .as_ref() + .map_or(0, |cache| cache.policy().max_capacity().unwrap_or(u64::MAX)) + } + /// Gets an entry and pulls it to the front if it exists. 
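The new `capacity()` accessor on the LRU cache above reads the configured limit through `policy().max_capacity()`. Assuming the backing cache is `moka` (which that call suggests, though this is an assumption rather than something stated in the patch), a stand-alone version of the query looks like this; recent moka versions gate `moka::sync` behind the `sync` feature.

```rust
use moka::sync::Cache;

fn main() {
    // A cache built with an explicit limit reports it back through its policy.
    let bounded: Cache<u64, u64> = Cache::builder().max_capacity(1_024).build();
    assert_eq!(bounded.policy().max_capacity(), Some(1_024));

    // Without a limit, `max_capacity()` is `None`; the accessor above maps that to `u64::MAX`.
    let unbounded: Cache<u64, u64> = Cache::builder().build();
    assert_eq!(unbounded.policy().max_capacity(), None);
}
```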
pub fn get(&self, key: &K) -> Option { let latency = METRICS.latency[&(self.name, Method::Get)].start(); diff --git a/core/lib/state/src/postgres/mod.rs b/core/lib/state/src/postgres/mod.rs index 67866634ee4..f689f1487f3 100644 --- a/core/lib/state/src/postgres/mod.rs +++ b/core/lib/state/src/postgres/mod.rs @@ -72,8 +72,7 @@ impl CacheValue for TimestampedStorageValue { #[allow(clippy::cast_possible_truncation)] // doesn't happen in practice fn cache_weight(&self) -> u32 { const WEIGHT: usize = mem::size_of::() + mem::size_of::(); - // ^ Since values are small in size, we want to account for key sizes as well - + // ^ Since values are small, we want to account for key sizes as well WEIGHT as u32 } } @@ -114,6 +113,14 @@ impl ValuesCache { Self(Arc::new(RwLock::new(inner))) } + fn capacity(&self) -> u64 { + self.0 + .read() + .expect("values cache is poisoned") + .values + .capacity() + } + /// *NB.* The returned value should be considered immediately stale; at best, it can be /// the lower boundary on the current `valid_for` value. fn valid_for(&self) -> L2BlockNumber { @@ -154,80 +161,86 @@ impl ValuesCache { } } + fn reset( + &self, + from_l2_block: L2BlockNumber, + to_l2_block: L2BlockNumber, + ) -> anyhow::Result<()> { + // We can spend too much time loading data from Postgres, so we opt for an easier "update" route: + // evict *everything* from cache and call it a day. This should not happen too often in practice. + tracing::info!( + "Storage values cache is too far behind (current L2 block is {from_l2_block}; \ + requested update to {to_l2_block}); resetting the cache" + ); + let mut lock = self + .0 + .write() + .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; + anyhow::ensure!( + lock.valid_for == from_l2_block, + "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ + valid for L2 block #{}", + lock.valid_for + ); + lock.valid_for = to_l2_block; + lock.values.clear(); + + CACHE_METRICS.values_emptied.inc(); + CACHE_METRICS + .values_valid_for_miniblock + .set(u64::from(to_l2_block.0)); + Ok(()) + } + async fn update( &self, from_l2_block: L2BlockNumber, to_l2_block: L2BlockNumber, connection: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { - const MAX_L2_BLOCKS_LAG: u32 = 5; - tracing::debug!( "Updating storage values cache from L2 block {from_l2_block} to {to_l2_block}" ); - if to_l2_block.0 - from_l2_block.0 > MAX_L2_BLOCKS_LAG { - // We can spend too much time loading data from Postgres, so we opt for an easier "update" route: - // evict *everything* from cache and call it a day. This should not happen too often in practice. 
- tracing::info!( - "Storage values cache is too far behind (current L2 block is {from_l2_block}; \ - requested update to {to_l2_block}); resetting the cache" - ); - let mut lock = self - .0 - .write() - .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; - anyhow::ensure!( - lock.valid_for == from_l2_block, - "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ - valid for L2 block #{}", - lock.valid_for - ); - lock.valid_for = to_l2_block; - lock.values.clear(); + let update_latency = CACHE_METRICS.values_update[&ValuesUpdateStage::LoadKeys].start(); + let l2_blocks = (from_l2_block + 1)..=to_l2_block; + let modified_keys = connection + .storage_logs_dal() + .modified_keys_in_l2_blocks(l2_blocks.clone()) + .await?; - CACHE_METRICS.values_emptied.inc(); - } else { - let update_latency = CACHE_METRICS.values_update[&ValuesUpdateStage::LoadKeys].start(); - let l2_blocks = (from_l2_block + 1)..=to_l2_block; - let modified_keys = connection - .storage_logs_dal() - .modified_keys_in_l2_blocks(l2_blocks.clone()) - .await?; - - let elapsed = update_latency.observe(); - CACHE_METRICS - .values_update_modified_keys - .observe(modified_keys.len()); - tracing::debug!( - "Loaded {modified_keys_len} modified storage keys from L2 blocks {l2_blocks:?}; \ - took {elapsed:?}", - modified_keys_len = modified_keys.len() - ); + let elapsed = update_latency.observe(); + CACHE_METRICS + .values_update_modified_keys + .observe(modified_keys.len()); + tracing::debug!( + "Loaded {modified_keys_len} modified storage keys from L2 blocks {l2_blocks:?}; \ + took {elapsed:?}", + modified_keys_len = modified_keys.len() + ); - let update_latency = - CACHE_METRICS.values_update[&ValuesUpdateStage::RemoveStaleKeys].start(); - let mut lock = self - .0 - .write() - .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; - // The code below holding onto the write `lock` is the only code that can theoretically poison the `RwLock` - // (other than emptying the cache above). Thus, it's kept as simple and tight as possible. - // E.g., we load data from Postgres beforehand. - anyhow::ensure!( - lock.valid_for == from_l2_block, - "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ - valid for L2 block #{}", - lock.valid_for - ); - lock.valid_for = to_l2_block; - for modified_key in &modified_keys { - lock.values.remove(modified_key); - } - lock.values.report_size(); - drop(lock); - update_latency.observe(); + let update_latency = + CACHE_METRICS.values_update[&ValuesUpdateStage::RemoveStaleKeys].start(); + let mut lock = self + .0 + .write() + .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; + // The code below holding onto the write `lock` is the only code that can theoretically poison the `RwLock` + // (other than emptying the cache above). Thus, it's kept as simple and tight as possible. + // E.g., we load data from Postgres beforehand. 
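Taken together, the extracted `reset` method above and the lag check added to `PostgresStorageCachesTask::run` further below implement a three-way policy. A compact model of that decision, with plain integers standing in for L2 block numbers, might look like this (a sketch, not the crate's API):

```rust
// Commands at or behind the current block are ignored; a lag above the configured
// maximum triggers a full cache reset; anything else goes through the incremental update.
fn plan_update(current: u32, target: u32, max_lag: u32) -> &'static str {
    if target <= current {
        "skip"
    } else if target - current > max_lag {
        "reset"
    } else {
        "incremental update"
    }
}

fn main() {
    assert_eq!(plan_update(100, 100, 5), "skip");
    assert_eq!(plan_update(100, 103, 5), "incremental update");
    assert_eq!(plan_update(100, 110, 5), "reset");
}
```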
+ anyhow::ensure!( + lock.valid_for == from_l2_block, + "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ + valid for L2 block #{}", + lock.valid_for + ); + lock.valid_for = to_l2_block; + for modified_key in &modified_keys { + lock.values.remove(modified_key); } + lock.values.report_size(); + drop(lock); + update_latency.observe(); CACHE_METRICS .values_valid_for_miniblock @@ -298,6 +311,7 @@ impl PostgresStorageCaches { pub fn configure_storage_values_cache( &mut self, capacity: u64, + max_l2_blocks_lag: u32, connection_pool: ConnectionPool, ) -> PostgresStorageCachesTask { assert!( @@ -320,6 +334,7 @@ impl PostgresStorageCaches { PostgresStorageCachesTask { connection_pool, values_cache, + max_l2_blocks_lag, command_receiver, } } @@ -349,6 +364,7 @@ impl PostgresStorageCaches { pub struct PostgresStorageCachesTask { connection_pool: ConnectionPool, values_cache: ValuesCache, + max_l2_blocks_lag: u32, command_receiver: UnboundedReceiver, } @@ -359,32 +375,41 @@ impl PostgresStorageCachesTask { /// /// - Propagates Postgres errors. /// - Propagates errors from the cache update task. + #[tracing::instrument(name = "PostgresStorageCachesTask::run", skip_all)] pub async fn run(mut self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + tracing::info!( + max_l2_blocks_lag = self.max_l2_blocks_lag, + values_cache.capacity = self.values_cache.capacity(), + "Starting task" + ); + let mut current_l2_block = self.values_cache.valid_for(); loop { - tokio::select! { - _ = stop_receiver.changed() => { - break; - } - Some(to_l2_block) = self.command_receiver.recv() => { - if to_l2_block <= current_l2_block { - continue; - } - let mut connection = self - .connection_pool - .connection_tagged("values_cache_updater") - .await?; - self.values_cache - .update(current_l2_block, to_l2_block, &mut connection) - .await?; - current_l2_block = to_l2_block; - } + let to_l2_block = tokio::select! { + _ = stop_receiver.changed() => break, + Some(to_l2_block) = self.command_receiver.recv() => to_l2_block, else => { // The command sender has been dropped, which means that we must receive the stop signal soon. 
stop_receiver.changed().await?; break; } + }; + if to_l2_block <= current_l2_block { + continue; + } + + if to_l2_block.0 - current_l2_block.0 > self.max_l2_blocks_lag { + self.values_cache.reset(current_l2_block, to_l2_block)?; + } else { + let mut connection = self + .connection_pool + .connection_tagged("values_cache_updater") + .await?; + self.values_cache + .update(current_l2_block, to_l2_block, &mut connection) + .await?; } + current_l2_block = to_l2_block; } Ok(()) } diff --git a/core/lib/state/src/postgres/tests.rs b/core/lib/state/src/postgres/tests.rs index f88055fa047..029df60cb46 100644 --- a/core/lib/state/src/postgres/tests.rs +++ b/core/lib/state/src/postgres/tests.rs @@ -462,7 +462,7 @@ async fn wait_for_cache_update(values_cache: &ValuesCache, target_l2_block: L2Bl fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { let mut caches = PostgresStorageCaches::new(1_024, 1_024); - let task = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone()); + let task = caches.configure_storage_values_cache(1_024 * 1_024, 5, pool.clone()); let (stop_sender, stop_receiver) = watch::channel(false); let update_task_handle = tokio::task::spawn(task.run(stop_receiver)); @@ -595,7 +595,7 @@ fn mini_fuzz_values_cache_inner( mut rt_handle: Handle, ) { let mut caches = PostgresStorageCaches::new(1_024, 1_024); - let _ = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone()); + let _ = caches.configure_storage_values_cache(1_024 * 1_024, 5, pool.clone()); let values_cache = caches.values.as_ref().unwrap().cache.clone(); let mut connection = rt_handle.block_on(pool.connection()).unwrap(); diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index 5406cbb7ddf..a12508f615f 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -80,7 +80,6 @@ pub(crate) async fn create_l2_block( l1_tx_count: 0, l2_tx_count: 0, fee_account_address: Address::default(), - pubdata_params: Default::default(), base_fee_per_gas: 0, batch_fee_input: Default::default(), gas_per_pubdata_limit: 0, @@ -89,6 +88,7 @@ pub(crate) async fn create_l2_block( virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/lib/tee_verifier/Cargo.toml b/core/lib/tee_verifier/Cargo.toml index 6828eeef8b1..331c47e365e 100644 --- a/core/lib/tee_verifier/Cargo.toml +++ b/core/lib/tee_verifier/Cargo.toml @@ -11,18 +11,21 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_multivm.workspace = true zksync_config.workspace = true zksync_crypto_primitives.workspace = true zksync_merkle_tree.workspace = true -zksync_object_store.workspace = true +zksync_multivm.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true zksync_utils.workspace = true anyhow.workspace = true +once_cell.workspace = true serde.workspace = true tracing.workspace = true [dev-dependencies] zksync_contracts.workspace = true +zksync_prover_interface.workspace = true + +bincode.workspace = true diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 595480687e9..140085dbb9f 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -4,27 +4,29 @@ //! executing the VM and verifying all the accessed memory slots by their //! merkle path. 
-use std::{cell::RefCell, rc::Rc}; - -use anyhow::Context; +use anyhow::{bail, Context, Result}; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }; use zksync_multivm::{ interface::{ - storage::{InMemoryStorage, ReadStorage, StorageView}, + storage::{ReadStorage, StorageSnapshot, StorageView}, FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, + pubdata_builders::pubdata_params_to_builder, vm_latest::HistoryEnabled, LegacyVmInstance, }; use zksync_prover_interface::inputs::{ StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, StorageLog, Transaction, H256}; -use zksync_utils::bytecode::hash_bytecode; +use zksync_types::{ + block::L2BlockExecutionData, commitment::PubdataParams, L1BatchNumber, StorageLog, + StorageValue, Transaction, H256, +}; +use zksync_utils::u256_to_h256; /// A structure to hold the result of verification. pub struct VerificationResult { @@ -50,29 +52,47 @@ impl Verify for V1TeeVerifierInput { /// not actionable. fn verify(self) -> anyhow::Result { let old_root_hash = self.l1_batch_env.previous_batch_hash.unwrap(); - let l2_chain_id = self.system_env.chain_id; - let enumeration_index = self.witness_input_merkle_paths.next_enumeration_index(); + let enumeration_index = self.merkle_paths.next_enumeration_index(); + let batch_number = self.l1_batch_env.number; - let mut raw_storage = InMemoryStorage::with_custom_system_contracts_and_chain_id( - l2_chain_id, - hash_bytecode, - Vec::with_capacity(0), - ); + let read_storage_ops = self + .vm_run_data + .witness_block_state + .read_storage_key + .into_iter(); - for (hash, bytes) in self.used_contracts.into_iter() { - tracing::trace!("raw_storage.store_factory_dep({hash}, bytes)"); - raw_storage.store_factory_dep(hash, bytes) - } + let initial_writes_ops = self + .vm_run_data + .witness_block_state + .is_write_initial + .into_iter(); - let block_output_with_proofs = - get_bowp_and_set_initial_values(self.witness_input_merkle_paths, &mut raw_storage); + // We need to define storage slots read during batch execution, and their initial state; + // hence, the use of both read_storage_ops and initial_writes_ops. + // StorageSnapshot also requires providing enumeration indices, + // but they only matter at the end of execution when creating pubdata for the batch, + // which is irrelevant in this case. Thus, enumeration indices are set to dummy values. 
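The comment above explains why enumeration indices can be dummies here. As a self-contained illustration of the map handed to `StorageSnapshot::new` in the code that follows, the sketch below uses plain byte arrays instead of the real `StorageKey`/`StorageValue` types: keys that were read carry their pre-batch value plus a placeholder index, while keys first written in this batch map to `None`.

```rust
use std::collections::HashMap;

type HashedKey = [u8; 32];
type Value = [u8; 32];

fn build_snapshot_map(
    reads: Vec<(HashedKey, Value)>,
    initial_writes: Vec<(HashedKey, bool)>,
) -> HashMap<HashedKey, Option<(Value, u64)>> {
    reads
        .into_iter()
        .enumerate()
        // Read slots: initial value plus a dummy enumeration index (1-based).
        .map(|(i, (key, value))| (key, Some((value, i as u64 + 1))))
        // Slots first written in this batch: no prior value.
        .chain(
            initial_writes
                .into_iter()
                .filter_map(|(key, is_initial)| is_initial.then_some((key, None))),
        )
        .collect()
}

fn main() {
    let read_key = [1u8; 32];
    let fresh_key = [2u8; 32];
    let map = build_snapshot_map(vec![(read_key, [9u8; 32])], vec![(fresh_key, true)]);
    assert_eq!(map[&read_key], Some(([9u8; 32], 1)));
    assert_eq!(map[&fresh_key], None);
}
```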
+ let storage = read_storage_ops + .enumerate() + .map(|(i, (hash, bytes))| (hash.hashed_key(), Some((bytes, i as u64 + 1u64)))) + .chain(initial_writes_ops.filter_map(|(key, initial_write)| { + initial_write.then_some((key.hashed_key(), None)) + })) + .collect(); - let storage_view = Rc::new(RefCell::new(StorageView::new(&raw_storage))); + let factory_deps = self + .vm_run_data + .used_bytecodes + .into_iter() + .map(|(hash, bytes)| (u256_to_h256(hash), bytes.into_flattened())) + .collect(); - let batch_number = self.l1_batch_env.number; + let storage_snapshot = StorageSnapshot::new(storage, factory_deps); + let storage_view = StorageView::new(storage_snapshot).to_rc_ptr(); let vm = LegacyVmInstance::new(self.l1_batch_env, self.system_env, storage_view); + let vm_out = execute_vm(self.l2_blocks_execution_data, vm, self.pubdata_params)?; - let vm_out = execute_vm(self.l2_blocks_execution_data, vm)?; + let block_output_with_proofs = get_bowp(self.merkle_paths)?; let instructions: Vec = generate_tree_instructions(enumeration_index, &block_output_with_proofs, vm_out)?; @@ -89,11 +109,8 @@ impl Verify for V1TeeVerifierInput { } /// Sets the initial storage values and returns `BlockOutputWithProofs` -fn get_bowp_and_set_initial_values( - witness_input_merkle_paths: WitnessInputMerklePaths, - raw_storage: &mut InMemoryStorage, -) -> BlockOutputWithProofs { - let logs = witness_input_merkle_paths +fn get_bowp(witness_input_merkle_paths: WitnessInputMerklePaths) -> Result { + let logs_result: Result<_, _> = witness_input_merkle_paths .into_merkle_paths() .map( |StorageLogMetadata { @@ -110,29 +127,31 @@ fn get_bowp_and_set_initial_values( let merkle_path = merkle_paths.into_iter().map(|x| x.into()).collect(); let base: TreeLogEntry = match (is_write, first_write, leaf_enumeration_index) { (false, _, 0) => TreeLogEntry::ReadMissingKey, - (false, _, _) => { + (false, false, _) => { // This is a special U256 here, which needs `to_little_endian` let mut hashed_key = [0_u8; 32]; leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), + tracing::trace!( + "TreeLogEntry::Read {leaf_storage_key:x} = {:x}", + StorageValue::from(value_read) ); TreeLogEntry::Read { leaf_index: leaf_enumeration_index, value: value_read.into(), } } + (false, true, _) => { + tracing::error!("get_bowp is_write = false, first_write = true"); + bail!("get_bowp is_write = false, first_write = true"); + } (true, true, _) => TreeLogEntry::Inserted, (true, false, _) => { // This is a special U256 here, which needs `to_little_endian` let mut hashed_key = [0_u8; 32]; leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), + tracing::trace!( + "TreeLogEntry::Updated {leaf_storage_key:x} = {:x}", + StorageValue::from(value_read) ); TreeLogEntry::Updated { leaf_index: leaf_enumeration_index, @@ -140,25 +159,28 @@ fn get_bowp_and_set_initial_values( } } }; - TreeLogEntryWithProof { + Ok(TreeLogEntryWithProof { base, merkle_path, root_hash, - } + }) }, ) .collect(); - BlockOutputWithProofs { + let logs: Vec = logs_result?; + + Ok(BlockOutputWithProofs { logs, leaf_count: 0, - } + }) } /// Executes the VM and returns `FinishedL1Batch` on success. 
fn execute_vm( l2_blocks_execution_data: Vec, mut vm: LegacyVmInstance, + pubdata_params: PubdataParams, ) -> anyhow::Result { let next_l2_blocks_data = l2_blocks_execution_data.iter().skip(1); @@ -176,12 +198,18 @@ fn execute_vm( .context("failed to execute transaction in TeeVerifierInputProducer")?; tracing::trace!("Finished execution of tx: {tx:?}"); } + + tracing::trace!("finished l2_block {l2_block_data:?}"); + tracing::trace!("about to vm.start_new_l2_block {next_l2_block_data:?}"); + vm.start_new_l2_block(L2BlockEnv::from_l2_block_data(next_l2_block_data)); tracing::trace!("Finished execution of l2_block: {:?}", l2_block_data.number); } - Ok(vm.finish_batch()) + tracing::trace!("about to vm.finish_batch()"); + + Ok(vm.finish_batch(pubdata_params_to_builder(pubdata_params))) } /// Map `LogQuery` and `TreeLogEntry` to a `TreeInstruction` @@ -191,7 +219,7 @@ fn map_log_tree( idx: &mut u64, ) -> anyhow::Result { let key = storage_log.key.hashed_key_u256(); - Ok(match (storage_log.is_write(), *tree_log_entry) { + let tree_instruction = match (storage_log.is_write(), *tree_log_entry) { (true, TreeLogEntry::Updated { leaf_index, .. }) => { TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) } @@ -203,24 +231,31 @@ fn map_log_tree( (false, TreeLogEntry::Read { value, .. }) => { if storage_log.value != value { tracing::error!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", - storage_log.value, - value - ); - anyhow::bail!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction: read value {:#?} != {:#?}", storage_log.value, value ); + anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } TreeInstruction::Read(key) } (false, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), - _ => { - tracing::error!("Failed to map LogQuery to TreeInstruction"); + (true, TreeLogEntry::Read { .. }) + | (true, TreeLogEntry::ReadMissingKey) + | (false, TreeLogEntry::Inserted) + | (false, TreeLogEntry::Updated { .. }) => { + tracing::error!( + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction" + ); anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } - }) + }; + + Ok(tree_instruction) } /// Generates the `TreeInstruction`s from the VM executions. 
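Both `get_bowp` and `map_log_tree` above hinge on matching write flags against log metadata. The small stand-alone model below summarizes the classification `get_bowp` performs on each merkle-path entry (simplified: the real code builds `TreeLogEntry` values with leaf indices and attaches merkle paths rather than returning labels).

```rust
#[derive(Debug, PartialEq)]
enum Entry {
    ReadMissingKey,
    Read,
    Inserted,
    Updated,
}

// (is_write, first_write, leaf_enumeration_index) -> tree log entry,
// with the newly rejected combination reported as an error.
fn classify(is_write: bool, first_write: bool, leaf_index: u64) -> Result<Entry, &'static str> {
    Ok(match (is_write, first_write, leaf_index) {
        (false, _, 0) => Entry::ReadMissingKey,
        (false, false, _) => Entry::Read,
        (false, true, _) => return Err("read log unexpectedly marked as a first write"),
        (true, true, _) => Entry::Inserted,
        (true, false, _) => Entry::Updated,
    })
}

fn main() {
    assert_eq!(classify(false, false, 0), Ok(Entry::ReadMissingKey));
    assert_eq!(classify(true, true, 42), Ok(Entry::Inserted));
    assert!(classify(false, true, 42).is_err());
}
```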
@@ -269,8 +304,7 @@ fn execute_tx( mod tests { use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv, TxExecutionMode}; - use zksync_object_store::StoredObject; - use zksync_prover_interface::inputs::TeeVerifierInput; + use zksync_prover_interface::inputs::{TeeVerifierInput, VMRunWitnessInputData}; use zksync_types::U256; use super::*; @@ -278,6 +312,18 @@ mod tests { #[test] fn test_v1_serialization() { let tvi = V1TeeVerifierInput::new( + VMRunWitnessInputData { + l1_batch_number: Default::default(), + used_bytecodes: Default::default(), + initial_heap_content: vec![], + protocol_version: Default::default(), + bootloader_code: vec![], + default_account_code_hash: Default::default(), + evm_emulator_code_hash: Some(Default::default()), + storage_refunds: vec![], + pubdata_costs: vec![], + witness_block_state: Default::default(), + }, WitnessInputMerklePaths::new(0), vec![], L1BatchEnv { @@ -306,21 +352,19 @@ mod tests { code: vec![U256([1; 4])], hash: H256([1; 32]), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: 0, chain_id: Default::default(), - pubdata_params: Default::default(), }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], + Default::default(), ); let tvi = TeeVerifierInput::new(tvi); - let serialized = ::serialize(&tvi) - .expect("Failed to serialize TeeVerifierInput."); + let serialized = bincode::serialize(&tvi).expect("Failed to serialize TeeVerifierInput."); let deserialized: TeeVerifierInput = - ::deserialize(serialized) - .expect("Failed to deserialize TeeVerifierInput."); + bincode::deserialize(&serialized).expect("Failed to deserialize TeeVerifierInput."); assert_eq!(tvi, deserialized); } diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 84ad10b5bbb..5176d90cfd4 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -11,12 +11,12 @@ keywords.workspace = true categories.workspace = true [dependencies] +# **IMPORTANT.** Please do not add dependency on `zksync_config` etc. This crate has a heavy dependency graph as is. zksync_system_constants.workspace = true zksync_utils.workspace = true zksync_basic_types.workspace = true zksync_contracts.workspace = true zksync_mini_merkle_tree.workspace = true -zksync_config.workspace = true zksync_protobuf.workspace = true zksync_crypto_primitives.workspace = true @@ -40,7 +40,6 @@ ethabi.workspace = true tracing.workspace = true # Crypto stuff -secp256k1.workspace = true blake2.workspace = true [dev-dependencies] diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index 52d66a29458..daaa5651a03 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -66,3 +66,9 @@ pub struct ConsensusGenesis(pub serde_json::Value); /// The wrapped JSON value corresponds to `zksync_dal::consensus::AttestationStatus`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AttestationStatus(pub serde_json::Value); + +/// Block metadata that should have been committed to on L1, but it is not. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::BlockMetadata`. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockMetadata(pub serde_json::Value); diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index e80bed9a9d0..ff24667aa2e 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -15,8 +15,9 @@ pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; use crate::{ - debug_flat_call::DebugCallFlat, protocol_version::L1VerifierConfig, Address, L2BlockNumber, - ProtocolVersionId, + debug_flat_call::{DebugCallFlat, ResultDebugCallFlat}, + protocol_version::L1VerifierConfig, + Address, L2BlockNumber, ProtocolVersionId, }; pub mod en; @@ -706,7 +707,7 @@ pub struct ProtocolVersion { /// Verifier configuration #[deprecated] pub verification_keys_hashes: Option, - /// Hashes of base system contracts (bootloader and default account) + /// Hashes of base system contracts (bootloader, default account and evm emulator) #[deprecated] pub base_system_contracts: Option, /// Bootloader code hash @@ -715,6 +716,9 @@ pub struct ProtocolVersion { /// Default account code hash #[serde(rename = "defaultAccountCodeHash")] pub default_account_code_hash: Option, + /// EVM emulator code hash + #[serde(rename = "evmSimulatorCodeHash")] + pub evm_emulator_code_hash: Option, /// L2 Upgrade transaction hash #[deprecated] pub l2_system_upgrade_tx_hash: Option, @@ -730,6 +734,7 @@ impl ProtocolVersion { timestamp: u64, bootloader_code_hash: H256, default_account_code_hash: H256, + evm_emulator_code_hash: Option, l2_system_upgrade_tx_hash: Option, ) -> Self { Self { @@ -740,9 +745,11 @@ impl ProtocolVersion { base_system_contracts: Some(BaseSystemContractsHashes { bootloader: bootloader_code_hash, default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, }), bootloader_code_hash: Some(bootloader_code_hash), default_account_code_hash: Some(default_account_code_hash), + evm_emulator_code_hash, l2_system_upgrade_tx_hash, l2_system_upgrade_tx_hash_new: l2_system_upgrade_tx_hash, } @@ -758,6 +765,13 @@ impl ProtocolVersion { .or_else(|| self.base_system_contracts.map(|hashes| hashes.default_aa)) } + pub fn evm_emulator_code_hash(&self) -> Option { + self.evm_emulator_code_hash.or_else(|| { + self.base_system_contracts + .and_then(|hashes| hashes.evm_emulator) + }) + } + pub fn minor_version(&self) -> Option { self.minor_version.or(self.version_id) } @@ -813,11 +827,11 @@ pub enum BlockStatus { #[serde(untagged)] pub enum CallTracerBlockResult { CallTrace(Vec), - FlatCallTrace(Vec), + FlatCallTrace(Vec), } impl CallTracerBlockResult { - pub fn unwrap_flat(self) -> Vec { + pub fn unwrap_flat(self) -> Vec { match self { Self::CallTrace(_) => panic!("Result is a FlatCallTrace"), Self::FlatCallTrace(trace) => trace, @@ -991,6 +1005,7 @@ mod tests { base_system_contracts: Some(Default::default()), bootloader_code_hash: Some(Default::default()), default_account_code_hash: Some(Default::default()), + evm_emulator_code_hash: Some(Default::default()), l2_system_upgrade_tx_hash: Default::default(), l2_system_upgrade_tx_hash_new: Default::default(), }; diff --git a/core/lib/types/src/api/state_override.rs b/core/lib/types/src/api/state_override.rs index a2497a65c53..f2986610840 100644 --- a/core/lib/types/src/api/state_override.rs +++ b/core/lib/types/src/api/state_override.rs @@ -21,6 +21,11 @@ impl StateOverride { self.0.get(address) } + /// Gets mutable overrides for the specified account. 
+ pub fn get_mut(&mut self, address: &Address) -> Option<&mut OverrideAccount> { + self.0.get_mut(address) + } + /// Iterates over all account overrides. pub fn iter(&self) -> impl Iterator + '_ { self.0.iter() @@ -48,6 +53,12 @@ impl Bytecode { } } +impl AsRef<[u8]> for Bytecode { + fn as_ref(&self) -> &[u8] { + &self.0 .0 + } +} + impl Serialize for Bytecode { fn serialize(&self, serializer: S) -> Result { self.0.serialize(serializer) diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 1c38e9b1abd..310e3a73b8e 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -65,6 +65,28 @@ pub struct L1BatchHeader { /// Version of protocol used for the L1 batch. pub protocol_version: Option, pub pubdata_input: Option>, + pub fee_address: Address, +} + +impl L1BatchHeader { + pub fn to_unsealed_header(&self, fee_input: BatchFeeInput) -> UnsealedL1BatchHeader { + UnsealedL1BatchHeader { + number: self.number, + timestamp: self.timestamp, + protocol_version: self.protocol_version, + fee_address: self.fee_address, + fee_input, + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct UnsealedL1BatchHeader { + pub number: L1BatchNumber, + pub timestamp: u64, + pub protocol_version: Option, + pub fee_address: Address, + pub fee_input: BatchFeeInput, } /// Holder for the L2 block metadata that is not available from transactions themselves. @@ -81,7 +103,6 @@ pub struct L2BlockHeader { pub batch_fee_input: BatchFeeInput, pub gas_per_pubdata_limit: u64, pub base_system_contracts_hashes: BaseSystemContractsHashes, - pub pubdata_params: PubdataParams, pub protocol_version: Option, /// The maximal number of virtual blocks to be created in the L2 block. pub virtual_blocks: u32, @@ -92,6 +113,7 @@ pub struct L2BlockHeader { /// amount of gas can be spent on pubdata. pub gas_limit: u64, pub logs_bloom: Bloom, + pub pubdata_params: PubdataParams, } /// Structure that represents the data is returned by the storage oracle during batch execution. 
@@ -133,6 +155,7 @@ impl L1BatchHeader { system_logs: vec![], protocol_version: Some(protocol_version), pubdata_input: Some(vec![]), + fee_address: Default::default(), } } diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index a238430bf50..99f6c04d131 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -10,7 +10,7 @@ use std::{collections::HashMap, convert::TryFrom}; use ethabi::Token; use serde::{Deserialize, Serialize}; -pub use zksync_basic_types::{commitment::*, web3::contract::Tokenize}; +pub use zksync_basic_types::commitment::*; use zksync_contracts::BaseSystemContractsHashes; use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher}; use zksync_mini_merkle_tree::MiniMerkleTree; @@ -24,8 +24,8 @@ use crate::{ blob::num_blobs_required, block::{L1BatchHeader, L1BatchTreeData}, l2_to_l1_log::{ - l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes, L2ToL1Log, SystemL2ToL1Log, - UserL2ToL1Log, + l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes_pre_gateway, L2ToL1Log, + SystemL2ToL1Log, UserL2ToL1Log, }, web3::keccak256, writes::{ @@ -303,6 +303,13 @@ pub struct L1BatchAuxiliaryCommonOutput { protocol_version: ProtocolVersionId, } +#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)] +#[cfg_attr(test, derive(Serialize, Deserialize))] +pub struct BlobHash { + pub commitment: H256, + pub linear_hash: H256, +} + /// Block Output produced by Virtual Machine #[derive(Debug, Clone, Eq, PartialEq)] #[cfg_attr(test, derive(Serialize, Deserialize))] @@ -321,9 +328,8 @@ pub enum L1BatchAuxiliaryOutput { state_diffs_compressed: Vec, state_diffs_hash: H256, aux_commitments: AuxCommitments, - blob_linear_hashes: Vec, - blob_commitments: Vec, - aggregated_root: H256, + blob_hashes: Vec, + aggregation_root: H256, local_root: H256, }, } @@ -373,9 +379,8 @@ impl L1BatchAuxiliaryOutput { system_logs, state_diffs, aux_commitments, - blob_commitments, - blob_linear_hashes, - aggregated_root, + blob_hashes, + aggregation_root, } => { let l2_l1_logs_compressed = serialize_commitments(&common_input.l2_to_l1_logs); let merkle_tree_leaves = l2_l1_logs_compressed @@ -386,11 +391,10 @@ impl L1BatchAuxiliaryOutput { Some(l2_to_l1_logs_tree_size(common_input.protocol_version)), ) .merkle_root(); - let l2_l1_logs_merkle_root = if common_input.protocol_version.is_pre_gateway() { local_root } else { - KeccakHasher.compress(&local_root, &aggregated_root) + KeccakHasher.compress(&local_root, &aggregation_root) }; let common_output = L1BatchAuxiliaryCommonOutput { @@ -420,10 +424,13 @@ impl L1BatchAuxiliaryOutput { "State diff hash mismatch" ); - let blob_linear_hashes_from_logs = parse_system_logs_for_blob_hashes( - &common_input.protocol_version, - &system_logs, - ); + let blob_linear_hashes_from_logs = + parse_system_logs_for_blob_hashes_pre_gateway( + &common_input.protocol_version, + &system_logs, + ); + let blob_linear_hashes: Vec<_> = + blob_hashes.iter().map(|b| b.linear_hash).collect(); assert_eq!( blob_linear_hashes, blob_linear_hashes_from_logs, "Blob linear hashes mismatch" @@ -443,40 +450,42 @@ impl L1BatchAuxiliaryOutput { ); } - assert_eq!( - blob_linear_hashes.len(), - blob_commitments.len(), - "Blob linear hashes and commitments have different lengths" - ); - Self::PostBoojum { common: common_output, system_logs_linear_hash, state_diffs_compressed, state_diffs_hash, aux_commitments, - blob_linear_hashes, - blob_commitments, + blob_hashes, local_root, - aggregated_root, + aggregation_root, } } } 
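The `BlobHash` struct introduced above replaces the two parallel vectors (`blob_commitments` and `blob_linear_hashes`) with one entry per blob, which removes the length-mismatch check and keeps each commitment next to its linear hash. A minimal sketch of that shape follows; the `H256` here is a local stand-in for `zksync_basic_types::H256`, and the values are illustrative only.

```rust
// Stand-in for the real 32-byte hash type.
#[derive(Debug, Default, Clone, Copy)]
struct H256([u8; 32]);

#[derive(Debug, Default, Clone, Copy)]
struct BlobHash {
    commitment: H256,
    linear_hash: H256,
}

fn main() {
    // One default-initialized entry per required blob, as in the commitment input setup.
    let num_blobs = 2;
    let blob_hashes = vec![BlobHash::default(); num_blobs];

    // Linear hashes can still be projected out when they need to be compared against system logs.
    let linear_hashes: Vec<H256> = blob_hashes.iter().map(|b| b.linear_hash).collect();
    assert_eq!(linear_hashes.len(), num_blobs);
}
```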
} - pub fn get_local_root(&self) -> H256 { + pub fn local_root(&self) -> H256 { match self { Self::PreBoojum { common, .. } => common.l2_l1_logs_merkle_root, Self::PostBoojum { local_root, .. } => *local_root, } } - pub fn get_aggregated_root(&self) -> H256 { + pub fn aggregation_root(&self) -> H256 { match self { Self::PreBoojum { .. } => H256::zero(), Self::PostBoojum { - aggregated_root, .. - } => *aggregated_root, + aggregation_root, .. + } => *aggregation_root, + } + } + + pub fn state_diff_hash(&self) -> H256 { + match self { + Self::PreBoojum { .. } => H256::zero(), + Self::PostBoojum { + state_diffs_hash, .. + } => *state_diffs_hash, } } @@ -500,8 +509,7 @@ impl L1BatchAuxiliaryOutput { system_logs_linear_hash, state_diffs_hash, aux_commitments, - blob_linear_hashes, - blob_commitments, + blob_hashes, .. } => { result.extend(system_logs_linear_hash.as_bytes()); @@ -513,9 +521,9 @@ impl L1BatchAuxiliaryOutput { ); result.extend(aux_commitments.events_queue_commitment.as_bytes()); - for i in 0..blob_commitments.len() { - result.extend(blob_linear_hashes[i].as_bytes()); - result.extend(blob_commitments[i].as_bytes()); + for b in blob_hashes { + result.extend(b.linear_hash.as_bytes()); + result.extend(b.commitment.as_bytes()); } } } @@ -541,6 +549,7 @@ pub struct L1BatchMetaParameters { pub zkporter_is_available: bool, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, + pub evm_emulator_code_hash: Option, pub protocol_version: Option, } @@ -556,10 +565,11 @@ impl L1BatchMetaParameters { .protocol_version .map_or(false, |ver| ver.is_post_1_5_0()) { - // EVM simulator hash for now is the same as the default AA hash. - result.extend(self.default_aa_code_hash.as_bytes()); + let evm_emulator_code_hash = self + .evm_emulator_code_hash + .unwrap_or(self.default_aa_code_hash); + result.extend(evm_emulator_code_hash.as_bytes()); } - result } @@ -625,6 +635,7 @@ impl L1BatchCommitment { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash: input.common().bootloader_code_hash, default_aa_code_hash: input.common().default_aa_code_hash, + evm_emulator_code_hash: input.common().evm_emulator_code_hash, protocol_version: Some(input.common().protocol_version), }; @@ -700,24 +711,17 @@ impl L1BatchCommitment { ), }; - let state_diff_hash = match &self.auxiliary_output { - L1BatchAuxiliaryOutput::PostBoojum { - state_diffs_hash, .. - } => *state_diffs_hash, - L1BatchAuxiliaryOutput::PreBoojum { .. 
} => H256::zero(), - }; - L1BatchCommitmentArtifacts { commitment_hash: self.hash(), l2_l1_merkle_root: self.l2_l1_logs_merkle_root(), compressed_state_diffs, zkporter_is_available: self.meta_parameters.zkporter_is_available, aux_commitments: self.aux_commitments(), - state_diff_hash, compressed_initial_writes, compressed_repeated_writes, - local_root: self.auxiliary_output.get_local_root(), - aggregation_root: self.auxiliary_output.get_aggregated_root(), + local_root: self.auxiliary_output.local_root(), + aggregation_root: self.auxiliary_output.aggregation_root(), + state_diff_hash: self.auxiliary_output.state_diff_hash(), } } } @@ -737,6 +741,7 @@ pub struct CommitmentCommonInput { pub rollup_root_hash: H256, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, + pub evm_emulator_code_hash: Option, pub protocol_version: ProtocolVersionId, } @@ -753,9 +758,8 @@ pub enum CommitmentInput { system_logs: Vec, state_diffs: Vec, aux_commitments: AuxCommitments, - blob_commitments: Vec, - blob_linear_hashes: Vec, - aggregated_root: H256, + blob_hashes: Vec, + aggregation_root: H256, }, } @@ -779,6 +783,7 @@ impl CommitmentInput { rollup_root_hash, bootloader_code_hash: base_system_contracts_hashes.bootloader, default_aa_code_hash: base_system_contracts_hashes.default_aa, + evm_emulator_code_hash: base_system_contracts_hashes.evm_emulator, protocol_version, }; if protocol_version.is_pre_boojum() { @@ -796,17 +801,11 @@ impl CommitmentInput { events_queue_commitment: H256::zero(), bootloader_initial_content_commitment: H256::zero(), }, - blob_commitments: { - let num_blobs = num_blobs_required(&protocol_version); - - vec![H256::zero(); num_blobs] - }, - blob_linear_hashes: { + blob_hashes: { let num_blobs = num_blobs_required(&protocol_version); - - vec![H256::zero(); num_blobs] + vec![Default::default(); num_blobs] }, - aggregated_root: H256::zero(), + aggregation_root: H256::zero(), } } } @@ -816,12 +815,12 @@ impl CommitmentInput { pub struct L1BatchCommitmentArtifacts { pub commitment_hash: L1BatchCommitmentHash, pub l2_l1_merkle_root: H256, - pub aggregation_root: H256, - pub local_root: H256, pub compressed_state_diffs: Option>, pub compressed_initial_writes: Option>, pub compressed_repeated_writes: Option>, - pub state_diff_hash: H256, pub zkporter_is_available: bool, pub aux_commitments: Option, + pub aggregation_root: H256, + pub local_root: H256, + pub state_diff_hash: H256, } diff --git a/core/lib/types/src/commitment/tests/mod.rs b/core/lib/types/src/commitment/tests/mod.rs index b416e21c4ab..a95318309a2 100644 --- a/core/lib/types/src/commitment/tests/mod.rs +++ b/core/lib/types/src/commitment/tests/mod.rs @@ -51,6 +51,11 @@ fn post_boojum_1_5_0() { run_test("post_boojum_1_5_0_test"); } +#[test] +fn post_boojum_1_5_0_with_evm() { + run_test("post_boojum_1_5_0_test_with_evm"); +} + #[test] fn post_gateway() { run_test("post_gateway_test"); diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json index 87f7c9e51ea..c854a6e77d8 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json @@ -190,15 +190,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -253,15 +255,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", "local_root": "0xe52d57bd64cabf6c588b30365512da2bf10912c106e7a06483b236d05ac4037e" } }, diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json index 4ae5b361b7c..96aa8ab842c 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json @@ -206,15 +206,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002" - ], - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + "aggregation_root": 
"0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -269,15 +271,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004" - ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", "local_root": "0x0b6e1ad4643cc2bee06b5e173184ec822d80826e5720f5715172898350433299" } }, diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json index 78f7afb372d..ed61ea67cef 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json @@ -238,43 +238,73 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000007", - "0x0000000000000000000000000000000000000000000000000000000000000008", - 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -329,43 +359,73 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000007", - "0x0000000000000000000000000000000000000000000000000000000000000008", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + 
"linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", "local_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" } }, diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json new file mode 100644 index 00000000000..a41aa33c04a --- /dev/null +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json @@ -0,0 +1,440 @@ +{ + "input": { + "PostBoojum": { + "common": { + "l2_to_l1_logs": [ + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x7814f203b8e02f6a676b8f7faefcf732d8b4368bab25239ea4525010aa85d5ee", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + ], + "rollup_last_leaf_index": 89, + "rollup_root_hash": "0xe47f013d1ecd4ce53b6872f6b762670b393815e7ddacdf2b0886af9c7f3a555b", + "bootloader_code_hash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "default_aa_code_hash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + 
"evm_emulator_code_hash": "0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91", + "protocol_version": "Version23" + }, + "system_logs": [ + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 0, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000004", + "value": "0x55618db5ff24aee4d236921b6f4272101161137115a3b4c4a65f8677b124c01c" + }, + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 1, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000003", + "value": "0x00000000000000000000000065c22f8000000000000000000000000065c22f81" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000005", + "value": "0x155c82febe94e07df0065c153e8ed403b5351fd64d657c8dffbfbee8ec3d2ba3" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000006", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x85a7fb853512ba6575c99ee121dd560559523a4587a2cd7e83cd359cd9ea2aed" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000002", + "value": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000007", + "value": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": 
"0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000008" + } + ], + "state_diffs": [ + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1", + "derived_key": [ + 113, 233, 23, 33, 249, 145, 133, 118, 215, 96, 240, 47, 3, 202, 196, + 124, 111, 64, 3, 49, 96, 49, 132, 142, 60, 29, 153, 230, 232, 58, + 71, 67 + ], + "enumeration_index": 49, + "initial_value": "0x18776f28c303800", + "final_value": "0x708da482cab20760" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x294a00337abeee2b3cd948ffeed92231e2a3acc2eb11210400e0aa9557f23e26", + "derived_key": [ + 45, 90, 105, 98, 204, 206, 229, 212, 173, 180, 138, 54, 187, 191, + 68, 58, 83, 23, 33, 72, 67, 129, 18, 89, 55, 243, 0, 26, 197, 255, + 135, 91 + ], + "enumeration_index": 50, + "initial_value": "0xf5559e28fd66c0", + "final_value": "0xf5a19b324caf80" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, 166, 75, 35, 133, + 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, 64, 49, 220, + 193, 72, 27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x6f05e193353286a0" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x7", + "derived_key": [ + 18, 59, 175, 197, 134, 247, 119, 100, 72, 140, 210, 76, 106, 119, + 84, 110, 90, 15, 232, 189, 251, 79, 162, 3, 207, 175, 252, 54, 204, + 228, 221, 91 + ], + "enumeration_index": 53, + "initial_value": "0x100000000000000000000000065c22e3e", + "final_value": "0x200000000000000000000000065c22f80" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x9", + "derived_key": [ + 142, 125, 208, 106, 197, 183, 59, 71, 59, 230, 188, 90, 81, 3, 15, + 76, 116, 55, 101, 124, 183, 178, 155, 243, 118, 197, 100, 184, 209, + 103, 90, 94 + ], + "enumeration_index": 54, + "initial_value": "0x200000000000000000000000065c22e3f", + "final_value": "0x400000000000000000000000065c22f81" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xd", + "derived_key": [ + 235, 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, + 40, 14, 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, + 247, 152, 97 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xebbe609cd3ccd11f273eb94374d6d3a2f7856c5f1039dc4877c6a334188ac7c1" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xe", + "derived_key": [ + 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, 58, + 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x708e7fcf68ebab6c87322686cac4bcdb5f2bd4c71f337b18d147fd9a6c44ad13" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10c", + "derived_key": [ + 121, 9, 53, 136, 208, 232, 71, 239, 167, 58, 16, 206, 32, 228, 121, + 159, 177, 228, 102, 66, 214, 86, 23, 199, 229, 33, 63, 160, 73, 137, + 217, 45 + ], + "enumeration_index": 57, + 
"initial_value": "0x200000000000000000000000065c22e3f", + "final_value": "0x400000000000000000000000065c22f81" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xad67d757c34507f157cacfa2e3153e9f260a2244f30428821be7be64587ac55f", + "derived_key": [ + 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, 44, 164, 124, 169, + 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, 57, 36, 22, + 48, 203, 70 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x55618db5ff24aee4d236921b6f4272101161137115a3b4c4a65f8677b124c01c" + } + ], + "aux_commitments": { + "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", + "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" + }, + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, 
+ { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "pass_through_data": { + "shared_states": [ + { + "last_leaf_index": 89, + "root_hash": "0xe47f013d1ecd4ce53b6872f6b762670b393815e7ddacdf2b0886af9c7f3a555b" + }, + { + "last_leaf_index": 0, + "root_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "meta_parameters": { + "zkporter_is_available": false, + "bootloader_code_hash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "default_aa_code_hash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + "evm_emulator_code_hash": "0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91", + "protocol_version": "Version23" + }, + "auxiliary_output": { + "PostBoojum": { + "common": { + "l2_l1_logs_merkle_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff", + "protocol_version": "Version23" + }, + "system_logs_linear_hash": "0x602dacc0a26e3347f0679924c4ae151ff5200e7dd80902fe0fc11c806c4d3ffb", + "state_diffs_compressed": [ + 1, 0, 1, 72, 4, 0, 4, 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, + 166, 75, 35, 133, 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, + 64, 49, 220, 193, 72, 27, 65, 111, 5, 225, 147, 53, 50, 134, 160, 235, + 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, 40, 14, + 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, 247, 152, 97, + 0, 235, 190, 96, 156, 211, 204, 209, 31, 39, 62, 185, 67, 116, 214, 211, + 162, 247, 133, 108, 95, 16, 57, 220, 72, 119, 198, 163, 52, 24, 138, + 199, 193, 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, + 58, 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216, 0, 112, 142, 127, 207, 104, 235, 171, 108, 135, 50, 38, 134, + 202, 196, 188, 219, 95, 43, 212, 199, 31, 51, 123, 24, 209, 71, 253, + 154, 108, 68, 173, 19, 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, + 44, 164, 124, 169, 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, + 57, 36, 22, 48, 203, 70, 0, 85, 97, 141, 181, 255, 36, 174, 228, 210, + 54, 146, 27, 111, 66, 114, 16, 17, 97, 19, 113, 21, 163, 180, 196, 166, + 95, 134, 119, 177, 36, 192, 28, 0, 0, 0, 49, 65, 111, 6, 45, 144, 62, + 129, 207, 96, 0, 0, 0, 50, 49, 75, 253, 9, 79, 72, 192, 0, 0, 0, 53, + 137, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 54, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 57, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66 + ], + "state_diffs_hash": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0", + "aux_commitments": { + "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", + "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" + }, + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" + } + }, + "hashes": { + "pass_through_data": "0x6a3ffc0f55d4abce9498b8bcb01a3018bc2b83d96acb27e23772fe9347954725", + "aux_output": "0xadc63d9c45f85598f3e3c232970315d1f6ac96222e379e16ced7a204524a4061", + "meta_parameters": "0x02531e5cc22688523a4ac9317e5097743771f6914015cf1152491cf22084bd58", + "commitment": "0x4fdd8c5b231dfc9fc81aba744a90fbec78627f529ac29f9fc758a7b9e62fa321" + } +} diff --git a/core/lib/types/src/commitment/tests/post_gateway_test.json b/core/lib/types/src/commitment/tests/post_gateway_test.json index a421bd41f95..4b598ff59f4 100644 --- a/core/lib/types/src/commitment/tests/post_gateway_test.json +++ b/core/lib/types/src/commitment/tests/post_gateway_test.json @@ -9,7 +9,7 @@ "PostBoojum": { "common": { "l2_l1_logs_merkle_root": "0x38eaeef3afe69b6f6b2fa22c92da8137f1e405a1e1861b7de7cfa30c7d7462dd", - "protocol_version": "Version25" + "protocol_version": "Version27" }, "system_logs_linear_hash": 
"0xe8460ce1ed47b77cfee3cadf803aa089c144c506ea2bdd358a6a38ff2c7bc8e3", "state_diffs_compressed": [ @@ -20,43 +20,73 @@ "events_queue_commitment": "0xec82208c87a937d88768a0067b2a80f0525eca8288dad2cf96cf8bbe6a1aa565", "bootloader_initial_content_commitment": "0x97df88dcecbcd29b49773c042cdee7a44c57a741e64913fff5aa1b3484232f28" }, - "blob_linear_hashes": [ - "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_commitments": [ - "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", + "linear_hash": "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", + "aggregation_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", "local_root": "0xd4790efa9052ea67dcb473de870e3522e2fc340374e6293ad4646fde312c8c76" } }, @@ -64,7 +94,7 @@ "zkporter_is_available": false, "bootloader_code_hash": "0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf", "default_aa_code_hash": "0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e", - "protocol_version": "Version25" + "protocol_version": "Version27" }, "pass_through_data": { "shared_states": [ @@ -311,7 +341,7 @@ "rollup_root_hash": "0x0332d2acc43785a44b2b84fc010372c8f3e4ff4d0ca5f312de142ffe74189500", "bootloader_code_hash": "0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf", "default_aa_code_hash": "0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e", - "protocol_version": "Version25" + "protocol_version": "Version27" }, "system_logs": [ { @@ -1875,43 +1905,73 @@ "events_queue_commitment": "0xec82208c87a937d88768a0067b2a80f0525eca8288dad2cf96cf8bbe6a1aa565", "bootloader_initial_content_commitment": "0x97df88dcecbcd29b49773c042cdee7a44c57a741e64913fff5aa1b3484232f28" }, - "blob_commitments": [ - "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_linear_hashes": [ - "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", + "linear_hash": "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1" + "aggregation_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1" } } } diff --git a/core/lib/types/src/debug_flat_call.rs b/core/lib/types/src/debug_flat_call.rs index 89a008b5fb5..5809026e521 100644 --- a/core/lib/types/src/debug_flat_call.rs +++ b/core/lib/types/src/debug_flat_call.rs @@ -3,6 +3,13 @@ use zksync_basic_types::{web3::Bytes, U256}; use crate::{api::DebugCallType, Address, H256}; +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ResultDebugCallFlat { + pub tx_hash: H256, + pub result: Vec, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct DebugCallFlat { @@ -12,6 +19,8 @@ pub struct DebugCallFlat { pub trace_address: Vec, pub transaction_position: usize, pub transaction_hash: H256, + pub block_number: u32, + pub block_hash: H256, pub r#type: DebugCallType, } @@ -32,3 +41,11 @@ pub struct CallResult { pub output: Bytes, pub gas_used: U256, } + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct CallTraceMeta { + pub index_in_block: usize, + pub tx_hash: H256, + pub block_number: u32, + pub block_hash: H256, +} diff --git a/core/lib/types/src/fee_model.rs b/core/lib/types/src/fee_model.rs index b59aa65b04e..ae346656ea6 100644 --- a/core/lib/types/src/fee_model.rs +++ b/core/lib/types/src/fee_model.rs @@ -1,11 +1,13 @@ +// FIXME: separate crate together with node_fee_model interfaces? + use std::num::NonZeroU64; use bigdecimal::{BigDecimal, ToPrimitive}; use serde::{Deserialize, Serialize}; -use zksync_config::configs::chain::{FeeModelVersion, StateKeeperConfig}; use zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE; +use zksync_utils::ceil_div_u256; -use crate::ProtocolVersionId; +use crate::{ProtocolVersionId, U256}; /// Fee input to be provided into the VM. 
It contains two options: /// - `L1Pegged`: L1 gas price is provided to the VM, and the pubdata price is derived from it. Using this option is required for the @@ -203,6 +205,7 @@ pub struct FeeModelConfigV2 { /// The maximum amount of pubdata that can be used by the batch. Note that if the calldata is used as pubdata, this variable should not exceed 128kb. pub max_pubdata_per_batch: u64, } + impl Default for FeeModelConfig { /// Config with all zeroes is not a valid config (since for instance having 0 max gas per batch may incur division by zero), /// so we implement a sensible default config here. @@ -213,24 +216,6 @@ impl Default for FeeModelConfig { } } -impl FeeModelConfig { - pub fn from_state_keeper_config(state_keeper_config: &StateKeeperConfig) -> Self { - match state_keeper_config.fee_model_version { - FeeModelVersion::V1 => Self::V1(FeeModelConfigV1 { - minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, - }), - FeeModelVersion::V2 => Self::V2(FeeModelConfigV2 { - minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, - compute_overhead_part: state_keeper_config.compute_overhead_part, - pubdata_overhead_part: state_keeper_config.pubdata_overhead_part, - batch_overhead_l1_gas: state_keeper_config.batch_overhead_l1_gas, - max_gas_per_batch: state_keeper_config.max_gas_per_batch, - max_pubdata_per_batch: state_keeper_config.max_pubdata_per_batch, - }), - } - } -} - #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct FeeParamsV1 { pub config: FeeModelConfigV1, @@ -337,4 +322,442 @@ impl FeeParams { l1_gas_price: 1_000_000_000, }) } + + /// Provides scaled [`BatchFeeInput`] based on these parameters. + pub fn scale( + self, + l1_gas_price_scale_factor: f64, + l1_pubdata_price_scale_factor: f64, + ) -> BatchFeeInput { + match self { + Self::V1(params) => BatchFeeInput::L1Pegged(compute_batch_fee_model_input_v1( + params, + l1_gas_price_scale_factor, + )), + Self::V2(params) => BatchFeeInput::PubdataIndependent(clip_batch_fee_model_input_v2( + compute_batch_fee_model_input_v2( + params, + l1_gas_price_scale_factor, + l1_pubdata_price_scale_factor, + ), + )), + } + } +} + +/// Calculates the batch fee input based on the main node parameters. +/// This function uses the `V1` fee model, i.e. where the pubdata price does not include the proving costs. +fn compute_batch_fee_model_input_v1( + params: FeeParamsV1, + l1_gas_price_scale_factor: f64, +) -> L1PeggedBatchFeeModelInput { + let l1_gas_price = (params.l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; + + L1PeggedBatchFeeModelInput { + l1_gas_price, + fair_l2_gas_price: params.config.minimal_l2_gas_price, + } +} + +/// Calculates the batch fee input based on the main node parameters. +/// This function uses the `V2` fee model, i.e. where the pubdata price does not include the proving costs. +fn compute_batch_fee_model_input_v2( + params: FeeParamsV2, + l1_gas_price_scale_factor: f64, + l1_pubdata_price_scale_factor: f64, +) -> PubdataIndependentBatchFeeModelInput { + let config = params.config(); + let l1_gas_price = params.l1_gas_price(); + let l1_pubdata_price = params.l1_pubdata_price(); + + let FeeModelConfigV2 { + minimal_l2_gas_price, + compute_overhead_part, + pubdata_overhead_part, + batch_overhead_l1_gas, + max_gas_per_batch, + max_pubdata_per_batch, + } = config; + + // Firstly, we scale the gas price and pubdata price in case it is needed. 
+    let l1_gas_price = (l1_gas_price as f64 * l1_gas_price_scale_factor) as u64;
+    let l1_pubdata_price = (l1_pubdata_price as f64 * l1_pubdata_price_scale_factor) as u64;
+
+    // While the final results of the calculations are not expected to have any overflows, the intermediate computations
+    // might, so we use U256 for them.
+    let l1_batch_overhead_wei = U256::from(l1_gas_price) * U256::from(batch_overhead_l1_gas);
+
+    let fair_l2_gas_price = {
+        // Firstly, we calculate which part of the overall overhead each unit of L2 gas should cover.
+        let l1_batch_overhead_per_gas =
+            ceil_div_u256(l1_batch_overhead_wei, U256::from(max_gas_per_batch));
+
+        // Then, we multiply by the `compute_overhead_part` to get the computation overhead for each unit of gas.
+        // Also, this means that if we almost never close batches because of compute, the `compute_overhead_part` should be zero and so
+        // it is possible that the computation costs include no overhead.
+        let gas_overhead_wei =
+            (l1_batch_overhead_per_gas.as_u64() as f64 * compute_overhead_part) as u64;
+
+        // We sum up the minimal L2 gas price (i.e. the raw prover/compute cost of a single L2 gas) and the overhead for the batch being closed.
+        minimal_l2_gas_price + gas_overhead_wei
+    };
+
+    let fair_pubdata_price = {
+        // Firstly, we calculate which part of the overall overhead each pubdata byte should cover.
+        let l1_batch_overhead_per_pubdata =
+            ceil_div_u256(l1_batch_overhead_wei, U256::from(max_pubdata_per_batch));
+
+        // Then, we multiply by the `pubdata_overhead_part` to get the overhead for each pubdata byte.
+        // Also, this means that if we almost never close batches because of pubdata, the `pubdata_overhead_part` should be zero and so
+        // it is possible that the pubdata costs include no overhead.
+        let pubdata_overhead_wei =
+            (l1_batch_overhead_per_pubdata.as_u64() as f64 * pubdata_overhead_part) as u64;
+
+        // We sum up the raw L1 pubdata price (i.e. the expected price of publishing a single pubdata byte) and the overhead for the batch being closed.
+        l1_pubdata_price + pubdata_overhead_wei
+    };
+
+    PubdataIndependentBatchFeeModelInput {
+        l1_gas_price,
+        fair_l2_gas_price,
+        fair_pubdata_price,
+    }
+}
+
+/// The bootloader places limitations on fair_l2_gas_price and fair_pubdata_price
+/// (MAX_ALLOWED_FAIR_L2_GAS_PRICE and MAX_ALLOWED_FAIR_PUBDATA_PRICE in bootloader code respectively).
+/// The server needs to clip these prices in order to allow the chain to continue operating at a loss. The alternative
+/// would be to stop accepting transactions until the conditions improve.
+/// TODO (PE-153): to be removed when bootloader limitation is removed
+fn clip_batch_fee_model_input_v2(
+    fee_model: PubdataIndependentBatchFeeModelInput,
+) -> PubdataIndependentBatchFeeModelInput {
+    /// MAX_ALLOWED_FAIR_L2_GAS_PRICE
+    const MAXIMUM_L2_GAS_PRICE: u64 = 10_000_000_000_000;
+    /// MAX_ALLOWED_FAIR_PUBDATA_PRICE
+    const MAXIMUM_PUBDATA_PRICE: u64 = 1_000_000_000_000_000;
+    PubdataIndependentBatchFeeModelInput {
+        l1_gas_price: fee_model.l1_gas_price,
+        fair_l2_gas_price: if fee_model.fair_l2_gas_price < MAXIMUM_L2_GAS_PRICE {
+            fee_model.fair_l2_gas_price
+        } else {
+            tracing::warn!(
+                "Fair l2 gas price {} exceeds maximum. Limiting to {}",
+                fee_model.fair_l2_gas_price,
+                MAXIMUM_L2_GAS_PRICE
+            );
+            MAXIMUM_L2_GAS_PRICE
+        },
+        fair_pubdata_price: if fee_model.fair_pubdata_price < MAXIMUM_PUBDATA_PRICE {
+            fee_model.fair_pubdata_price
+        } else {
+            tracing::warn!(
+                "Fair pubdata price {} exceeds maximum. Limiting to {}",
+                fee_model.fair_pubdata_price,
+                MAXIMUM_PUBDATA_PRICE
+            );
+            MAXIMUM_PUBDATA_PRICE
+        },
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // To test that overflow never happens, we'll use a giant L1 gas price, i.e.
+    // an almost realistic but very large value of 100k gwei. Since it is so large, we'll also
+    // use it for the L1 pubdata price.
+    const GWEI: u64 = 1_000_000_000;
+    const GIANT_L1_GAS_PRICE: u64 = 100_000 * GWEI;
+
+    // As a small L2 gas price we'll use the value of 1 wei.
+    const SMALL_L1_GAS_PRICE: u64 = 1;
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_giant_numbers() {
+        let config = FeeModelConfigV2 {
+            minimal_l2_gas_price: GIANT_L1_GAS_PRICE,
+            // We generally don't expect those values to be larger than 1. Still, in theory the operator
+            // may need to set higher values in extreme cases.
+            compute_overhead_part: 5.0,
+            pubdata_overhead_part: 5.0,
+            // The batch overhead would likely never grow beyond that
+            batch_overhead_l1_gas: 1_000_000,
+            // Let's imagine that for some reason the limit is relatively small
+            max_gas_per_batch: 50_000_000,
+            // The pubdata will likely never go below that
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let params = FeeParamsV2::new(
+            config,
+            GIANT_L1_GAS_PRICE,
+            GIANT_L1_GAS_PRICE,
+            BaseTokenConversionRatio::default(),
+        );
+
+        // We'll use a scale factor of 3.0
+        let input = compute_batch_fee_model_input_v2(params, 3.0, 3.0);
+
+        assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE * 3);
+        assert_eq!(input.fair_l2_gas_price, 130_000_000_000_000);
+        assert_eq!(input.fair_pubdata_price, 15_300_000_000_000_000);
+    }
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_small_numbers() {
+        // Here we assume that the operator wants to make transactions as cheap as possible for users.
+        let config = FeeModelConfigV2 {
+            minimal_l2_gas_price: SMALL_L1_GAS_PRICE,
+            compute_overhead_part: 0.0,
+            pubdata_overhead_part: 0.0,
+            batch_overhead_l1_gas: 0,
+            max_gas_per_batch: 50_000_000,
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let params = FeeParamsV2::new(
+            config,
+            SMALL_L1_GAS_PRICE,
+            SMALL_L1_GAS_PRICE,
+            BaseTokenConversionRatio::default(),
+        );
+
+        let input =
+            clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0));
+
+        assert_eq!(input.l1_gas_price, SMALL_L1_GAS_PRICE);
+        assert_eq!(input.fair_l2_gas_price, SMALL_L1_GAS_PRICE);
+        assert_eq!(input.fair_pubdata_price, SMALL_L1_GAS_PRICE);
+    }
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_only_pubdata_overhead() {
+        // Here we use a sensible config, but one where only pubdata is used to close the batch
+        let config = FeeModelConfigV2 {
+            minimal_l2_gas_price: 100_000_000_000,
+            compute_overhead_part: 0.0,
+            pubdata_overhead_part: 1.0,
+            batch_overhead_l1_gas: 700_000,
+            max_gas_per_batch: 500_000_000,
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let params = FeeParamsV2::new(
+            config,
+            GIANT_L1_GAS_PRICE,
+            GIANT_L1_GAS_PRICE,
+            BaseTokenConversionRatio::default(),
+        );
+
+        let input =
+            clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0));
+        assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE);
+        // The fair L2 gas price is identical to the minimal one.
+        assert_eq!(input.fair_l2_gas_price, 100_000_000_000);
+        // The fair pubdata price is the minimal one plus the overhead.
+ assert_eq!(input.fair_pubdata_price, 800_000_000_000_000); + } + + #[test] + fn test_compute_baxtch_fee_model_input_v2_only_compute_overhead() { + // Here we use sensible config, but when only compute is used to close the batch + let config = FeeModelConfigV2 { + minimal_l2_gas_price: 100_000_000_000, + compute_overhead_part: 1.0, + pubdata_overhead_part: 0.0, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + GIANT_L1_GAS_PRICE, + GIANT_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0); + assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE); + // The fair L2 gas price is identical to the minimal one, plus the overhead + assert_eq!(input.fair_l2_gas_price, 240_000_000_000); + // The fair pubdata price is equal to the original one. + assert_eq!(input.fair_pubdata_price, GIANT_L1_GAS_PRICE); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_param_tweaking() { + // In this test we generally checking that each param behaves as expected + let base_config = FeeModelConfigV2 { + minimal_l2_gas_price: 100_000_000_000, + compute_overhead_part: 0.5, + pubdata_overhead_part: 0.5, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let base_params = FeeParamsV2::new( + base_config, + 1_000_000_000, + 1_000_000_000, + BaseTokenConversionRatio::default(), + ); + + let base_input = compute_batch_fee_model_input_v2(base_params, 1.0, 1.0); + + let base_input_larger_l1_gas_price = compute_batch_fee_model_input_v2( + FeeParamsV2::new( + base_config, + 2_000_000_000, // double the L1 gas price + 1_000_000_000, + BaseTokenConversionRatio::default(), + ), + 1.0, + 1.0, + ); + let base_input_scaled_l1_gas_price = + compute_batch_fee_model_input_v2(base_params, 2.0, 1.0); + assert_eq!( + base_input_larger_l1_gas_price, base_input_scaled_l1_gas_price, + "Scaling has the correct effect for the L1 gas price" + ); + assert!( + base_input.fair_l2_gas_price < base_input_larger_l1_gas_price.fair_l2_gas_price, + "L1 gas price increase raises L2 gas price" + ); + assert!( + base_input.fair_pubdata_price < base_input_larger_l1_gas_price.fair_pubdata_price, + "L1 gas price increase raises pubdata price" + ); + + let base_input_larger_pubdata_price = compute_batch_fee_model_input_v2( + FeeParamsV2::new( + base_config, + 1_000_000_000, + 2_000_000_000, // double the L1 pubdata price + BaseTokenConversionRatio::default(), + ), + 1.0, + 1.0, + ); + let base_input_scaled_pubdata_price = + compute_batch_fee_model_input_v2(base_params, 1.0, 2.0); + assert_eq!( + base_input_larger_pubdata_price, base_input_scaled_pubdata_price, + "Scaling has the correct effect for the pubdata price" + ); + assert_eq!( + base_input.fair_l2_gas_price, base_input_larger_pubdata_price.fair_l2_gas_price, + "L1 pubdata increase has no effect on L2 gas price" + ); + assert!( + base_input.fair_pubdata_price < base_input_larger_pubdata_price.fair_pubdata_price, + "Pubdata price increase raises pubdata price" + ); + + let base_input_larger_max_gas = compute_batch_fee_model_input_v2( + FeeParamsV2::new( + FeeModelConfigV2 { + max_gas_per_batch: base_config.max_gas_per_batch * 2, + ..base_config + }, + base_params.l1_gas_price(), + base_params.l1_pubdata_price(), + BaseTokenConversionRatio::default(), + ), + 1.0, + 1.0, + ); + assert!( + base_input.fair_l2_gas_price > base_input_larger_max_gas.fair_l2_gas_price, + 
"Max gas increase lowers L2 gas price" + ); + assert_eq!( + base_input.fair_pubdata_price, base_input_larger_max_gas.fair_pubdata_price, + "Max gas increase has no effect on pubdata price" + ); + + let base_input_larger_max_pubdata = compute_batch_fee_model_input_v2( + FeeParamsV2::new( + FeeModelConfigV2 { + max_pubdata_per_batch: base_config.max_pubdata_per_batch * 2, + ..base_config + }, + base_params.l1_gas_price(), + base_params.l1_pubdata_price(), + BaseTokenConversionRatio::default(), + ), + 1.0, + 1.0, + ); + assert_eq!( + base_input.fair_l2_gas_price, base_input_larger_max_pubdata.fair_l2_gas_price, + "Max pubdata increase has no effect on L2 gas price" + ); + assert!( + base_input.fair_pubdata_price > base_input_larger_max_pubdata.fair_pubdata_price, + "Max pubdata increase lowers pubdata price" + ); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_l1_gas() { + // In this test we check the gas price limit works as expected + let config = FeeModelConfigV2 { + minimal_l2_gas_price: 100 * GWEI, + compute_overhead_part: 0.5, + pubdata_overhead_part: 0.5, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let l1_gas_price = 1_000_000_000 * GWEI; + let params = FeeParamsV2::new( + config, + l1_gas_price, + GIANT_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + let input = + clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); + assert_eq!(input.l1_gas_price, l1_gas_price); + // The fair L2 gas price is identical to the maximum + assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); + assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_conversion_rate() { + // In this test we check the gas price limit works as expected + let config = FeeModelConfigV2 { + minimal_l2_gas_price: GWEI, + compute_overhead_part: 0.5, + pubdata_overhead_part: 0.5, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + GWEI, + 2 * GWEI, + BaseTokenConversionRatio { + numerator: NonZeroU64::new(3_000_000).unwrap(), + denominator: NonZeroU64::new(1).unwrap(), + }, + ); + + let input = + clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); + assert_eq!(input.l1_gas_price, 3_000_000 * GWEI); + // The fair L2 gas price is identical to the maximum + assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); + assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); + } } diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 036d2a7a036..48e813e571d 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -216,7 +216,9 @@ impl L2Tx { let raw = req.get_signed_bytes(&sig).context("get_signed_bytes")?; let (req, hash) = TransactionRequest::from_bytes_unverified(&raw).context("from_bytes_unverified()")?; - let mut tx = L2Tx::from_request_unverified(req).context("from_request_unverified()")?; + // Since we allow users to specify `None` recipient, EVM emulation is implicitly enabled. 
+ let mut tx = + L2Tx::from_request_unverified(req, true).context("from_request_unverified()")?; tx.set_input(raw, hash); Ok(tx) } diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs index 59ade8873cd..957cfa9a1a6 100644 --- a/core/lib/types/src/l2_to_l1_log.rs +++ b/core/lib/types/src/l2_to_l1_log.rs @@ -1,5 +1,5 @@ use serde::{Deserialize, Serialize}; -use zksync_system_constants::{BLOB1_LINEAR_HASH_KEY, PUBDATA_CHUNK_PUBLISHER_ADDRESS}; +use zksync_system_constants::{BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY, PUBDATA_CHUNK_PUBLISHER_ADDRESS}; use crate::{ blob::{num_blobs_created, num_blobs_required}, @@ -80,10 +80,15 @@ pub fn l2_to_l1_logs_tree_size(protocol_version: ProtocolVersionId) -> usize { } /// Returns the blob hashes parsed out from the system logs -pub fn parse_system_logs_for_blob_hashes( +pub fn parse_system_logs_for_blob_hashes_pre_gateway( protocol_version: &ProtocolVersionId, system_logs: &[SystemL2ToL1Log], ) -> Vec { + assert!( + protocol_version.is_pre_gateway(), + "Cannot parse blob linear hashes from system logs for post gateway" + ); + let num_required_blobs = num_blobs_required(protocol_version) as u32; let num_created_blobs = num_blobs_created(protocol_version) as u32; @@ -95,9 +100,11 @@ pub fn parse_system_logs_for_blob_hashes( .iter() .filter(|log| { log.0.sender == PUBDATA_CHUNK_PUBLISHER_ADDRESS - && log.0.key >= H256::from_low_u64_be(BLOB1_LINEAR_HASH_KEY as u64) + && log.0.key >= H256::from_low_u64_be(BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY as u64) && log.0.key - < H256::from_low_u64_be((BLOB1_LINEAR_HASH_KEY + num_created_blobs) as u64) + < H256::from_low_u64_be( + (BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY + num_created_blobs) as u64, + ) }) .map(|log| (log.0.key, log.0.value)) .collect::>(); diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 86b2e3f03d5..69e6e42fd51 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -5,7 +5,7 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] -use std::{fmt, fmt::Debug}; +use std::fmt; use anyhow::Context as _; use fee::encoding_len; @@ -43,7 +43,6 @@ pub mod l2; pub mod l2_to_l1_log; pub mod priority_op_onchain_data; pub mod protocol_upgrade; -pub mod pubdata_da; pub mod snapshots; pub mod storage; pub mod system_contracts; @@ -88,9 +87,16 @@ pub struct Transaction { pub raw_bytes: Option, } -impl std::fmt::Debug for Transaction { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("Transaction").field(&self.hash()).finish() +impl fmt::Debug for Transaction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(hash) = self.hash_for_debugging() { + f.debug_tuple("Transaction").field(&hash).finish() + } else { + f.debug_struct("Transaction") + .field("initiator_account", &self.initiator_account()) + .field("nonce", &self.nonce()) + .finish() + } } } @@ -136,6 +142,15 @@ impl Transaction { } } + fn hash_for_debugging(&self) -> Option { + match &self.common_data { + ExecuteTransactionCommon::L1(data) => Some(data.hash()), + ExecuteTransactionCommon::L2(data) if data.input.is_some() => Some(data.hash()), + ExecuteTransactionCommon::L2(_) => None, + ExecuteTransactionCommon::ProtocolUpgrade(data) => Some(data.hash()), + } + } + /// Returns the account that initiated this transaction. 
pub fn initiator_account(&self) -> Address { match &self.common_data { @@ -315,9 +330,14 @@ impl TryFrom for abi::Transaction { } } -impl TryFrom for Transaction { - type Error = anyhow::Error; - fn try_from(tx: abi::Transaction) -> anyhow::Result { +impl Transaction { + /// Converts a transaction from its ABI representation. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables L2 transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. + pub fn from_abi(tx: abi::Transaction, allow_no_target: bool) -> anyhow::Result { Ok(match tx { abi::Transaction::L1 { tx, @@ -389,7 +409,7 @@ impl TryFrom for Transaction { abi::Transaction::L2(raw) => { let (req, hash) = transaction_request::TransactionRequest::from_bytes_unverified(&raw)?; - let mut tx = L2Tx::from_request_unverified(req)?; + let mut tx = L2Tx::from_request_unverified(req, allow_no_target)?; tx.set_input(raw, hash); tx.into() } diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs index ff3030d1b4f..2461db26593 100644 --- a/core/lib/types/src/protocol_upgrade.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -64,6 +64,8 @@ pub struct ProtocolUpgrade { pub bootloader_code_hash: Option, /// New default account code hash. pub default_account_code_hash: Option, + /// New EVM emulator code hash + pub evm_emulator_code_hash: Option, /// New verifier params. pub verifier_params: Option, /// New verifier address. @@ -120,17 +122,21 @@ impl ProtocolUpgrade { bootloader_code_hash: (bootloader_hash != H256::zero()).then_some(bootloader_hash), default_account_code_hash: (default_account_hash != H256::zero()) .then_some(default_account_hash), + evm_emulator_code_hash: None, // EVM emulator upgrades are not supported yet verifier_params: (upgrade.verifier_params != abi::VerifierParams::default()) .then_some(upgrade.verifier_params.into()), verifier_address: (upgrade.verifier != Address::zero()).then_some(upgrade.verifier), timestamp: upgrade.upgrade_timestamp.try_into().unwrap(), tx: (upgrade.l2_protocol_upgrade_tx.tx_type != U256::zero()) .then(|| { - Transaction::try_from(abi::Transaction::L1 { - tx: upgrade.l2_protocol_upgrade_tx, - factory_deps: upgrade.factory_deps, - eth_block: 0, - }) + Transaction::from_abi( + abi::Transaction::L1 { + tx: upgrade.l2_protocol_upgrade_tx, + factory_deps: upgrade.factory_deps, + eth_block: 0, + }, + true, + ) .context("Transaction::try_from()")? 
.try_into() .map_err(|err| anyhow::format_err!("try_into::(): {err}")) @@ -169,14 +175,17 @@ pub fn decode_genesis_upgrade_event( .unwrap_or_else(|_| panic!("Version is not supported, packed version: {full_version_id}")); Ok(( protocol_version, - Transaction::try_from(abi::Transaction::L1 { - tx: tx.into(), - eth_block: event - .block_number - .expect("Event block number is missing") - .as_u64(), - factory_deps, - }) + Transaction::from_abi( + abi::Transaction::L1 { + tx: tx.into(), + eth_block: event + .block_number + .expect("Event block number is missing") + .as_u64(), + factory_deps, + }, + true, + ) .unwrap() .try_into() .unwrap(), @@ -321,6 +330,9 @@ impl ProtocolVersion { default_aa: upgrade .default_account_code_hash .unwrap_or(self.base_system_contracts_hashes.default_aa), + evm_emulator: upgrade + .evm_emulator_code_hash + .or(self.base_system_contracts_hashes.evm_emulator), }, tx: upgrade.tx, } diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs index 9626a5515dc..3294168b27d 100644 --- a/core/lib/types/src/storage/mod.rs +++ b/core/lib/types/src/storage/mod.rs @@ -6,7 +6,7 @@ pub use log::*; use serde::{Deserialize, Serialize}; use zksync_basic_types::{web3::keccak256, L2ChainId}; pub use zksync_system_constants::*; -use zksync_utils::address_to_h256; +use zksync_utils::{address_to_h256, u256_to_h256}; use crate::{AccountTreeId, Address, H160, H256, U256}; @@ -90,6 +90,10 @@ pub fn get_code_key(account: &Address) -> StorageKey { StorageKey::new(account_code_storage, address_to_h256(account)) } +pub fn get_evm_code_hash_key(account: &Address) -> StorageKey { + get_deployer_key(get_address_mapping_key(account, u256_to_h256(1.into()))) +} + pub fn get_known_code_key(hash: &H256) -> StorageKey { let known_codes_storage = AccountTreeId::new(KNOWN_CODES_STORAGE_ADDRESS); StorageKey::new(known_codes_storage, *hash) @@ -110,6 +114,11 @@ fn get_immutable_simulator_log_key(key: H256) -> StorageKey { StorageKey::new(immutable_simulator, key) } +pub fn get_deployer_key(key: H256) -> StorageKey { + let deployer_contract = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); + StorageKey::new(deployer_contract, key) +} + pub fn get_is_account_key(account: &Address) -> StorageKey { let deployer = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index bce9cc9034d..7f3195af873 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{StorageKey, StorageValue}; /// Storage data used during Witness Generation. 
-#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct WitnessStorageState { pub read_storage_key: HashMap, pub is_write_initial: HashMap, diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index 4caf81fd0cf..643aa56a1f1 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -1,11 +1,10 @@ use std::path::PathBuf; -use once_cell::sync::Lazy; use zksync_basic_types::{AccountTreeId, Address, U256}; use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage, SystemContractsRepo}; use zksync_system_constants::{ BOOTLOADER_UTILITIES_ADDRESS, CODE_ORACLE_ADDRESS, COMPRESSOR_ADDRESS, CREATE2_FACTORY_ADDRESS, - EVENT_WRITER_ADDRESS, L2_ASSET_ROUTER_ADDRESS, L2_BRIDGEHUB_ADDRESS, + EVENT_WRITER_ADDRESS, EVM_GAS_MANAGER_ADDRESS, L2_ASSET_ROUTER_ADDRESS, L2_BRIDGEHUB_ADDRESS, L2_GENESIS_UPGRADE_ADDRESS, L2_MESSAGE_ROOT_ADDRESS, L2_NATIVE_TOKEN_VAULT_ADDRESS, P256VERIFY_PRECOMPILE_ADDRESS, PUBDATA_CHUNK_PUBLISHER_ADDRESS, }; @@ -27,7 +26,7 @@ use crate::{ pub const TX_NONCE_INCREMENT: U256 = U256([1, 0, 0, 0]); // 1 pub const DEPLOYMENT_NONCE_INCREMENT: U256 = U256([0, 0, 1, 0]); // 2^128 -static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 30] = [ +static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 31] = [ ( "", "AccountCodeStorage", @@ -149,6 +148,12 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 30] = [ COMPLEX_UPGRADER_ADDRESS, ContractLanguage::Sol, ), + ( + "", + "EvmGasManager", + EVM_GAS_MANAGER_ADDRESS, + ContractLanguage::Yul, + ), // For now, only zero address and the bootloader address have empty bytecode at the init // In the future, we might want to set all of the system contracts this way. ("", "EmptyContract", Address::zero(), ContractLanguage::Sol), @@ -202,29 +207,40 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 30] = [ ), ]; -static SYSTEM_CONTRACTS: Lazy> = Lazy::new(|| { +/// Gets default set of system contracts, based on Cargo workspace location. +pub fn get_system_smart_contracts(use_evm_emulator: bool) -> Vec { SYSTEM_CONTRACT_LIST .iter() - .map(|(path, name, address, contract_lang)| DeployedContract { - account_id: AccountTreeId::new(*address), - bytecode: read_sys_contract_bytecode(path, name, contract_lang.clone()), + .filter_map(|(path, name, address, contract_lang)| { + if *name == "EvmGasManager" && !use_evm_emulator { + None + } else { + Some(DeployedContract { + account_id: AccountTreeId::new(*address), + bytecode: read_sys_contract_bytecode(path, name, contract_lang.clone()), + }) + } }) - .collect::>() -}); - -/// Gets default set of system contracts, based on Cargo workspace location. -pub fn get_system_smart_contracts() -> Vec { - SYSTEM_CONTRACTS.clone() + .collect() } /// Loads system contracts from a given directory. 
-pub fn get_system_smart_contracts_from_dir(path: PathBuf) -> Vec { +pub fn get_system_smart_contracts_from_dir( + path: PathBuf, + use_evm_emulator: bool, +) -> Vec { let repo = SystemContractsRepo { root: path }; SYSTEM_CONTRACT_LIST .iter() - .map(|(path, name, address, contract_lang)| DeployedContract { - account_id: AccountTreeId::new(*address), - bytecode: repo.read_sys_contract_bytecode(path, name, contract_lang.clone()), + .filter_map(|(path, name, address, contract_lang)| { + if *name == "EvmGasManager" && !use_evm_emulator { + None + } else { + Some(DeployedContract { + account_id: AccountTreeId::new(*address), + bytecode: repo.read_sys_contract_bytecode(path, name, contract_lang.clone()), + }) + } }) .collect::>() } diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 5f26b1d6a6a..a8713f301ba 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -809,6 +809,7 @@ impl TransactionRequest { impl L2Tx { pub(crate) fn from_request_unverified( mut value: TransactionRequest, + allow_no_target: bool, ) -> Result { let fee = value.get_fee_data_checked()?; let nonce = value.get_nonce_checked()?; @@ -817,8 +818,7 @@ impl L2Tx { let meta = value.eip712_meta.take().unwrap_or_default(); validate_factory_deps(&meta.factory_deps)?; - // TODO: Remove this check when evm equivalence gets enabled - if value.to.is_none() { + if value.to.is_none() && !allow_no_target { return Err(SerializationTransactionError::ToAddressIsNull); } @@ -848,11 +848,18 @@ impl L2Tx { Ok(tx) } + /// Converts a request into a transaction. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. pub fn from_request( - value: TransactionRequest, + request: TransactionRequest, max_tx_size: usize, + allow_no_target: bool, ) -> Result { - let tx = Self::from_request_unverified(value)?; + let tx = Self::from_request_unverified(request, allow_no_target)?; tx.check_encoded_size(max_tx_size)?; Ok(tx) } @@ -916,11 +923,19 @@ impl From for TransactionRequest { } } -impl TryFrom for L1Tx { - type Error = SerializationTransactionError; - fn try_from(tx: CallRequest) -> Result { +impl L1Tx { + /// Converts a request into a transaction. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. + pub fn from_request( + request: CallRequest, + allow_no_target: bool, + ) -> Result { // L1 transactions have no limitations on the transaction size. 
- let tx: L2Tx = L2Tx::from_request(tx.into(), MAX_ENCODED_TX_SIZE)?; + let tx: L2Tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE, allow_no_target)?; // Note, that while the user has theoretically provided the fee for ETH on L1, // the payment to the operator as well as refunds happen on L2 and so all the ETH @@ -1316,7 +1331,7 @@ mod tests { ..Default::default() }; let execute_tx1: Result = - L2Tx::from_request(tx1, usize::MAX); + L2Tx::from_request(tx1, usize::MAX, true); assert!(execute_tx1.is_ok()); let tx2 = TransactionRequest { @@ -1327,7 +1342,7 @@ mod tests { ..Default::default() }; let execute_tx2: Result = - L2Tx::from_request(tx2, usize::MAX); + L2Tx::from_request(tx2, usize::MAX, true); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::TooBigNonce @@ -1344,7 +1359,7 @@ mod tests { ..Default::default() }; let execute_tx1: Result = - L2Tx::from_request(tx1, usize::MAX); + L2Tx::from_request(tx1, usize::MAX, true); assert_eq!( execute_tx1.unwrap_err(), SerializationTransactionError::MaxFeePerGasNotU64 @@ -1358,7 +1373,7 @@ mod tests { ..Default::default() }; let execute_tx2: Result = - L2Tx::from_request(tx2, usize::MAX); + L2Tx::from_request(tx2, usize::MAX, true); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::MaxPriorityFeePerGasNotU64 @@ -1376,7 +1391,7 @@ mod tests { }; let execute_tx3: Result = - L2Tx::from_request(tx3, usize::MAX); + L2Tx::from_request(tx3, usize::MAX, true); assert_eq!( execute_tx3.unwrap_err(), SerializationTransactionError::MaxFeePerPubdataByteNotU64 @@ -1432,7 +1447,7 @@ mod tests { let request = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)).unwrap(); assert_matches!( - L2Tx::from_request(request.0, random_tx_max_size), + L2Tx::from_request(request.0, random_tx_max_size, true), Err(SerializationTransactionError::OversizedData(_, _)) ) } @@ -1458,7 +1473,7 @@ mod tests { }; let try_to_l2_tx: Result = - L2Tx::from_request(call_request.into(), random_tx_max_size); + L2Tx::from_request(call_request.into(), random_tx_max_size, true); assert_matches!( try_to_l2_tx, @@ -1483,15 +1498,20 @@ mod tests { access_list: None, eip712_meta: None, }; - let l2_tx = L2Tx::from_request(call_request_with_nonce.clone().into(), MAX_ENCODED_TX_SIZE) - .unwrap(); + let l2_tx = L2Tx::from_request( + call_request_with_nonce.clone().into(), + MAX_ENCODED_TX_SIZE, + true, + ) + .unwrap(); assert_eq!(l2_tx.nonce(), Nonce(123u32)); let mut call_request_without_nonce = call_request_with_nonce; call_request_without_nonce.nonce = None; let l2_tx = - L2Tx::from_request(call_request_without_nonce.into(), MAX_ENCODED_TX_SIZE).unwrap(); + L2Tx::from_request(call_request_without_nonce.into(), MAX_ENCODED_TX_SIZE, true) + .unwrap(); assert_eq!(l2_tx.nonce(), Nonce(0u32)); } diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index c133261bc23..0edece9e46b 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -1,6 +1,7 @@ use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -use zksync_utils::ZeroPrefixHexSerde; +use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_utils::{bytecode::hash_bytecode, ZeroPrefixHexSerde}; use crate::{ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; @@ -89,8 +90,7 @@ impl Execute { &self.calldata } - /// Prepares calldata to invoke deployer contract. - /// This method encodes parameters for the `create` method. + /// Prepares calldata to invoke deployer contract. 
This method encodes parameters for the `create` method. pub fn encode_deploy_params_create( salt: H256, contract_hash: H256, @@ -116,4 +116,24 @@ impl Execute { FUNCTION_SIGNATURE.iter().copied().chain(params).collect() } + + /// Creates an instance for deploying the specified bytecode without additional dependencies. If necessary, + /// additional deps can be added to `Self.factory_deps` after this call. + pub fn for_deploy( + salt: H256, + contract_bytecode: Vec, + constructor_input: &[ethabi::Token], + ) -> Self { + let bytecode_hash = hash_bytecode(&contract_bytecode); + Self { + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), + calldata: Self::encode_deploy_params_create( + salt, + bytecode_hash, + ethabi::encode(constructor_input), + ), + value: 0.into(), + factory_deps: vec![contract_bytecode], + } + } } diff --git a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs index 48bdb433020..01cce5bc34d 100644 --- a/core/lib/utils/src/bytecode.rs +++ b/core/lib/utils/src/bytecode.rs @@ -1,5 +1,6 @@ // FIXME: move to basic_types? +use zk_evm::k256::sha2::{Digest, Sha256}; use zksync_basic_types::H256; use crate::bytes_to_chunks; @@ -40,6 +41,7 @@ pub fn validate_bytecode(code: &[u8]) -> Result<(), InvalidBytecodeError> { Ok(()) } +/// Hashes the provided EraVM bytecode. pub fn hash_bytecode(code: &[u8]) -> H256 { let chunked_code = bytes_to_chunks(code); let hash = zk_evm::zkevm_opcode_defs::utils::bytecode_to_code_hash(&chunked_code) @@ -55,3 +57,62 @@ pub fn bytecode_len_in_words(bytecodehash: &H256) -> u16 { pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { bytecode_len_in_words(&bytecodehash) as usize * 32 } + +/// Bytecode marker encoded in the first byte of the bytecode hash. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u8)] +pub enum BytecodeMarker { + /// EraVM bytecode marker (1). + EraVm = 1, + /// EVM bytecode marker (2). + Evm = 2, +} + +impl BytecodeMarker { + /// Parses a marker from the bytecode hash. + pub fn new(bytecode_hash: H256) -> Option { + Some(match bytecode_hash.as_bytes()[0] { + val if val == Self::EraVm as u8 => Self::EraVm, + val if val == Self::Evm as u8 => Self::Evm, + _ => return None, + }) + } +} + +/// Hashes the provided EVM bytecode. The bytecode must be padded to an odd number of 32-byte words; +/// bytecodes stored in the known codes storage satisfy this requirement automatically. +pub fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { + validate_bytecode(bytecode).expect("invalid EVM bytecode"); + + let mut hasher = Sha256::new(); + let len = bytecode.len() as u16; + hasher.update(bytecode); + let result = hasher.finalize(); + + let mut output = [0u8; 32]; + output[..].copy_from_slice(result.as_slice()); + output[0] = BytecodeMarker::Evm as u8; + output[1] = 0; + output[2..4].copy_from_slice(&len.to_be_bytes()); + + H256(output) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytecode_markers_are_valid() { + let bytecode_hash = hash_bytecode(&[0; 32]); + assert_eq!( + BytecodeMarker::new(bytecode_hash), + Some(BytecodeMarker::EraVm) + ); + let bytecode_hash = hash_evm_bytecode(&[0; 32]); + assert_eq!( + BytecodeMarker::new(bytecode_hash), + Some(BytecodeMarker::Evm) + ); + } +} diff --git a/core/lib/utils/src/env.rs b/core/lib/utils/src/env.rs index 5ae07caf148..8f4aa1da940 100644 --- a/core/lib/utils/src/env.rs +++ b/core/lib/utils/src/env.rs @@ -19,8 +19,8 @@ pub enum Workspace<'a> { Core(&'a Path), /// `prover` folder. Prover(&'a Path), - /// `toolbox` folder. 
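For readers of the `hash_evm_bytecode` addition in `core/lib/utils/src/bytecode.rs` above, here is a minimal stand-alone sketch of the hash layout it produces (it uses the `sha2` crate directly; the function name is made up for illustration, not part of the patch):

```rust
use sha2::{Digest, Sha256};

/// Illustrative re-implementation: byte 0 is the marker (2 = EVM), byte 1 is a version byte (0),
/// bytes 2..4 hold the preimage length in bytes (big-endian), and the rest is the tail of the
/// SHA-256 digest of the word-padded bytecode.
fn evm_bytecode_hash_sketch(bytecode: &[u8]) -> [u8; 32] {
    assert_eq!(bytecode.len() % 32, 0, "bytecode must be padded to 32-byte words");
    let digest = Sha256::digest(bytecode);

    let mut output = [0u8; 32];
    output.copy_from_slice(digest.as_slice());
    output[0] = 2; // corresponds to `BytecodeMarker::Evm`
    output[1] = 0;
    output[2..4].copy_from_slice(&(bytecode.len() as u16).to_be_bytes());
    output
}

fn main() {
    let hash = evm_bytecode_hash_sketch(&[0; 32]);
    // Both the marker and the preimage length can be read straight back out of the hash.
    assert_eq!(hash[0], 2);
    assert_eq!(u16::from_be_bytes([hash[2], hash[3]]), 32);
}
```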
- Toolbox(&'a Path), + /// ZK Stack CLI folder. + ZkStackCli(&'a Path), } impl Workspace<'static> { @@ -48,7 +48,7 @@ impl Workspace<'static> { impl<'a> Workspace<'a> { const PROVER_DIRECTORY_NAME: &'static str = "prover"; - const TOOLBOX_DIRECTORY_NAME: &'static str = "zk_toolbox"; + const ZKSTACK_CLI_DIRECTORY_NAME: &'static str = "zkstack_cli"; /// Returns the path of the core workspace. /// For `Workspace::None`, considers the current directory to represent core workspace. @@ -56,7 +56,7 @@ impl<'a> Workspace<'a> { match self { Self::None => PathBuf::from("."), Self::Core(path) => path.into(), - Self::Prover(path) | Self::Toolbox(path) => path.parent().unwrap().into(), + Self::Prover(path) | Self::ZkStackCli(path) => path.parent().unwrap().into(), } } @@ -68,11 +68,11 @@ impl<'a> Workspace<'a> { } } - /// Returns the path of the `zk_toolbox`` workspace. - pub fn toolbox(self) -> PathBuf { + /// Returns the path of the ZK Stack CLI workspace. + pub fn zkstack_cli(self) -> PathBuf { match self { - Self::Toolbox(path) => path.into(), - _ => self.core().join(Self::TOOLBOX_DIRECTORY_NAME), + Self::ZkStackCli(path) => path.into(), + _ => self.core().join(Self::ZKSTACK_CLI_DIRECTORY_NAME), } } } @@ -81,8 +81,8 @@ impl<'a> From<&'a Path> for Workspace<'a> { fn from(path: &'a Path) -> Self { if path.ends_with(Self::PROVER_DIRECTORY_NAME) { Self::Prover(path) - } else if path.ends_with(Self::TOOLBOX_DIRECTORY_NAME) { - Self::Toolbox(path) + } else if path.ends_with(Self::ZKSTACK_CLI_DIRECTORY_NAME) { + Self::ZkStackCli(path) } else { Self::Core(path) } @@ -154,16 +154,16 @@ mod tests { let workspace = Workspace::locate(); assert_matches!(workspace, Workspace::Core(_)); let core_path = workspace.core(); - // Check if prover and toolbox directories exist. + // Check if prover and ZK Stack CLI directories exist. assert!(workspace.prover().exists()); assert_matches!( Workspace::from(workspace.prover().as_path()), Workspace::Prover(_) ); - assert!(workspace.toolbox().exists()); + assert!(workspace.zkstack_cli().exists()); assert_matches!( - Workspace::from(workspace.toolbox().as_path()), - Workspace::Toolbox(_) + Workspace::from(workspace.zkstack_cli().as_path()), + Workspace::ZkStackCli(_) ); // Prover. @@ -181,17 +181,17 @@ mod tests { Workspace::from(workspace.core().as_path()), Workspace::Core(_) ); - assert!(workspace.toolbox().exists()); + assert!(workspace.zkstack_cli().exists()); assert_matches!( - Workspace::from(workspace.toolbox().as_path()), - Workspace::Toolbox(_) + Workspace::from(workspace.zkstack_cli().as_path()), + Workspace::ZkStackCli(_) ); - // Toolbox. 
- std::env::set_current_dir(workspace.toolbox()).unwrap(); + // ZK Stack CLI + std::env::set_current_dir(workspace.zkstack_cli()).unwrap(); let workspace_path = locate_workspace_inner().unwrap(); let workspace = Workspace::from(workspace_path.as_path()); - assert_matches!(workspace, Workspace::Toolbox(_)); + assert_matches!(workspace, Workspace::ZkStackCli(_)); assert_eq!(workspace.core(), core_path); assert_matches!( Workspace::from(workspace.core().as_path()), diff --git a/core/lib/vm_executor/Cargo.toml b/core/lib/vm_executor/Cargo.toml index 089c2a9bcca..a967aaa969a 100644 --- a/core/lib/vm_executor/Cargo.toml +++ b/core/lib/vm_executor/Cargo.toml @@ -23,3 +23,6 @@ tokio.workspace = true anyhow.workspace = true tracing.workspace = true vise.workspace = true + +[dev-dependencies] +assert_matches.workspace = true diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index 146f0bb4e5c..5877922b333 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -6,18 +6,21 @@ use tokio::sync::mpsc; use zksync_multivm::{ interface::{ executor::{BatchExecutor, BatchExecutorFactory}, + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageView, StorageViewStats}, utils::DivergenceHandler, BatchTransactionExecutionResult, BytecodeCompressionError, CompressedBytecodeInfo, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, + is_supported_by_fast_vm, + pubdata_builders::pubdata_params_to_builder, tracers::CallTracer, vm_fast, vm_latest::HistoryEnabled, FastVmInstance, LegacyVmInstance, MultiVMTracer, }; -use zksync_types::{vm::FastVmMode, Transaction}; +use zksync_types::{commitment::PubdataParams, vm::FastVmMode, Transaction}; use super::{ executor::{Command, MainBatchExecutor}, @@ -115,6 +118,7 @@ impl BatchExecutorFactory storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box> { // Since we process `BatchExecutor` commands one-by-one (the next command is never enqueued // until a previous command is processed), capacity 1 is enough for the commands channel. 
@@ -129,8 +133,14 @@ impl BatchExecutorFactory _tracer: PhantomData::, }; - let handle = - tokio::task::spawn_blocking(move || executor.run(storage, l1_batch_params, system_env)); + let handle = tokio::task::spawn_blocking(move || { + executor.run( + storage, + l1_batch_params, + system_env, + pubdata_params_to_builder(pubdata_params), + ) + }); Box::new(MainBatchExecutor::new(handle, commands_sender)) } } @@ -159,6 +169,10 @@ impl BatchVm { storage_ptr: StoragePtr>, mode: FastVmMode, ) -> Self { + if !is_supported_by_fast_vm(system_env.version) { + return Self::Legacy(LegacyVmInstance::new(l1_batch_env, system_env, storage_ptr)); + } + match mode { FastVmMode::Old => { Self::Legacy(LegacyVmInstance::new(l1_batch_env, system_env, storage_ptr)) @@ -178,8 +192,8 @@ impl BatchVm { dispatch_batch_vm!(self.start_new_l2_block(l2_block)); } - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_batch_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_batch_vm!(self.finish_batch(pubdata_builder)) } fn make_snapshot(&mut self) { @@ -255,6 +269,7 @@ impl CommandReceiver { storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_builder: Rc, ) -> anyhow::Result> { tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number); @@ -305,7 +320,7 @@ impl CommandReceiver { } } Command::FinishBatch(resp) => { - let vm_block_result = self.finish_batch(&mut vm)?; + let vm_block_result = self.finish_batch(&mut vm, pubdata_builder)?; if resp.send(vm_block_result).is_err() { break; } @@ -360,10 +375,14 @@ impl CommandReceiver { latency.observe(); } - fn finish_batch(&self, vm: &mut BatchVm) -> anyhow::Result { + fn finish_batch( + &self, + vm: &mut BatchVm, + pubdata_builder: Rc, + ) -> anyhow::Result { // The vm execution was paused right after the last transaction was executed. // There is some post-processing work that the VM needs to do before the block is fully processed. 
- let result = vm.finish_batch(); + let result = vm.finish_batch(pubdata_builder); anyhow::ensure!( !result.block_tip_execution_result.result.is_failed(), "VM must not fail when finalizing block: {:#?}", @@ -443,3 +462,50 @@ impl CommandReceiver { } } } + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + use zksync_multivm::interface::{storage::InMemoryStorage, TxExecutionMode}; + use zksync_types::ProtocolVersionId; + + use super::*; + use crate::testonly::{default_l1_batch_env, default_system_env, FAST_VM_MODES}; + + #[test] + fn selecting_vm_for_execution() { + let l1_batch_env = default_l1_batch_env(1); + let mut system_env = SystemEnv { + version: ProtocolVersionId::Version22, + ..default_system_env(TxExecutionMode::VerifyExecute) + }; + let storage = StorageView::new(InMemoryStorage::default()).to_rc_ptr(); + for mode in FAST_VM_MODES { + let vm = BatchVm::<_, ()>::new( + l1_batch_env.clone(), + system_env.clone(), + storage.clone(), + mode, + ); + assert_matches!(vm, BatchVm::Legacy(_)); + } + + system_env.version = ProtocolVersionId::latest(); + let vm = BatchVm::<_, ()>::new( + l1_batch_env.clone(), + system_env.clone(), + storage.clone(), + FastVmMode::Old, + ); + assert_matches!(vm, BatchVm::Legacy(_)); + // let vm = BatchVm::<_, ()>::new( + // l1_batch_env.clone(), + // system_env.clone(), + // storage.clone(), + // FastVmMode::New, + // ); + // assert_matches!(vm, BatchVm::Fast(FastVmInstance::Fast(_))); + // let vm = BatchVm::<_, ()>::new(l1_batch_env, system_env, storage, FastVmMode::Shadow); + // assert_matches!(vm, BatchVm::Fast(FastVmInstance::Shadowed(_))); + } +} diff --git a/core/lib/vm_executor/src/lib.rs b/core/lib/vm_executor/src/lib.rs index 1a0fbb002df..83edb77fd62 100644 --- a/core/lib/vm_executor/src/lib.rs +++ b/core/lib/vm_executor/src/lib.rs @@ -9,3 +9,5 @@ pub mod batch; pub mod oneshot; mod shared; pub mod storage; +#[cfg(test)] +mod testonly; diff --git a/core/lib/vm_executor/src/oneshot/block.rs b/core/lib/vm_executor/src/oneshot/block.rs index cab64289e5e..d6118f15b98 100644 --- a/core/lib/vm_executor/src/oneshot/block.rs +++ b/core/lib/vm_executor/src/oneshot/block.rs @@ -7,7 +7,6 @@ use zksync_multivm::{ use zksync_types::{ api, block::{unpack_block_info, L2BlockHasher}, - commitment::PubdataParams, fee_model::BatchFeeInput, AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, H256, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, @@ -15,7 +14,7 @@ use zksync_types::{ }; use zksync_utils::{h256_to_u256, time::seconds_since_epoch}; -use super::env::OneshotEnvParameters; +use super::{env::OneshotEnvParameters, ContractsKind}; /// Block information necessary to execute a transaction / call. Unlike [`ResolvedBlockInfo`], this information is *partially* resolved, /// which is beneficial for some data workflows. @@ -134,29 +133,34 @@ impl BlockInfo { let protocol_version = l2_block_header .protocol_version .unwrap_or(ProtocolVersionId::last_potentially_undefined()); - + // We cannot use the EVM emulator mentioned in the block as is because of batch vs playground settings etc. + // Instead, we just check whether EVM emulation in general is enabled for a block, and store this binary flag for further use. 
+ let use_evm_emulator = l2_block_header + .base_system_contracts_hashes + .evm_emulator + .is_some(); Ok(ResolvedBlockInfo { state_l2_block_number, state_l2_block_hash: l2_block_header.hash, vm_l1_batch_number, l1_batch_timestamp, protocol_version, + use_evm_emulator, is_pending: self.is_pending_l2_block(), - pubdata_params: l2_block_header.pubdata_params, }) } } /// Resolved [`BlockInfo`] containing additional data from VM state. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ResolvedBlockInfo { state_l2_block_number: L2BlockNumber, state_l2_block_hash: H256, vm_l1_batch_number: L1BatchNumber, l1_batch_timestamp: u64, protocol_version: ProtocolVersionId, + use_evm_emulator: bool, is_pending: bool, - pubdata_params: PubdataParams, } impl ResolvedBlockInfo { @@ -164,9 +168,17 @@ impl ResolvedBlockInfo { pub fn state_l2_block_number(&self) -> L2BlockNumber { self.state_l2_block_number } + + pub fn protocol_version(&self) -> ProtocolVersionId { + self.protocol_version + } + + pub fn use_evm_emulator(&self) -> bool { + self.use_evm_emulator + } } -impl OneshotEnvParameters { +impl OneshotEnvParameters { pub(super) async fn to_env_inner( &self, connection: &mut Connection<'_, Core>, @@ -182,13 +194,16 @@ impl OneshotEnvParameters { ) .await?; - let (system, l1_batch) = self.prepare_env( - execution_mode, - resolved_block_info, - next_block, - fee_input, - enforced_base_fee, - ); + let (system, l1_batch) = self + .prepare_env( + execution_mode, + resolved_block_info, + next_block, + fee_input, + enforced_base_fee, + ) + .await?; + Ok(OneshotEnv { system, l1_batch, @@ -196,14 +211,14 @@ impl OneshotEnvParameters { }) } - fn prepare_env( + async fn prepare_env( &self, execution_mode: TxExecutionMode, resolved_block_info: &ResolvedBlockInfo, next_block: L2BlockEnv, fee_input: BatchFeeInput, enforced_base_fee: Option, - ) -> (SystemEnv, L1BatchEnv) { + ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { let &Self { operator_account, validation_computational_gas_limit, @@ -216,13 +231,13 @@ impl OneshotEnvParameters { version: resolved_block_info.protocol_version, base_system_smart_contracts: self .base_system_contracts - .get_by_protocol_version(resolved_block_info.protocol_version) - .clone(), + .base_system_contracts(resolved_block_info) + .await + .context("failed getting base system contracts")?, bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, execution_mode, default_validation_computational_gas_limit: validation_computational_gas_limit, chain_id, - pubdata_params: resolved_block_info.pubdata_params, }; let l1_batch_env = L1BatchEnv { previous_batch_hash: None, @@ -233,7 +248,7 @@ impl OneshotEnvParameters { enforced_base_fee, first_l2_block: next_block, }; - (system_env, l1_batch_env) + Ok((system_env, l1_batch_env)) } } diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index 2a9204f7d0a..6f9f021345c 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -1,9 +1,52 @@ +use std::{fmt, marker::PhantomData}; + +use async_trait::async_trait; use zksync_contracts::BaseSystemContracts; use zksync_types::ProtocolVersionId; +use super::ResolvedBlockInfo; +use crate::shared::Sealed; + +/// Kind of base system contracts used as a marker in the [`BaseSystemContractsProvider`] trait. +pub trait ContractsKind: fmt::Debug + Sealed {} + +/// Marker for [`BaseSystemContracts`] used for gas estimation. 
+#[derive(Debug)] +pub struct EstimateGas(()); + +impl Sealed for EstimateGas {} +impl ContractsKind for EstimateGas {} + +/// Marker for [`BaseSystemContracts`] used for calls and transaction execution. +#[derive(Debug)] +pub struct CallOrExecute(()); + +impl Sealed for CallOrExecute {} +impl ContractsKind for CallOrExecute {} + +/// Provider of [`BaseSystemContracts`] for oneshot execution. +/// +/// The main implementation of this trait is [`MultiVMBaseSystemContracts`], which selects contracts +/// based on [`ProtocolVersionId`]. +#[async_trait] +pub trait BaseSystemContractsProvider: fmt::Debug + Send + Sync { + /// Returns base system contracts for executing a transaction on top of the provided block. + /// + /// Implementations are encouraged to cache returned contracts for performance; caching is **not** performed + /// by the caller. + /// + /// # Errors + /// + /// Returned errors are treated as unrecoverable for a particular execution, but further executions are not affected. + async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result; +} + /// System contracts (bootloader and default account abstraction) for all supported VM versions. -#[derive(Debug, Clone)] -pub(super) struct MultiVMBaseSystemContracts { +#[derive(Debug)] +pub struct MultiVMBaseSystemContracts { /// Contracts to be used for pre-virtual-blocks protocol versions. pre_virtual_blocks: BaseSystemContracts, /// Contracts to be used for post-virtual-blocks protocol versions. @@ -22,14 +65,21 @@ pub(super) struct MultiVMBaseSystemContracts { vm_1_5_0_small_memory: BaseSystemContracts, /// Contracts to be used after the 1.5.0 upgrade vm_1_5_0_increased_memory: BaseSystemContracts, + /// Contracts to be used after the protocol defense upgrade + vm_protocol_defense: BaseSystemContracts, /// Contracts to be used after the gateway upgrade gateway: BaseSystemContracts, + // We use `fn() -> C` marker so that the `MultiVMBaseSystemContracts` unconditionally implements `Send + Sync`. + _contracts_kind: PhantomData C>, } -impl MultiVMBaseSystemContracts { - /// Gets contracts for a certain version. - pub fn get_by_protocol_version(&self, version: ProtocolVersionId) -> &BaseSystemContracts { - match version { +impl MultiVMBaseSystemContracts { + fn get_by_protocol_version( + &self, + version: ProtocolVersionId, + use_evm_emulator: bool, + ) -> BaseSystemContracts { + let base = match version { ProtocolVersionId::Version0 | ProtocolVersionId::Version1 | ProtocolVersionId::Version2 @@ -54,11 +104,25 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version21 | ProtocolVersionId::Version22 => &self.post_1_4_2, ProtocolVersionId::Version23 => &self.vm_1_5_0_small_memory, ProtocolVersionId::Version24 => &self.vm_1_5_0_increased_memory, - ProtocolVersionId::Version25 | ProtocolVersionId::Version26 => &self.gateway, + ProtocolVersionId::Version25 | ProtocolVersionId::Version26 => { + &self.vm_protocol_defense + } + ProtocolVersionId::Version27 | ProtocolVersionId::Version28 => &self.gateway, + }; + let base = base.clone(); + + if version.is_post_1_5_0() && use_evm_emulator { + // EVM emulator is not versioned now; the latest version is always checked out + base.with_latest_evm_emulator() + } else { + base } } +} - pub(super) fn load_estimate_gas_blocking() -> Self { +impl MultiVMBaseSystemContracts { + /// Returned system contracts (mainly the bootloader) are tuned to provide accurate execution metrics. 
+ pub fn load_estimate_gas_blocking() -> Self { Self { pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), @@ -71,11 +135,16 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), vm_1_5_0_increased_memory: BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), + vm_protocol_defense: BaseSystemContracts::estimate_gas_post_protocol_defense(), gateway: BaseSystemContracts::estimate_gas_gateway(), + _contracts_kind: PhantomData, } } +} - pub(super) fn load_eth_call_blocking() -> Self { +impl MultiVMBaseSystemContracts { + /// Returned system contracts (mainly the bootloader) are tuned to provide better UX (e.g. revert messages). + pub fn load_eth_call_blocking() -> Self { Self { pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), @@ -88,7 +157,20 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( ), + vm_protocol_defense: BaseSystemContracts::playground_post_protocol_defense(), gateway: BaseSystemContracts::playground_gateway(), + _contracts_kind: PhantomData, } } } + +#[async_trait] +impl BaseSystemContractsProvider for MultiVMBaseSystemContracts { + async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result { + Ok(self + .get_by_protocol_version(block_info.protocol_version(), block_info.use_evm_emulator())) + } +} diff --git a/core/lib/vm_executor/src/oneshot/env.rs b/core/lib/vm_executor/src/oneshot/env.rs index 51154d561ec..6d70c3cfde9 100644 --- a/core/lib/vm_executor/src/oneshot/env.rs +++ b/core/lib/vm_executor/src/oneshot/env.rs @@ -1,19 +1,12 @@ -use std::marker::PhantomData; +use std::sync::Arc; -use anyhow::Context; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{OneshotEnv, TxExecutionMode}; use zksync_types::{fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId}; -use crate::oneshot::{contracts::MultiVMBaseSystemContracts, ResolvedBlockInfo}; - -/// Marker for [`OneshotEnvParameters`] used for gas estimation. -#[derive(Debug)] -pub struct EstimateGas(()); - -/// Marker for [`OneshotEnvParameters`] used for calls and/or transaction execution. -#[derive(Debug)] -pub struct CallOrExecute(()); +use super::{ + BaseSystemContractsProvider, CallOrExecute, ContractsKind, EstimateGas, ResolvedBlockInfo, +}; /// Oneshot environment parameters that are expected to be constant or rarely change during the program lifetime. /// These parameters can be used to create [a full environment](OneshotEnv) for transaction / call execution. @@ -21,15 +14,29 @@ pub struct CallOrExecute(()); /// Notably, these parameters include base system contracts (bootloader and default account abstraction) for all supported /// VM versions. #[derive(Debug)] -pub struct OneshotEnvParameters { +pub struct OneshotEnvParameters { pub(super) chain_id: L2ChainId, - pub(super) base_system_contracts: MultiVMBaseSystemContracts, + pub(super) base_system_contracts: Arc>, pub(super) operator_account: AccountTreeId, pub(super) validation_computational_gas_limit: u32, - _ty: PhantomData, } -impl OneshotEnvParameters { +impl OneshotEnvParameters { + /// Creates env parameters. 
+ pub fn new( + base_system_contracts: Arc>, + chain_id: L2ChainId, + operator_account: AccountTreeId, + validation_computational_gas_limit: u32, + ) -> Self { + Self { + chain_id, + base_system_contracts, + operator_account, + validation_computational_gas_limit, + } + } + /// Returns gas limit for account validation of transactions. pub fn validation_computational_gas_limit(&self) -> u32 { self.validation_computational_gas_limit @@ -37,27 +44,6 @@ impl OneshotEnvParameters { } impl OneshotEnvParameters { - /// Creates env parameters for gas estimation. - /// - /// System contracts (mainly, bootloader) for these params are tuned to provide accurate - /// execution metrics. - pub async fn for_gas_estimation( - chain_id: L2ChainId, - operator_account: AccountTreeId, - ) -> anyhow::Result { - Ok(Self { - chain_id, - base_system_contracts: tokio::task::spawn_blocking( - MultiVMBaseSystemContracts::load_estimate_gas_blocking, - ) - .await - .context("failed loading system contracts for gas estimation")?, - operator_account, - validation_computational_gas_limit: u32::MAX, - _ty: PhantomData, - }) - } - /// Prepares environment for gas estimation. pub async fn to_env( &self, @@ -78,28 +64,6 @@ impl OneshotEnvParameters { } impl OneshotEnvParameters { - /// Creates env parameters for transaction / call execution. - /// - /// System contracts (mainly, bootloader) for these params tuned to provide better UX - /// experience (e.g. revert messages). - pub async fn for_execution( - chain_id: L2ChainId, - operator_account: AccountTreeId, - validation_computational_gas_limit: u32, - ) -> anyhow::Result { - Ok(Self { - chain_id, - base_system_contracts: tokio::task::spawn_blocking( - MultiVMBaseSystemContracts::load_eth_call_blocking, - ) - .await - .context("failed loading system contracts for calls")?, - operator_account, - validation_computational_gas_limit, - _ty: PhantomData, - }) - } - /// Prepares environment for a call. 
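Since the dedicated `for_gas_estimation` / `for_execution` constructors are replaced by the generic `new` shown above, callers now wire in the contracts provider explicitly. A rough migration sketch for the gas-estimation case; module paths and error handling are approximate and not taken from this patch.

```rust
use std::sync::Arc;

// Assumed imports; exact crate and module paths may differ.
use zksync_types::{AccountTreeId, Address, L2ChainId};
use zksync_vm_executor::oneshot::{EstimateGas, MultiVMBaseSystemContracts, OneshotEnvParameters};

async fn estimate_gas_env_params() -> anyhow::Result<OneshotEnvParameters<EstimateGas>> {
    // Loading contracts is blocking I/O, so it is pushed onto the blocking pool,
    // mirroring what the removed `for_gas_estimation` constructor did internally.
    let contracts: MultiVMBaseSystemContracts<EstimateGas> =
        tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_estimate_gas_blocking)
            .await?;
    Ok(OneshotEnvParameters::new(
        Arc::new(contracts),
        L2ChainId::default(),
        AccountTreeId::new(Address::zero()),
        u32::MAX, // validation gas limit is irrelevant for estimation
    ))
}
```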
pub async fn to_call_env( &self, diff --git a/core/lib/vm_executor/src/oneshot/mock.rs b/core/lib/vm_executor/src/oneshot/mock.rs index 8f3a12603c1..a7363c633c6 100644 --- a/core/lib/vm_executor/src/oneshot/mock.rs +++ b/core/lib/vm_executor/src/oneshot/mock.rs @@ -68,6 +68,7 @@ impl MockOneshotExecutor { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, } }, ) diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index cb75f396b5d..5f9e4dd3c6f 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -19,8 +19,9 @@ use zksync_multivm::{ executor::{OneshotExecutor, TransactionValidator}, storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, tracer::{ValidationError, ValidationParams}, - ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, - StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, VmInterface, + ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, + OneshotTransactionExecutionResult, StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, + VmInterface, }, tracers::{CallTracer, StorageInvocations, ValidationTracer}, utils::adjust_pubdata_price_for_tx, @@ -40,7 +41,11 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; pub use self::{ block::{BlockInfo, ResolvedBlockInfo}, - env::{CallOrExecute, EstimateGas, OneshotEnvParameters}, + contracts::{ + BaseSystemContractsProvider, CallOrExecute, ContractsKind, EstimateGas, + MultiVMBaseSystemContracts, + }, + env::OneshotEnvParameters, mock::MockOneshotExecutor, }; @@ -165,7 +170,7 @@ where ); let exec_result = executor.apply(|vm, transaction| { vm.push_transaction(transaction); - vm.inspect(&mut tracers.into(), VmExecutionMode::OneTx) + vm.inspect(&mut tracers.into(), InspectExecutionMode::OneTx) }); let validation_result = Arc::make_mut(&mut validation_result) .take() diff --git a/core/lib/vm_executor/src/storage.rs b/core/lib/vm_executor/src/storage.rs index 861ee0649b3..e5a2d404233 100644 --- a/core/lib/vm_executor/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -54,7 +54,6 @@ pub fn l1_batch_params( protocol_version: ProtocolVersionId, virtual_blocks: u32, chain_id: L2ChainId, - pubdata_params: PubdataParams, ) -> (SystemEnv, L1BatchEnv) { ( SystemEnv { @@ -65,7 +64,6 @@ pub fn l1_batch_params( execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: validation_computational_gas_limit, chain_id, - pubdata_params, }, L1BatchEnv { previous_batch_hash: Some(previous_batch_hash), @@ -266,7 +264,7 @@ impl L1BatchParamsProvider { first_l2_block_in_batch: &FirstL2BlockInBatch, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { + ) -> anyhow::Result<(SystemEnv, L1BatchEnv, PubdataParams)> { anyhow::ensure!( first_l2_block_in_batch.l1_batch_number > L1BatchNumber(0), "Loading params for genesis L1 batch not supported" @@ -312,11 +310,15 @@ impl L1BatchParamsProvider { let contract_hashes = first_l2_block_in_batch.header.base_system_contracts_hashes; let base_system_contracts = storage .factory_deps_dal() - .get_base_system_contracts(contract_hashes.bootloader, contract_hashes.default_aa) + .get_base_system_contracts( + contract_hashes.bootloader, + contract_hashes.default_aa, + contract_hashes.evm_emulator, + ) .await .context("failed getting base system contracts")?; - Ok(l1_batch_params( + let (system_env, 
l1_batch_env) = l1_batch_params( first_l2_block_in_batch.l1_batch_number, first_l2_block_in_batch.header.fee_account_address, l1_batch_timestamp, @@ -332,13 +334,12 @@ impl L1BatchParamsProvider { .context("`protocol_version` must be set for L2 block")?, first_l2_block_in_batch.header.virtual_blocks, chain_id, - PubdataParams { - l2_da_validator_address: first_l2_block_in_batch - .header - .pubdata_params - .l2_da_validator_address, - pubdata_type: first_l2_block_in_batch.header.pubdata_params.pubdata_type, - }, + ); + + Ok(( + system_env, + l1_batch_env, + first_l2_block_in_batch.header.pubdata_params, )) } @@ -352,7 +353,7 @@ impl L1BatchParamsProvider { number: L1BatchNumber, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let first_l2_block = self .load_first_l2_block_in_batch(storage, number) .await diff --git a/core/lib/vm_executor/src/testonly.rs b/core/lib/vm_executor/src/testonly.rs new file mode 100644 index 00000000000..5bcd604a432 --- /dev/null +++ b/core/lib/vm_executor/src/testonly.rs @@ -0,0 +1,45 @@ +use once_cell::sync::Lazy; +use zksync_contracts::BaseSystemContracts; +use zksync_multivm::{ + interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; +use zksync_types::{ + block::L2BlockHasher, fee_model::BatchFeeInput, vm::FastVmMode, Address, L1BatchNumber, + L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, +}; + +static BASE_SYSTEM_CONTRACTS: Lazy = + Lazy::new(BaseSystemContracts::load_from_disk); + +pub(crate) const FAST_VM_MODES: [FastVmMode; 3] = + [FastVmMode::Old, FastVmMode::New, FastVmMode::Shadow]; + +pub(crate) fn default_system_env(execution_mode: TxExecutionMode) -> SystemEnv { + SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BASE_SYSTEM_CONTRACTS.clone(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::default(), + } +} + +pub(crate) fn default_l1_batch_env(number: u32) -> L1BatchEnv { + L1BatchEnv { + previous_batch_hash: Some(H256::zero()), + number: L1BatchNumber(number), + timestamp: number.into(), + fee_account: Address::repeat_byte(0x22), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number, + timestamp: number.into(), + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(number - 1)), + max_virtual_blocks_to_create: 1, + }, + fee_input: BatchFeeInput::sensible_l1_pegged_default(), + } +} diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs index 119f975fecd..60522ba338a 100644 --- a/core/lib/vm_interface/src/executor.rs +++ b/core/lib/vm_interface/src/executor.rs @@ -3,7 +3,7 @@ use std::fmt; use async_trait::async_trait; -use zksync_types::{l2::L2Tx, Transaction}; +use zksync_types::{commitment::PubdataParams, l2::L2Tx, Transaction}; use crate::{ storage::{ReadStorage, StorageView}, @@ -20,6 +20,7 @@ pub trait BatchExecutorFactory: 'static + Send + fmt::Debug { storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box>; } diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index 645e3e7c856..39f949e5d8a 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -24,16 +24,16 @@ pub use crate::{ VmRevertReason, 
VmRevertReasonParsingError, }, inputs::{ - L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, StoredL2BlockEnv, SystemEnv, - TxExecutionArgs, TxExecutionMode, VmExecutionMode, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, + StoredL2BlockEnv, SystemEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, }, outputs::{ BatchTransactionExecutionResult, BootloaderMemory, Call, CallType, CircuitStatistic, CompressedBytecodeInfo, CurrentExecutionState, DeduplicatedWritesMetrics, - ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, Refunds, - TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, - VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, - VmMemoryMetrics, + ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, + PushTransactionResult, Refunds, TransactionExecutionMetrics, + TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionLogs, + VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, }, tracer, }, @@ -41,6 +41,7 @@ pub use crate::{ }; pub mod executor; +pub mod pubdata; pub mod storage; mod types; pub mod utils; diff --git a/core/lib/vm_interface/src/pubdata/mod.rs b/core/lib/vm_interface/src/pubdata/mod.rs new file mode 100644 index 00000000000..f901687b5fa --- /dev/null +++ b/core/lib/vm_interface/src/pubdata/mod.rs @@ -0,0 +1,90 @@ +use zksync_types::{ + l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, ProtocolVersionId, H256, U256, +}; + +/// Corresponds to the following solidity event: +/// ```solidity +/// struct L2ToL1Log { +/// uint8 l2ShardId; +/// bool isService; +/// uint16 txNumberInBlock; +/// address sender; +/// bytes32 key; +/// bytes32 value; +/// } +/// ``` +#[derive(Debug, Default, Clone, PartialEq)] +pub struct L1MessengerL2ToL1Log { + pub l2_shard_id: u8, + pub is_service: bool, + pub tx_number_in_block: u16, + pub sender: Address, + pub key: U256, + pub value: U256, +} + +impl L1MessengerL2ToL1Log { + pub fn packed_encoding(&self) -> Vec { + /// Converts `U256` value into bytes array + fn u256_to_bytes_be(value: &U256) -> Vec { + let mut bytes = vec![0u8; 32]; + value.to_big_endian(bytes.as_mut_slice()); + bytes + } + + let mut res: Vec = vec![]; + res.push(self.l2_shard_id); + res.push(self.is_service as u8); + res.extend_from_slice(&self.tx_number_in_block.to_be_bytes()); + res.extend_from_slice(self.sender.as_bytes()); + res.extend(u256_to_bytes_be(&self.key)); + res.extend(u256_to_bytes_be(&self.value)); + res + } +} + +impl From for L2ToL1Log { + fn from(log: L1MessengerL2ToL1Log) -> Self { + fn u256_to_h256(num: U256) -> H256 { + let mut bytes = [0u8; 32]; + num.to_big_endian(&mut bytes); + H256::from_slice(&bytes) + } + + L2ToL1Log { + shard_id: log.l2_shard_id, + is_service: log.is_service, + tx_number_in_block: log.tx_number_in_block, + sender: log.sender, + key: u256_to_h256(log.key), + value: u256_to_h256(log.value), + } + } +} + +/// Struct based on which the pubdata blob is formed +#[derive(Debug, Clone, Default)] +pub struct PubdataInput { + pub user_logs: Vec, + pub l2_to_l1_messages: Vec>, + pub published_bytecodes: Vec>, + pub state_diffs: Vec, +} + +/// Trait that encapsulates pubdata building logic. It is implemented for rollup and validium cases. +/// If chains needs custom pubdata format then another implementation should be added. 
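As a point of reference for the encoding above: each `L1MessengerL2ToL1Log` packs into 1 + 1 + 2 + 20 + 32 + 32 = 88 bytes, mirroring the Solidity struct. The `PubdataBuilder` trait declared just below is the hook for rollup- and validium-specific pubdata formats; a minimal, purely illustrative implementation could look like the sketch below (crate paths are assumed, and this is not one of the real builders added elsewhere in the series).

```rust
use zksync_types::{Address, ProtocolVersionId};
use zksync_vm_interface::pubdata::{PubdataBuilder, PubdataInput};

/// Purely illustrative builder: publishes nothing to the settlement layer (validium-like).
#[derive(Debug)]
struct NoopPubdataBuilder {
    l2_da_validator: Address,
}

impl PubdataBuilder for NoopPubdataBuilder {
    fn l2_da_validator(&self) -> Address {
        self.l2_da_validator
    }

    fn l1_messenger_operator_input(
        &self,
        input: &PubdataInput,
        _protocol_version: ProtocolVersionId,
    ) -> Vec<u8> {
        // A real builder also serializes messages, bytecodes and state diffs; this sketch
        // only concatenates the 88-byte packed user logs described above.
        input
            .user_logs
            .iter()
            .flat_map(|log| log.packed_encoding())
            .collect()
    }

    fn settlement_layer_pubdata(
        &self,
        _input: &PubdataInput,
        _protocol_version: ProtocolVersionId,
    ) -> Vec<u8> {
        Vec::new() // validium-style: nothing is posted to the settlement layer
    }
}
```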
+pub trait PubdataBuilder: std::fmt::Debug { + fn l2_da_validator(&self) -> Address; + + fn l1_messenger_operator_input( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec; + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec; +} diff --git a/core/lib/vm_interface/src/storage/in_memory.rs b/core/lib/vm_interface/src/storage/in_memory.rs index 9e3e3203d01..6bd1dc8d552 100644 --- a/core/lib/vm_interface/src/storage/in_memory.rs +++ b/core/lib/vm_interface/src/storage/in_memory.rs @@ -36,7 +36,7 @@ impl InMemoryStorage { Self::with_custom_system_contracts_and_chain_id( chain_id, bytecode_hasher, - get_system_smart_contracts(), + get_system_smart_contracts(false), ) } diff --git a/core/lib/vm_interface/src/storage/view.rs b/core/lib/vm_interface/src/storage/view.rs index ec9267609e2..249d584c9f6 100644 --- a/core/lib/vm_interface/src/storage/view.rs +++ b/core/lib/vm_interface/src/storage/view.rs @@ -102,6 +102,16 @@ impl StorageView { pub fn cache(&self) -> StorageViewCache { self.cache.clone() } + + /// Provides mutable access to the underlying storage. + /// + /// # Warning + /// + /// Mutating the underlying storage directly can easily break implied `StorageView` invariants, so use with care. + #[doc(hidden)] + pub fn inner_mut(&mut self) -> &mut S { + &mut self.storage_handle + } } impl ReadStorage for Box diff --git a/core/lib/vm_interface/src/types/inputs/execution_mode.rs b/core/lib/vm_interface/src/types/inputs/execution_mode.rs index 41492af6edc..f091a259d30 100644 --- a/core/lib/vm_interface/src/types/inputs/execution_mode.rs +++ b/core/lib/vm_interface/src/types/inputs/execution_mode.rs @@ -13,3 +13,22 @@ pub enum VmExecutionMode { /// Stop after executing the entire bootloader. But before you exit the bootloader. Bootloader, } + +/// Subset of `VmExecutionMode` variants that do not require any additional input +/// and can be invoked with `inspect` method. +#[derive(Debug, Copy, Clone)] +pub enum InspectExecutionMode { + /// Stop after executing the next transaction. + OneTx, + /// Stop after executing the entire bootloader. But before you exit the bootloader. 
+ Bootloader, +} + +impl From for VmExecutionMode { + fn from(mode: InspectExecutionMode) -> Self { + match mode { + InspectExecutionMode::Bootloader => Self::Bootloader, + InspectExecutionMode::OneTx => Self::OneTx, + } + } +} diff --git a/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs b/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs index dbc94247617..0011f0b138b 100644 --- a/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs +++ b/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs @@ -1,5 +1,8 @@ use serde::{Deserialize, Serialize}; -use zksync_types::{fee_model::BatchFeeInput, Address, L1BatchNumber, H256}; +use zksync_types::{ + block::UnsealedL1BatchHeader, fee_model::BatchFeeInput, Address, L1BatchNumber, + ProtocolVersionId, H256, +}; use super::L2BlockEnv; @@ -21,3 +24,18 @@ pub struct L1BatchEnv { pub enforced_base_fee: Option, pub first_l2_block: L2BlockEnv, } + +impl L1BatchEnv { + pub fn into_unsealed_header( + self, + protocol_version: Option, + ) -> UnsealedL1BatchHeader { + UnsealedL1BatchHeader { + number: self.number, + timestamp: self.timestamp, + protocol_version, + fee_address: self.fee_account, + fee_input: self.fee_input, + } + } +} diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs index 24f58ae72f1..cb80ba7c138 100644 --- a/core/lib/vm_interface/src/types/inputs/mod.rs +++ b/core/lib/vm_interface/src/types/inputs/mod.rs @@ -3,7 +3,7 @@ use zksync_types::{ }; pub use self::{ - execution_mode::VmExecutionMode, + execution_mode::{InspectExecutionMode, VmExecutionMode}, l1_batch_env::L1BatchEnv, l2_block::{L2BlockEnv, StoredL2BlockEnv}, system_env::{SystemEnv, TxExecutionMode}, diff --git a/core/lib/vm_interface/src/types/inputs/system_env.rs b/core/lib/vm_interface/src/types/inputs/system_env.rs index 67d555f9bc0..5a0496752d5 100644 --- a/core/lib/vm_interface/src/types/inputs/system_env.rs +++ b/core/lib/vm_interface/src/types/inputs/system_env.rs @@ -2,7 +2,7 @@ use std::fmt::Debug; use serde::{Deserialize, Serialize}; use zksync_contracts::BaseSystemContracts; -use zksync_types::{commitment::PubdataParams, L2ChainId, ProtocolVersionId}; +use zksync_types::{L2ChainId, ProtocolVersionId}; /// Params related to the execution process, not batch it self #[derive(Clone, PartialEq, Serialize, Deserialize)] @@ -15,7 +15,6 @@ pub struct SystemEnv { pub execution_mode: TxExecutionMode, pub default_validation_computational_gas_limit: u32, pub chain_id: L2ChainId, - pub pubdata_params: PubdataParams, } impl Debug for SystemEnv { @@ -34,7 +33,6 @@ impl Debug for SystemEnv { ) .field("execution_mode", &self.execution_mode) .field("chain_id", &self.chain_id) - .field("pubdata_params", &self.pubdata_params) .finish() } } diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 3e53aad85f1..018ea075db5 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; use zksync_system_constants::{ BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, @@ -118,6 +120,10 @@ pub struct VmExecutionResultAndLogs { pub logs: VmExecutionLogs, pub statistics: VmExecutionStatistics, pub refunds: Refunds, + /// Bytecodes decommitted during VM execution. `None` if not computed by the VM. 
+ // FIXME: currently, this is only filled up by `vm_latest`; probably makes sense to narrow down + // to *dynamic* factory deps, so that `HashMap::new()` is a valid value for VMs not supporting EVM emulation. + pub new_known_factory_deps: Option>>, } #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs index 27241c2c0fa..8f7c1d4fb0d 100644 --- a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs +++ b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs @@ -26,6 +26,7 @@ impl FinishedL1Batch { logs: VmExecutionLogs::default(), statistics: VmExecutionStatistics::default(), refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: vec![], diff --git a/core/lib/vm_interface/src/types/outputs/mod.rs b/core/lib/vm_interface/src/types/outputs/mod.rs index 1fa1cd5d168..fe25801dd12 100644 --- a/core/lib/vm_interface/src/types/outputs/mod.rs +++ b/core/lib/vm_interface/src/types/outputs/mod.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + pub use self::{ bytecode::CompressedBytecodeInfo, execution_result::{ @@ -20,3 +22,14 @@ mod execution_state; mod finished_l1batch; mod l2_block; mod statistic; + +/// Result of pushing a transaction to the VM state without executing it. +#[derive(Debug)] +pub struct PushTransactionResult<'a> { + /// Compressed bytecodes for the transaction. If the VM doesn't support bytecode compression, returns + /// an empty slice. + /// + /// Importantly, these bytecodes are not guaranteed to be published by the transaction; + /// e.g., it may run out of gas during publication. + pub compressed_bytecodes: Cow<'a, [CompressedBytecodeInfo]>, +} diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index 095547076d4..f8e3851c832 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -109,7 +109,8 @@ pub struct VmExecutionStatistics { pub circuit_statistic: CircuitStatistic, } -/// Oracle metrics of the VM. +/// Oracle metrics reported by legacy VMs. +#[derive(Debug, Default)] pub struct VmMemoryMetrics { pub event_sink_inner: usize, pub event_sink_history: usize, diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs index 5dc2351dcf7..f23d6f307b8 100644 --- a/core/lib/vm_interface/src/utils/dump.rs +++ b/core/lib/vm_interface/src/utils/dump.rs @@ -1,13 +1,14 @@ -use std::collections::HashMap; +use std::{collections::HashMap, rc::Rc}; use serde::{Deserialize, Serialize}; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2BlockNumber, Transaction, H256}; use crate::{ + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView}, - BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - VmMemoryMetrics, VmTrackingContracts, + BytecodeCompressionResult, FinishedL1Batch, InspectExecutionMode, L1BatchEnv, L2BlockEnv, + PushTransactionResult, SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceExt, VmInterfaceHistoryEnabled, VmTrackingContracts, }; fn create_storage_snapshot( @@ -48,6 +49,7 @@ fn create_storage_snapshot( } /// VM dump allowing to re-run the VM on the same inputs. Can be (de)serialized. 
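Since `PushTransactionResult` above borrows from the VM, a caller that needs the compressed bytecodes after further VM calls has to copy them out first. A hedged sketch of such a caller (the crate-root re-exports are assumed, matching the updated `lib.rs`):

```rust
use zksync_types::Transaction;
use zksync_vm_interface::{CompressedBytecodeInfo, VmInterface};

/// Pushes a transaction and detaches the compression info from the VM borrow,
/// so the VM can be used again immediately afterwards.
fn push_and_copy_bytecodes<VM: VmInterface>(
    vm: &mut VM,
    tx: Transaction,
) -> Vec<CompressedBytecodeInfo> {
    vm.push_transaction(tx).compressed_bytecodes.into_owned()
}
```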
+/// Note, dump is not capable of finishing batch in terms of VM execution. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct VmDump { pub l1_batch_env: L1BatchEnv, @@ -98,7 +100,6 @@ impl VmDump { } } } - vm.finish_batch(); vm } } @@ -139,18 +140,30 @@ impl DumpingVm { } } +impl AsRef for DumpingVm { + fn as_ref(&self) -> &Vm { + &self.inner + } +} + +impl AsMut for DumpingVm { + fn as_mut(&mut self) -> &mut Vm { + &mut self.inner + } +} + impl VmInterface for DumpingVm { type TracerDispatcher = Vm::TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { self.record_transaction(tx.clone()); - self.inner.push_transaction(tx); + self.inner.push_transaction(tx) } fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { self.inner.inspect(dispatcher, execution_mode) } @@ -177,12 +190,8 @@ impl VmInterface for DumpingVm { .inspect_transaction_with_bytecode_compression(tracer, tx, with_compression) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.inner.record_vm_memory_metrics() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - self.inner.finish_batch() + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + self.inner.finish_batch(pubdata_builder) } } diff --git a/core/lib/vm_interface/src/utils/mod.rs b/core/lib/vm_interface/src/utils/mod.rs index 80a51c7b144..394df7fc9a1 100644 --- a/core/lib/vm_interface/src/utils/mod.rs +++ b/core/lib/vm_interface/src/utils/mod.rs @@ -2,7 +2,9 @@ pub use self::{ dump::VmDump, - shadow::{DivergenceErrors, DivergenceHandler, ShadowVm}, + shadow::{ + CheckDivergence, DivergenceErrors, DivergenceHandler, ShadowMut, ShadowRef, ShadowVm, + }, }; mod dump; diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs index 2819e54e9a7..d12d85fa2e3 100644 --- a/core/lib/vm_interface/src/utils/shadow.rs +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -1,7 +1,9 @@ use std::{ + any, cell::RefCell, collections::{BTreeMap, BTreeSet}, fmt, + rc::Rc, sync::Arc, }; @@ -9,10 +11,11 @@ use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transact use super::dump::{DumpingVm, VmDump}; use crate::{ + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageView}, - BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, VmTrackingContracts, + BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, InspectExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmTrackingContracts, }; /// Handler for VM divergences. @@ -65,6 +68,154 @@ impl VmWithReporting { } } +/// Reference to either the main or shadow VM. +#[derive(Debug)] +pub enum ShadowRef<'a, Main, Shadow> { + /// Reference to the main VM. + Main(&'a Main), + /// Reference to the shadow VM. + Shadow(&'a Shadow), +} + +/// Mutable reference to either the main or shadow VM. +#[derive(Debug)] +pub enum ShadowMut<'a, Main, Shadow> { + /// Reference to the main VM. + Main(&'a mut Main), + /// Reference to the shadow VM. + Shadow(&'a mut Shadow), +} + +/// Type that can check divergence between its instances. 
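The `CheckDivergence` trait declared next replaces the previous hard-coded comparison helpers. An in-crate implementation for some new result type would mirror the impls in this patch; the `BatchSummary` type below is hypothetical and shown only to illustrate the pattern (note that `DivergenceErrors::new` and `check_match` are private to this module, so real implementations live alongside the ones added here).

```rust
// Hypothetical in-crate example following the same shape as the impls in this patch.
#[derive(Debug, PartialEq)]
struct BatchSummary {
    tx_count: usize,
    pubdata_len: usize,
}

impl CheckDivergence for BatchSummary {
    fn check_divergence(&self, other: &Self) -> DivergenceErrors {
        let mut errors = DivergenceErrors::new();
        errors.check_match("batch_summary.tx_count", &self.tx_count, &other.tx_count);
        errors.check_match(
            "batch_summary.pubdata_len",
            &self.pubdata_len,
            &other.pubdata_len,
        );
        errors
    }
}
```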
+pub trait CheckDivergence { + /// Checks divergences and returns a list of divergence errors, if any. + fn check_divergence(&self, other: &Self) -> DivergenceErrors; +} + +#[derive(Debug)] +struct DivergingEq(T); + +impl CheckDivergence for DivergingEq { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match(any::type_name::(), &self.0, &other.0); + errors + } +} + +impl CheckDivergence for CurrentExecutionState { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match("final_state.events", &self.events, &other.events); + errors.check_match( + "final_state.user_l2_to_l1_logs", + &self.user_l2_to_l1_logs, + &other.user_l2_to_l1_logs, + ); + errors.check_match( + "final_state.system_logs", + &self.system_logs, + &other.system_logs, + ); + errors.check_match( + "final_state.storage_refunds", + &self.storage_refunds, + &other.storage_refunds, + ); + errors.check_match( + "final_state.pubdata_costs", + &self.pubdata_costs, + &other.pubdata_costs, + ); + errors.check_match( + "final_state.used_contract_hashes", + &self.used_contract_hashes.iter().collect::>(), + &other.used_contract_hashes.iter().collect::>(), + ); + + let main_deduplicated_logs = DivergenceErrors::gather_logs(&self.deduplicated_storage_logs); + let shadow_deduplicated_logs = + DivergenceErrors::gather_logs(&other.deduplicated_storage_logs); + errors.check_match( + "deduplicated_storage_logs", + &main_deduplicated_logs, + &shadow_deduplicated_logs, + ); + errors + } +} + +impl CheckDivergence for VmExecutionResultAndLogs { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match("result", &self.result, &other.result); + errors.check_match("logs.events", &self.logs.events, &other.logs.events); + errors.check_match( + "logs.system_l2_to_l1_logs", + &self.logs.system_l2_to_l1_logs, + &other.logs.system_l2_to_l1_logs, + ); + errors.check_match( + "logs.user_l2_to_l1_logs", + &self.logs.user_l2_to_l1_logs, + &other.logs.user_l2_to_l1_logs, + ); + let main_logs = UniqueStorageLogs::new(&self.logs.storage_logs); + let shadow_logs = UniqueStorageLogs::new(&other.logs.storage_logs); + errors.check_match("logs.storage_logs", &main_logs, &shadow_logs); + errors.check_match("refunds", &self.refunds, &other.refunds); + errors.check_match( + "statistics.circuit_statistic", + &self.statistics.circuit_statistic, + &other.statistics.circuit_statistic, + ); + errors.check_match( + "statistics.pubdata_published", + &self.statistics.pubdata_published, + &other.statistics.pubdata_published, + ); + errors.check_match( + "statistics.gas_remaining", + &self.statistics.gas_remaining, + &other.statistics.gas_remaining, + ); + errors.check_match( + "statistics.gas_used", + &self.statistics.gas_used, + &other.statistics.gas_used, + ); + errors.check_match( + "statistics.computational_gas_used", + &self.statistics.computational_gas_used, + &other.statistics.computational_gas_used, + ); + errors + } +} + +impl CheckDivergence for FinishedL1Batch { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.extend( + self.block_tip_execution_result + .check_divergence(&other.block_tip_execution_result), + ); + errors.extend( + self.final_execution_state + .check_divergence(&other.final_execution_state), + ); + + errors.check_match( + "final_bootloader_memory", + 
&self.final_bootloader_memory, + &other.final_bootloader_memory, + ); + errors.check_match("pubdata_input", &self.pubdata_input, &other.pubdata_input); + errors.check_match("state_diffs", &self.state_diffs, &other.state_diffs); + errors + } +} + /// Shadowed VM that executes 2 VMs for each operation and compares their outputs. /// /// If a divergence is detected, the VM state is dumped using [a pluggable handler](Self::set_dump_handler()), @@ -105,6 +256,66 @@ where pub fn dump_state(&self) -> VmDump { self.main.dump_state() } + + /// Gets the specified value from both the main and shadow VM, checking whether it matches on both. + pub fn get(&self, name: &str, mut action: impl FnMut(ShadowRef<'_, Main, Shadow>) -> R) -> R + where + R: PartialEq + fmt::Debug + 'static, + { + self.get_custom(name, |r| DivergingEq(action(r))).0 + } + + /// Same as [`Self::get()`], but uses custom divergence checks for the type encapsulated in the [`CheckDivergence`] trait. + pub fn get_custom( + &self, + name: &str, + mut action: impl FnMut(ShadowRef<'_, Main, Shadow>) -> R, + ) -> R { + let main_output = action(ShadowRef::Main(self.main.as_ref())); + let borrow = self.shadow.borrow(); + if let Some(shadow) = &*borrow { + let shadow_output = action(ShadowRef::Shadow(&shadow.vm)); + let errors = main_output.check_divergence(&shadow_output); + if let Err(err) = errors.into_result() { + drop(borrow); + self.report_shared(err.context(format!("get({name})"))); + } + } + main_output + } + + /// Gets the specified value from both the main and shadow VM, potentially changing their state + /// and checking whether the returned value matches. + pub fn get_mut( + &mut self, + name: &str, + mut action: impl FnMut(ShadowMut<'_, Main, Shadow>) -> R, + ) -> R + where + R: PartialEq + fmt::Debug + 'static, + { + self.get_custom_mut(name, |r| DivergingEq(action(r))).0 + } + + /// Same as [`Self::get_mut()`], but uses custom divergence checks for the type encapsulated in the [`CheckDivergence`] trait. + pub fn get_custom_mut( + &mut self, + name: &str, + mut action: impl FnMut(ShadowMut<'_, Main, Shadow>) -> R, + ) -> R + where + R: CheckDivergence, + { + let main_output = action(ShadowMut::Main(self.main.as_mut())); + if let Some(shadow) = self.shadow.get_mut() { + let shadow_output = action(ShadowMut::Shadow(&mut shadow.vm)); + let errors = main_output.check_divergence(&shadow_output); + if let Err(err) = errors.into_result() { + self.report_shared(err.context(format!("get_mut({name})"))); + } + } + main_output + } } impl ShadowVm @@ -123,7 +334,7 @@ where where Shadow: VmFactory, { - let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage.clone()); + let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage); let shadow = Shadow::new(batch_env.clone(), system_env.clone(), shadow_storage); let shadow = VmWithReporting { vm: shadow, @@ -151,7 +362,6 @@ where } } -/// **Important.** This doesn't properly handle tracers; they are not passed to the shadow VM! impl VmInterface for ShadowVm where S: ReadStorage, @@ -163,24 +373,41 @@ where ::TracerDispatcher, ); - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + let main_result = self.main.push_transaction(tx.clone()); + // Extend lifetime to `'static` so that the result isn't mutably borrowed from the main VM. 
+ // Unfortunately, there's no way to express that this borrow is actually immutable, which would allow not extending the lifetime unless there's a divergence. + let main_result: PushTransactionResult<'static> = PushTransactionResult { + compressed_bytecodes: main_result.compressed_bytecodes.into_owned().into(), + }; + if let Some(shadow) = self.shadow.get_mut() { - shadow.vm.push_transaction(tx.clone()); + let tx_repr = format!("{tx:?}"); // includes little data, so is OK to call proactively + let shadow_result = shadow.vm.push_transaction(tx); + + let mut errors = DivergenceErrors::new(); + errors.check_match( + "bytecodes", + &main_result.compressed_bytecodes, + &shadow_result.compressed_bytecodes, + ); + if let Err(err) = errors.into_result() { + let ctx = format!("pushing transaction {tx_repr}"); + self.report(err.context(ctx)); + } } - self.main.push_transaction(tx); + main_result } fn inspect( &mut self, (main_tracer, shadow_tracer): &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { let main_result = self.main.inspect(main_tracer, execution_mode); if let Some(shadow) = self.shadow.get_mut() { let shadow_result = shadow.vm.inspect(shadow_tracer, execution_mode); - let mut errors = DivergenceErrors::new(); - errors.check_results_match(&main_result, &shadow_result); - + let errors = main_result.check_divergence(&shadow_result); if let Err(err) = errors.into_result() { let ctx = format!("executing VM with mode {execution_mode:?}"); self.report(err.context(ctx)); @@ -202,7 +429,8 @@ where tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { - let tx_hash = tx.hash(); + let tx_repr = format!("{tx:?}"); // includes little data, so is OK to call proactively + let (main_bytecodes_result, main_tx_result) = self.main.inspect_transaction_with_bytecode_compression( main_tracer, @@ -220,11 +448,10 @@ where tx, with_compression, ); - let mut errors = DivergenceErrors::new(); - errors.check_results_match(&main_tx_result, &shadow_result.1); + let errors = main_tx_result.check_divergence(&shadow_result.1); if let Err(err) = errors.into_result() { let ctx = format!( - "inspecting transaction {tx_hash:?}, with_compression={with_compression:?}" + "inspecting transaction {tx_repr}, with_compression={with_compression:?}" ); self.report(err.context(ctx)); } @@ -232,39 +459,11 @@ where (main_bytecodes_result, main_tx_result) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.main.record_vm_memory_metrics() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let main_batch = self.main.finish_batch(); + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + let main_batch = self.main.finish_batch(pubdata_builder.clone()); if let Some(shadow) = self.shadow.get_mut() { - let shadow_batch = shadow.vm.finish_batch(); - let mut errors = DivergenceErrors::new(); - errors.check_results_match( - &main_batch.block_tip_execution_result, - &shadow_batch.block_tip_execution_result, - ); - errors.check_final_states_match( - &main_batch.final_execution_state, - &shadow_batch.final_execution_state, - ); - errors.check_match( - "final_bootloader_memory", - &main_batch.final_bootloader_memory, - &shadow_batch.final_bootloader_memory, - ); - errors.check_match( - "pubdata_input", - &main_batch.pubdata_input, - &shadow_batch.pubdata_input, - ); - errors.check_match( - "state_diffs", - &main_batch.state_diffs, - &shadow_batch.state_diffs, - ); - + let 
shadow_batch = shadow.vm.finish_batch(pubdata_builder); + let errors = main_batch.check_divergence(&shadow_batch); if let Err(err) = errors.into_result() { self.report(err); } @@ -305,48 +504,15 @@ impl DivergenceErrors { } } + fn extend(&mut self, from: Self) { + self.divergences.extend(from.divergences); + } + fn context(mut self, context: String) -> Self { self.context = Some(context); self } - fn check_results_match( - &mut self, - main_result: &VmExecutionResultAndLogs, - shadow_result: &VmExecutionResultAndLogs, - ) { - self.check_match("result", &main_result.result, &shadow_result.result); - self.check_match( - "logs.events", - &main_result.logs.events, - &shadow_result.logs.events, - ); - self.check_match( - "logs.system_l2_to_l1_logs", - &main_result.logs.system_l2_to_l1_logs, - &shadow_result.logs.system_l2_to_l1_logs, - ); - self.check_match( - "logs.user_l2_to_l1_logs", - &main_result.logs.user_l2_to_l1_logs, - &shadow_result.logs.user_l2_to_l1_logs, - ); - let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); - let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); - self.check_match("logs.storage_logs", &main_logs, &shadow_logs); - self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); - self.check_match( - "statistics.circuit_statistic", - &main_result.statistics.circuit_statistic, - &shadow_result.statistics.circuit_statistic, - ); - self.check_match( - "gas_remaining", - &main_result.statistics.gas_remaining, - &shadow_result.statistics.gas_remaining, - ); - } - fn check_match(&mut self, context: &str, main: &T, shadow: &T) { if main != shadow { let comparison = pretty_assertions::Comparison::new(main, shadow); @@ -355,47 +521,6 @@ impl DivergenceErrors { } } - fn check_final_states_match( - &mut self, - main: &CurrentExecutionState, - shadow: &CurrentExecutionState, - ) { - self.check_match("final_state.events", &main.events, &shadow.events); - self.check_match( - "final_state.user_l2_to_l1_logs", - &main.user_l2_to_l1_logs, - &shadow.user_l2_to_l1_logs, - ); - self.check_match( - "final_state.system_logs", - &main.system_logs, - &shadow.system_logs, - ); - self.check_match( - "final_state.storage_refunds", - &main.storage_refunds, - &shadow.storage_refunds, - ); - self.check_match( - "final_state.pubdata_costs", - &main.pubdata_costs, - &shadow.pubdata_costs, - ); - self.check_match( - "final_state.used_contract_hashes", - &main.used_contract_hashes.iter().collect::>(), - &shadow.used_contract_hashes.iter().collect::>(), - ); - - let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); - let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); - self.check_match( - "deduplicated_storage_logs", - &main_deduplicated_logs, - &shadow_deduplicated_logs, - ); - } - fn gather_logs(logs: &[StorageLog]) -> BTreeMap { logs.iter() .filter(|log| log.is_write()) diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index 90ae76be805..2c25d729e31 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -11,26 +11,34 @@ //! Generally speaking, in most cases, the tracer dispatcher is a wrapper around `Vec>`, //! where `VmTracer` is a trait implemented for a specific VM version. 
+use std::rc::Rc; + use zksync_types::{Transaction, H256}; use crate::{ - storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmMemoryMetrics, + pubdata::PubdataBuilder, storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, + VmExecutionResultAndLogs, }; pub trait VmInterface { /// Lifetime is used to be able to define `Option<&mut _>` as a dispatcher. type TracerDispatcher: Default; - /// Push transaction to bootloader memory. - fn push_transaction(&mut self, tx: Transaction); + /// Pushes a transaction to bootloader memory for future execution with bytecode compression (if it's supported by the VM). + /// + /// # Return value + /// + /// Returns preprocessing results, such as compressed bytecodes. The results may borrow from the VM state, + /// so you may want to inspect results before next operations with the VM, or clone the necessary parts. + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_>; /// Executes the next VM step (either next transaction or bootloader or the whole batch) /// with custom tracers. fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs; /// Start a new L2 block. @@ -44,18 +52,15 @@ pub trait VmInterface { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs); - /// Record VM memory metrics. - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics; - /// Execute batch till the end and return the result, with final execution state /// and bootloader memory. - fn finish_batch(&mut self) -> FinishedL1Batch; + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch; } /// Extension trait for [`VmInterface`] that provides some additional methods. pub trait VmInterfaceExt: VmInterface { /// Executes the next VM step (either next transaction or bootloader or the whole batch). - fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs { + fn execute(&mut self, execution_mode: InspectExecutionMode) -> VmExecutionResultAndLogs { self.inspect(&mut ::default(), execution_mode) } diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs index 8a4d2db8c6f..0f1fd9d34b8 100644 --- a/core/lib/web3_decl/src/namespaces/en.rs +++ b/core/lib/web3_decl/src/namespaces/en.rs @@ -28,6 +28,12 @@ pub trait EnNamespace { #[method(name = "consensusGlobalConfig")] async fn consensus_global_config(&self) -> RpcResult>; + #[method(name = "blockMetadata")] + async fn block_metadata( + &self, + block_number: L2BlockNumber, + ) -> RpcResult>; + /// Lists all tokens created at or before the specified `block_number`. /// /// This method is used by EN after snapshot recovery in order to recover token records. 
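Taken together, the `VmInterface` changes above reshape the per-batch call sequence: `push_transaction` now returns compression info, `inspect`/`execute` take `InspectExecutionMode`, and `finish_batch` needs an explicit pubdata builder. A hedged sketch of a caller under the new interface (crate paths and the `VmInterfaceExt` blanket impl are assumed to be available as re-exports):

```rust
use std::rc::Rc;

use zksync_types::Transaction;
use zksync_vm_interface::{
    pubdata::PubdataBuilder, FinishedL1Batch, InspectExecutionMode, VmInterface, VmInterfaceExt,
};

/// Drives a VM through a batch with the reworked interface.
fn run_batch<VM: VmInterface>(
    vm: &mut VM,
    txs: Vec<Transaction>,
    pubdata_builder: Rc<dyn PubdataBuilder>,
) -> FinishedL1Batch {
    for tx in txs {
        // The returned `PushTransactionResult` borrows from the VM; `let _ =` drops it
        // right away so the VM can be used again on the next line.
        let _ = vm.push_transaction(tx);
        // `execute` (from `VmInterfaceExt`) runs `inspect` with a default tracer dispatcher.
        vm.execute(InspectExecutionMode::OneTx);
    }
    // Sealing the batch now requires an explicit pubdata builder.
    vm.finish_batch(pubdata_builder)
}
```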
diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs index c11ea732bd6..4db58a06c59 100644 --- a/core/lib/web3_decl/src/namespaces/eth.rs +++ b/core/lib/web3_decl/src/namespaces/eth.rs @@ -13,7 +13,8 @@ use zksync_types::{ use crate::{ client::{ForWeb3Network, L2}, types::{ - Block, Bytes, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, U256, U64, + Block, Bytes, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, U64Number, + U256, U64, }, }; @@ -180,10 +181,13 @@ pub trait EthNamespace { #[method(name = "feeHistory")] async fn fee_history( &self, - block_count: U64, + block_count: U64Number, newest_block: BlockNumber, reward_percentiles: Option>, ) -> RpcResult; + + #[method(name = "maxPriorityFeePerGas")] + async fn max_priority_fee_per_gas(&self) -> RpcResult; } #[cfg(feature = "server")] diff --git a/core/lib/web3_decl/src/types.rs b/core/lib/web3_decl/src/types.rs index 9994d21107b..36ee48a54a1 100644 --- a/core/lib/web3_decl/src/types.rs +++ b/core/lib/web3_decl/src/types.rs @@ -16,7 +16,9 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; pub use zksync_types::{ api::{Block, BlockNumber, Log, TransactionReceipt, TransactionRequest}, ethabi, - web3::{BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, Work}, + web3::{ + BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, U64Number, Work, + }, Address, Transaction, H160, H256, H64, U256, U64, }; diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 46d70396aba..2bdc8094d14 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -42,9 +42,6 @@ pub enum Component { EthTxManager, /// State keeper. StateKeeper, - /// Produces input for the TEE verifier. - /// The blob is later used as input for TEE verifier. - TeeVerifierInputProducer, /// Component for housekeeping task such as cleaning blobs from GCS, reporting metrics etc. Housekeeper, /// Component for exposing APIs to prover for providing proof generation data and accepting proofs. 
@@ -87,9 +84,6 @@ impl FromStr for Components { "tree_api" => Ok(Components(vec![Component::TreeApi])), "state_keeper" => Ok(Components(vec![Component::StateKeeper])), "housekeeper" => Ok(Components(vec![Component::Housekeeper])), - "tee_verifier_input_producer" => { - Ok(Components(vec![Component::TeeVerifierInputProducer])) - } "eth" => Ok(Components(vec![ Component::EthWatcher, Component::EthTxAggregator, diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index b9ffb750b81..bc2fd77ae73 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -62,4 +62,5 @@ zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true assert_matches.workspace = true +const-decoder.workspace = true test-casing.workspace = true diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index d974f2e9aa1..bdd57462588 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -119,9 +119,16 @@ impl SandboxExecutor { } pub(crate) async fn mock(executor: MockOneshotExecutor) -> Self { + Self::custom_mock(executor, SandboxExecutorOptions::mock().await) + } + + pub(crate) fn custom_mock( + executor: MockOneshotExecutor, + options: SandboxExecutorOptions, + ) -> Self { Self { engine: SandboxExecutorEngine::Mock(executor), - options: SandboxExecutorOptions::mock().await, + options, storage_caches: None, } } @@ -175,7 +182,7 @@ impl SandboxExecutor { let initialization_stage = SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].start(); let resolve_started_at = Instant::now(); let resolve_time = resolve_started_at.elapsed(); - let resolved_block_info = block_args.inner.resolve(&mut connection).await?; + let resolved_block_info = &block_args.resolved; // We don't want to emit too many logs. if resolve_time > Duration::from_millis(10) { tracing::debug!("Resolved block numbers (took {resolve_time:?})"); @@ -185,7 +192,7 @@ impl SandboxExecutor { SandboxAction::Execution { fee_input, tx } => { self.options .eth_call - .to_execute_env(&mut connection, &resolved_block_info, *fee_input, tx) + .to_execute_env(&mut connection, resolved_block_info, *fee_input, tx) .await? } &SandboxAction::Call { @@ -197,7 +204,7 @@ impl SandboxExecutor { .eth_call .to_call_env( &mut connection, - &resolved_block_info, + resolved_block_info, fee_input, enforced_base_fee, ) @@ -210,7 +217,7 @@ impl SandboxExecutor { } => { self.options .estimate_gas - .to_env(&mut connection, &resolved_block_info, fee_input, base_fee) + .to_env(&mut connection, resolved_block_info, fee_input, base_fee) .await? } }; diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index 36f10b8e9b0..b560d161ab5 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -10,7 +10,7 @@ use zksync_multivm::utils::get_eth_call_gas_limit; use zksync_types::{ api, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, ProtocolVersionId, U256, }; -use zksync_vm_executor::oneshot::BlockInfo; +use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; use self::vm_metrics::SandboxStage; pub(super) use self::{ @@ -285,21 +285,32 @@ pub enum BlockArgsError { } /// Information about a block provided to VM. 
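With `BlockArgs` now resolving block info eagerly, sandbox callers can read the protocol version and EVM-emulator flag without extra DB round-trips. An in-crate sketch (method names are taken from this patch; the surrounding usage is assumed):

```rust
use zksync_dal::{Connection, Core};
use zksync_types::ProtocolVersionId;

/// Resolves pending block args once and reads data from the cached `ResolvedBlockInfo`.
async fn pending_protocol_version(
    connection: &mut Connection<'_, Core>,
) -> anyhow::Result<ProtocolVersionId> {
    let block_args = BlockArgs::pending(connection).await?;
    assert!(block_args.is_pending());
    // The EVM-emulator flag is available from the same resolved info.
    let _uses_evm_emulator = block_args.use_evm_emulator();
    Ok(block_args.protocol_version())
}
```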
-#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub(crate) struct BlockArgs { inner: BlockInfo, + resolved: ResolvedBlockInfo, block_id: api::BlockId, } impl BlockArgs { pub async fn pending(connection: &mut Connection<'_, Core>) -> anyhow::Result { let inner = BlockInfo::pending(connection).await?; + let resolved = inner.resolve(connection).await?; Ok(Self { inner, + resolved, block_id: api::BlockId::Number(api::BlockNumber::Pending), }) } + pub fn protocol_version(&self) -> ProtocolVersionId { + self.resolved.protocol_version() + } + + pub fn use_evm_emulator(&self) -> bool { + self.resolved.use_evm_emulator() + } + /// Loads block information from DB. pub async fn new( connection: &mut Connection<'_, Core>, @@ -326,8 +337,10 @@ impl BlockArgs { return Err(BlockArgsError::Missing); }; + let inner = BlockInfo::for_existing_block(connection, block_number).await?; Ok(Self { - inner: BlockInfo::for_existing_block(connection, block_number).await?, + inner, + resolved: inner.resolve(connection).await?, block_id, }) } diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 38115b5251f..e342f2d73de 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -5,15 +5,13 @@ use std::collections::HashMap; use assert_matches::assert_matches; use test_casing::test_casing; use zksync_dal::ConnectionPool; -use zksync_multivm::{ - interface::{ExecutionResult, Halt, VmRevertReason}, - utils::derive_base_fee_and_gas_per_pubdata, -}; +use zksync_multivm::{interface::ExecutionResult, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_node_test_utils::{create_l1_batch, create_l2_block, prepare_recovery_snapshot}; use zksync_state::PostgresStorageCaches; use zksync_types::{ api::state_override::{OverrideAccount, StateOverride}, + fee::Fee, fee_model::BatchFeeInput, K256PrivateKey, ProtocolVersionId, Transaction, U256, }; @@ -95,17 +93,6 @@ async fn creating_block_args_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); - assert_eq!( - pending_block_args.block_id, - api::BlockId::Number(api::BlockNumber::Pending) - ); - assert_eq!( - pending_block_args.resolved_block_number(), - snapshot_recovery.l2_block_number + 1 - ); - assert!(pending_block_args.is_pending()); - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) .await .unwrap(); @@ -124,6 +111,35 @@ async fn creating_block_args_after_snapshot_recovery() { .unwrap_err(); assert_matches!(err, BlockArgsError::Missing); + // Ensure there is a batch in the storage. 
+ let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); + storage + .blocks_dal() + .insert_l2_block(&l2_block) + .await + .unwrap(); + storage + .blocks_dal() + .insert_mock_l1_batch(&create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1)) + .await + .unwrap(); + storage + .blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(snapshot_recovery.l1_batch_number + 1) + .await + .unwrap(); + + let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); + assert_eq!( + pending_block_args.block_id, + api::BlockId::Number(api::BlockNumber::Pending) + ); + assert_eq!( + pending_block_args.resolved_block_number(), + snapshot_recovery.l2_block_number + 2 + ); + assert!(pending_block_args.is_pending()); + let pruned_blocks = [ api::BlockNumber::Earliest, 0.into(), @@ -149,13 +165,6 @@ async fn creating_block_args_after_snapshot_recovery() { assert_matches!(err, BlockArgsError::Missing); } - let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); - storage - .blocks_dal() - .insert_l2_block(&l2_block) - .await - .unwrap(); - let latest_block_args = BlockArgs::new(&mut storage, latest_block, &start_info) .await .unwrap(); @@ -213,11 +222,16 @@ async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args let fee_input = BatchFeeInput::l1_pegged(55, 555); let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = Transaction::from(K256PrivateKey::random().create_transfer( + let tx = K256PrivateKey::random().create_transfer_with_fee( 0.into(), - base_fee, - gas_per_pubdata, - )); + Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }, + ); + let tx = Transaction::from(tx); let (limiter, _) = VmConcurrencyLimiter::new(1); let vm_permit = limiter.acquire().await.unwrap(); @@ -256,7 +270,15 @@ async fn validating_transaction(set_balance: bool) { let fee_input = BatchFeeInput::l1_pegged(55, 555); let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = K256PrivateKey::random().create_transfer(0.into(), base_fee, gas_per_pubdata); + let tx = K256PrivateKey::random().create_transfer_with_fee( + 0.into(), + Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }, + ); let (limiter, _) = VmConcurrencyLimiter::new(1); let vm_permit = limiter.acquire().await.unwrap(); @@ -285,18 +307,6 @@ async fn validating_transaction(set_balance: bool) { if set_balance { assert_matches!(result, ExecutionResult::Success { .. }); } else { - // FIXME: maybe provide a better way to encode it? - let expected_reason = VmRevertReason::Unknown { - function_selector: vec![3, 235, 139, 84], - data: vec![ - 3, 235, 139, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 6, 157, 185, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }; - assert_matches!( - result, - ExecutionResult::Halt { reason: Halt::ValidationFailed(reason) } if reason == expected_reason - ); + assert_matches!(result, ExecutionResult::Halt { .. 
}); } } diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 5ee9cfb8ef1..c2f900484ba 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -1,53 +1,68 @@ //! Test utils shared among multiple modules. -use std::iter; +use std::{collections::HashMap, iter}; +use const_decoder::Decoder; use zk_evm_1_5_0::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_contracts::{ - get_loadnext_contract, load_contract, read_bytecode, + eth_contract, get_loadnext_contract, load_contract, read_bytecode, test_contracts::LoadnextContractExecutionParams, }; +use zksync_dal::{Connection, Core, CoreDal}; +use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ - ethabi::Token, fee::Fee, l2::L2Tx, transaction_request::PaymasterParams, Address, - K256PrivateKey, L2ChainId, Nonce, H256, U256, + api::state_override::{Bytecode, OverrideAccount, OverrideState, StateOverride}, + ethabi, + ethabi::Token, + fee::Fee, + fee_model::FeeParams, + get_code_key, get_known_code_key, + l2::L2Tx, + transaction_request::{CallRequest, PaymasterParams}, + utils::storage_key_for_eth_balance, + AccountTreeId, Address, K256PrivateKey, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, + StorageKey, StorageLog, H256, U256, }; +use zksync_utils::{address_to_u256, u256_to_h256}; -pub(crate) const LOAD_TEST_ADDRESS: Address = Address::repeat_byte(1); +pub(crate) const RAW_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"00000000000000000000000000000000000000000000000000000000000001266080604052348015\ + 600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063fb5343f314604c57\ + 5b5f80fd5b604a60048036038101906046919060a6565b6066565b005b6052606f565b604051605d\ + 919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd5b5f819050919050\ + 565b6088816078565b81146091575f80fd5b50565b5f8135905060a0816081565b92915050565b5f\ + 6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092915050565b60d381\ + 6078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056fea2646970667358\ + 221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9ce4d0964736f6c63\ + 4300081a00330000000000000000000000000000000000000000000000000000" +); +pub(crate) const PROCESSED_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"6080604052348015600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063\ + fb5343f314604c575b5f80fd5b604a60048036038101906046919060a6565b6066565b005b605260\ + 6f565b604051605d919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd\ + 5b5f819050919050565b6088816078565b81146091575f80fd5b50565b5f8135905060a081608156\ + 5b92915050565b5f6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092\ + 915050565b60d3816078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056\ + fea2646970667358221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9\ + ce4d0964736f6c634300081a0033" +); const EXPENSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; -pub(crate) const EXPENSIVE_CONTRACT_ADDRESS: Address = Address::repeat_byte(2); - const PRECOMPILES_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json"; -pub(crate) const PRECOMPILES_CONTRACT_ADDRESS: Address = Address::repeat_byte(3); - const COUNTER_CONTRACT_PATH: &str = 
"etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json"; -pub(crate) const COUNTER_CONTRACT_ADDRESS: Address = Address::repeat_byte(4); - const INFINITE_LOOP_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/infinite/infinite.sol/InfiniteLoop.json"; -pub(crate) const INFINITE_LOOP_CONTRACT_ADDRESS: Address = Address::repeat_byte(5); - -pub(crate) fn read_expensive_contract_bytecode() -> Vec { - read_bytecode(EXPENSIVE_CONTRACT_PATH) -} - -pub(crate) fn read_precompiles_contract_bytecode() -> Vec { - read_bytecode(PRECOMPILES_CONTRACT_PATH) -} - -pub(crate) fn read_counter_contract_bytecode() -> Vec { - read_bytecode(COUNTER_CONTRACT_PATH) -} - -pub(crate) fn read_infinite_loop_contract_bytecode() -> Vec { - read_bytecode(INFINITE_LOOP_CONTRACT_PATH) -} +const MULTICALL3_CONTRACT_PATH: &str = + "contracts/l2-contracts/artifacts-zk/contracts/dev-contracts/Multicall3.sol/Multicall3.json"; /// Inflates the provided bytecode by appending the specified amount of NOP instructions at the end. -pub(crate) fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { +fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { bytecode.extend( iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) .take(nop_count) @@ -56,25 +71,266 @@ pub(crate) fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { } fn default_fee() -> Fee { + let fee_input = FeeParams::sensible_v1_default().scale(1.0, 1.0); + let (max_fee_per_gas, gas_per_pubdata_limit) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::default().into()); Fee { - gas_limit: 200_000.into(), - max_fee_per_gas: 55.into(), + gas_limit: 10_000_000.into(), + max_fee_per_gas: max_fee_per_gas.into(), max_priority_fee_per_gas: 0_u64.into(), - gas_per_pubdata_limit: 555.into(), + gas_per_pubdata_limit: gas_per_pubdata_limit.into(), } } +#[derive(Debug, Default)] +pub(crate) struct StateBuilder { + inner: HashMap, +} + +impl StateBuilder { + pub(crate) const LOAD_TEST_ADDRESS: Address = Address::repeat_byte(1); + pub(crate) const EXPENSIVE_CONTRACT_ADDRESS: Address = Address::repeat_byte(2); + pub(crate) const PRECOMPILES_CONTRACT_ADDRESS: Address = Address::repeat_byte(3); + const COUNTER_CONTRACT_ADDRESS: Address = Address::repeat_byte(4); + const INFINITE_LOOP_CONTRACT_ADDRESS: Address = Address::repeat_byte(5); + const MULTICALL3_ADDRESS: Address = Address::repeat_byte(6); + + pub fn with_contract(mut self, address: Address, bytecode: Vec) -> Self { + self.inner.insert( + address, + OverrideAccount { + code: Some(Bytecode::new(bytecode).unwrap()), + ..OverrideAccount::default() + }, + ); + self + } + + pub fn inflate_bytecode(mut self, address: Address, nop_count: usize) -> Self { + let account_override = self.inner.get_mut(&address).expect("no contract"); + let bytecode = account_override.code.take().expect("no code override"); + let mut bytecode = bytecode.into_bytes(); + inflate_bytecode(&mut bytecode, nop_count); + account_override.code = Some(Bytecode::new(bytecode).unwrap()); + self + } + + pub fn with_load_test_contract(mut self) -> Self { + // Set the array length in the load test contract to 100, so that reads don't fail. 
+ let state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); + self.inner.insert( + Self::LOAD_TEST_ADDRESS, + OverrideAccount { + code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), + state: Some(OverrideState::State(state)), + ..OverrideAccount::default() + }, + ); + self + } + + pub fn with_balance(mut self, address: Address, balance: U256) -> Self { + self.inner.entry(address).or_default().balance = Some(balance); + self + } + + pub fn with_expensive_contract(self) -> Self { + self.with_contract( + Self::EXPENSIVE_CONTRACT_ADDRESS, + read_bytecode(EXPENSIVE_CONTRACT_PATH), + ) + } + + pub fn with_precompiles_contract(self) -> Self { + self.with_contract( + Self::PRECOMPILES_CONTRACT_ADDRESS, + read_bytecode(PRECOMPILES_CONTRACT_PATH), + ) + } + + pub fn with_counter_contract(self, initial_value: u64) -> Self { + let mut this = self.with_contract( + Self::COUNTER_CONTRACT_ADDRESS, + read_bytecode(COUNTER_CONTRACT_PATH), + ); + if initial_value != 0 { + let state = HashMap::from([(H256::zero(), H256::from_low_u64_be(initial_value))]); + this.inner + .get_mut(&Self::COUNTER_CONTRACT_ADDRESS) + .unwrap() + .state = Some(OverrideState::State(state)); + } + this + } + + pub fn with_infinite_loop_contract(self) -> Self { + self.with_contract( + Self::INFINITE_LOOP_CONTRACT_ADDRESS, + read_bytecode(INFINITE_LOOP_CONTRACT_PATH), + ) + } + + pub fn with_multicall3_contract(self) -> Self { + self.with_contract( + Self::MULTICALL3_ADDRESS, + read_bytecode(MULTICALL3_CONTRACT_PATH), + ) + } + + pub fn build(self) -> StateOverride { + StateOverride::new(self.inner) + } + + /// Applies these state overrides to Postgres storage, which is assumed to be empty (other than genesis data). + pub async fn apply(self, connection: &mut Connection<'_, Core>) { + let mut storage_logs = vec![]; + let mut factory_deps = HashMap::new(); + for (address, account) in self.inner { + if let Some(balance) = account.balance { + let balance_key = storage_key_for_eth_balance(&address); + storage_logs.push(StorageLog::new_write_log( + balance_key, + u256_to_h256(balance), + )); + } + if let Some(code) = account.code { + let code_hash = code.hash(); + storage_logs.extend([ + StorageLog::new_write_log(get_code_key(&address), code_hash), + StorageLog::new_write_log( + get_known_code_key(&code_hash), + H256::from_low_u64_be(1), + ), + ]); + factory_deps.insert(code_hash, code.into_bytes()); + } + if let Some(state) = account.state { + let state_slots = match state { + OverrideState::State(slots) | OverrideState::StateDiff(slots) => slots, + }; + let state_logs = state_slots.into_iter().map(|(key, value)| { + let key = StorageKey::new(AccountTreeId::new(address), key); + StorageLog::new_write_log(key, value) + }); + storage_logs.extend(state_logs); + } + } + + connection + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &storage_logs) + .await + .unwrap(); + connection + .factory_deps_dal() + .insert_factory_deps(L2BlockNumber(0), &factory_deps) + .await + .unwrap(); + } +} + +#[derive(Debug)] +pub(crate) struct Call3Value { + target: Address, + allow_failure: bool, + value: U256, + calldata: Vec, +} + +impl Call3Value { + pub fn allow_failure(mut self) -> Self { + self.allow_failure = true; + self + } + + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::Address(self.target), + Token::Bool(self.allow_failure), + Token::Uint(self.value), + Token::Bytes(self.calldata.clone()), + ]) + } +} + +impl From for Call3Value { + fn from(req: CallRequest) -> Self { + Self { + target: 
req.to.unwrap(), + allow_failure: false, + value: req.value.unwrap_or_default(), + calldata: req.data.unwrap_or_default().0, + } + } +} + +impl From for Call3Value { + fn from(tx: L2Tx) -> Self { + Self { + target: tx.recipient_account().unwrap(), + allow_failure: false, + value: tx.execute.value, + calldata: tx.execute.calldata, + } + } +} + +#[derive(Debug)] +pub(crate) struct Call3Result { + pub success: bool, + pub return_data: Vec, +} + +impl Call3Result { + pub fn parse(raw: &[u8]) -> Vec { + let mut tokens = load_contract(MULTICALL3_CONTRACT_PATH) + .function("aggregate3Value") + .expect("no `aggregate3Value` function") + .decode_output(raw) + .expect("failed decoding `aggregate3Value` output"); + assert_eq!(tokens.len(), 1, "Invalid output length"); + let Token::Array(results) = tokens.pop().unwrap() else { + panic!("Invalid token type, expected an array"); + }; + results.into_iter().map(Self::parse_single).collect() + } + + fn parse_single(token: Token) -> Self { + let Token::Tuple(mut tokens) = token else { + panic!("Invalid token type, expected a tuple"); + }; + assert_eq!(tokens.len(), 2); + let return_data = tokens.pop().unwrap().into_bytes().expect("expected bytes"); + let success = tokens.pop().unwrap().into_bool().expect("expected bool"); + Self { + success, + return_data, + } + } + + pub fn as_u256(&self) -> U256 { + decode_u256_output(&self.return_data) + } +} + +pub(crate) fn decode_u256_output(raw_output: &[u8]) -> U256 { + let mut tokens = ethabi::decode_whole(&[ethabi::ParamType::Uint(256)], raw_output) + .expect("unexpected return data"); + assert_eq!(tokens.len(), 1); + tokens.pop().unwrap().into_uint().unwrap() +} + pub(crate) trait TestAccount { - fn create_transfer(&self, value: U256, fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { + fn create_transfer(&self, value: U256) -> L2Tx { let fee = Fee { gas_limit: 200_000.into(), - max_fee_per_gas: fee_per_gas.into(), - max_priority_fee_per_gas: 0_u64.into(), - gas_per_pubdata_limit: gas_per_pubdata.into(), + ..default_fee() }; self.create_transfer_with_fee(value, fee) } + fn query_base_token_balance(&self) -> CallRequest; + fn create_transfer_with_fee(&self, value: U256, fee: Fee) -> L2Tx; fn create_load_test_tx(&self, params: LoadnextContractExecutionParams) -> L2Tx; @@ -85,9 +341,13 @@ pub(crate) trait TestAccount { fn create_code_oracle_tx(&self, bytecode_hash: H256, expected_keccak_hash: H256) -> L2Tx; - fn create_reverting_counter_tx(&self) -> L2Tx; + fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx; + + fn query_counter_value(&self) -> CallRequest; fn create_infinite_loop_tx(&self) -> L2Tx; + + fn multicall_with_value(&self, value: U256, calls: &[Call3Value]) -> CallRequest; } impl TestAccount for K256PrivateKey { @@ -106,9 +366,23 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn query_base_token_balance(&self) -> CallRequest { + let data = eth_contract() + .function("balanceOf") + .expect("No `balanceOf` function in contract") + .encode_input(&[Token::Uint(address_to_u256(&self.address()))]) + .expect("failed encoding `balanceOf` function"); + CallRequest { + from: Some(self.address()), + to: Some(L2_BASE_TOKEN_ADDRESS), + data: Some(data.into()), + ..CallRequest::default() + } + } + fn create_load_test_tx(&self, params: LoadnextContractExecutionParams) -> L2Tx { L2Tx::new_signed( - Some(LOAD_TEST_ADDRESS), + Some(StateBuilder::LOAD_TEST_ADDRESS), params.to_bytes(), Nonce(0), default_fee(), @@ -132,7 +406,7 @@ impl TestAccount for K256PrivateKey { 
.encode_input(&[Token::Uint(write_count.into())]) .expect("failed encoding `expensive` function"); L2Tx::new_signed( - Some(EXPENSIVE_CONTRACT_ADDRESS), + Some(StateBuilder::EXPENSIVE_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -152,7 +426,7 @@ impl TestAccount for K256PrivateKey { .encode_input(&[]) .expect("failed encoding `cleanUp` input"); L2Tx::new_signed( - Some(EXPENSIVE_CONTRACT_ADDRESS), + Some(StateBuilder::EXPENSIVE_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -175,7 +449,7 @@ impl TestAccount for K256PrivateKey { ]) .expect("failed encoding `callCodeOracle` input"); L2Tx::new_signed( - Some(PRECOMPILES_CONTRACT_ADDRESS), + Some(StateBuilder::PRECOMPILES_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -188,14 +462,14 @@ impl TestAccount for K256PrivateKey { .unwrap() } - fn create_reverting_counter_tx(&self) -> L2Tx { + fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx { let calldata = load_contract(COUNTER_CONTRACT_PATH) .function("incrementWithRevert") .expect("no `incrementWithRevert` function") - .encode_input(&[Token::Uint(1.into()), Token::Bool(true)]) + .encode_input(&[Token::Uint(increment), Token::Bool(revert)]) .expect("failed encoding `incrementWithRevert` input"); L2Tx::new_signed( - Some(COUNTER_CONTRACT_ADDRESS), + Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -208,6 +482,20 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn query_counter_value(&self) -> CallRequest { + let calldata = load_contract(COUNTER_CONTRACT_PATH) + .function("get") + .expect("no `get` function") + .encode_input(&[]) + .expect("failed encoding `get` input"); + CallRequest { + from: Some(self.address()), + to: Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), + data: Some(calldata.into()), + ..CallRequest::default() + } + } + fn create_infinite_loop_tx(&self) -> L2Tx { let calldata = load_contract(INFINITE_LOOP_CONTRACT_PATH) .function("infiniteLoop") @@ -215,7 +503,7 @@ impl TestAccount for K256PrivateKey { .encode_input(&[]) .expect("failed encoding `infiniteLoop` input"); L2Tx::new_signed( - Some(INFINITE_LOOP_CONTRACT_ADDRESS), + Some(StateBuilder::INFINITE_LOOP_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -227,4 +515,20 @@ impl TestAccount for K256PrivateKey { ) .unwrap() } + + fn multicall_with_value(&self, value: U256, calls: &[Call3Value]) -> CallRequest { + let call_tokens = calls.iter().map(Call3Value::to_token).collect(); + let calldata = load_contract(MULTICALL3_CONTRACT_PATH) + .function("aggregate3Value") + .expect("no `aggregate3Value` function") + .encode_input(&[Token::Array(call_tokens)]) + .expect("failed encoding `aggregate3Value` input"); + CallRequest { + from: Some(self.address()), + to: Some(StateBuilder::MULTICALL3_ADDRESS), + value: Some(value), + data: Some(calldata.into()), + ..CallRequest::default() + } + } } diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs index f5e42875a3d..b4a05a0756b 100644 --- a/core/node/api_server/src/tx_sender/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/gas_estimation.rs @@ -44,13 +44,14 @@ impl TxSender { pub async fn get_txs_fee_in_wei( &self, tx: Transaction, + block_args: BlockArgs, estimated_fee_scale_factor: f64, acceptable_overestimation: u64, state_override: Option, kind: BinarySearchKind, ) -> Result { let estimation_started_at = Instant::now(); - let mut estimator = GasEstimator::new(self, tx, state_override).await?; + let mut estimator = 
GasEstimator::new(self, tx, block_args, state_override).await?; estimator.adjust_transaction_fee(); let initial_estimate = estimator.initialize().await?; @@ -130,10 +131,7 @@ impl TxSender { if let Some(pivot) = initial_pivot { let iteration_started_at = Instant::now(); - let (result, _) = estimator - .step(pivot) - .await - .context("estimate_gas step failed")?; + let (result, _) = estimator.step(pivot).await?; Self::adjust_search_bounds(&mut lower_bound, &mut upper_bound, pivot, &result); tracing::trace!( @@ -150,10 +148,7 @@ impl TxSender { // or normal execution errors, so we just hope that increasing the // gas limit will make the transaction successful let iteration_started_at = Instant::now(); - let (result, _) = estimator - .step(mid) - .await - .context("estimate_gas step failed")?; + let (result, _) = estimator.step(mid).await?; Self::adjust_search_bounds(&mut lower_bound, &mut upper_bound, mid, &result); tracing::trace!( @@ -205,7 +200,11 @@ impl TxSender { tx.initiator_account(), tx.execute.value ); - return Err(SubmitTxError::InsufficientFundsForTransfer); + return Err(SubmitTxError::NotEnoughBalanceForFeeValue( + balance, + 0.into(), + tx.execute.value, + )); } } Ok(()) @@ -309,16 +308,10 @@ impl<'a> GasEstimator<'a> { pub(super) async fn new( sender: &'a TxSender, mut transaction: Transaction, + block_args: BlockArgs, state_override: Option, ) -> Result { - let mut connection = sender.acquire_replica_connection().await?; - let block_args = BlockArgs::pending(&mut connection).await?; - let protocol_version = connection - .blocks_dal() - .pending_protocol_version() - .await - .context("failed getting pending protocol version")?; - drop(connection); + let protocol_version = block_args.protocol_version(); let max_gas_limit = get_max_batch_gas_limit(protocol_version.into()); let fee_input = adjust_pubdata_price_for_tx( @@ -398,10 +391,7 @@ impl<'a> GasEstimator<'a> { // For L2 transactions, we estimate the amount of gas needed to cover for the pubdata by creating a transaction with infinite gas limit, // and getting how much pubdata it used. - let (result, _) = self - .unadjusted_step(self.max_gas_limit) - .await - .context("estimate_gas step failed")?; + let (result, _) = self.unadjusted_step(self.max_gas_limit).await?; // If the transaction has failed with such a large gas limit, we return an API error here right away, // since the inferred gas bounds would be unreliable in this case. 
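For orientation, the gas estimation above is a binary search: `lower_bound` and `upper_bound` are narrowed around the smallest gas limit at which the VM run succeeds, and the loop stops once the bounds are within `acceptable_overestimation`. A minimal, self-contained sketch of that principle follows; the `succeeds_with` closure is a hypothetical stand-in for a VM run such as `estimator.step(..)`, not the actual API.

```rust
/// Illustrative only: the binary-search idea behind `get_txs_fee_in_wei`.
fn binary_search_gas_limit(
    mut lower_bound: u64,
    mut upper_bound: u64,
    acceptable_overestimation: u64,
    succeeds_with: impl Fn(u64) -> bool,
) -> u64 {
    while lower_bound + acceptable_overestimation < upper_bound {
        let mid = (lower_bound + upper_bound) / 2;
        if succeeds_with(mid) {
            upper_bound = mid; // the tx fits; try a smaller limit
        } else {
            lower_bound = mid + 1; // out of gas or reverted; need a larger limit
        }
    }
    upper_bound
}

fn main() {
    // Pretend the transaction needs exactly 21_000 gas.
    let estimate = binary_search_gas_limit(0, 80_000_000, 1_000, |gas| gas >= 21_000);
    assert!((21_000..=22_000).contains(&estimate));
    println!("estimated gas limit: {estimate}");
}
```

A larger `acceptable_overestimation` terminates the search in fewer VM runs at the cost of a coarser estimate, which is why the tests further below exercise several values of it.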
result.check_api_call_result()?; @@ -435,7 +425,7 @@ impl<'a> GasEstimator<'a> { async fn step( &self, tx_gas_limit: u64, - ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> { + ) -> Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics), SubmitTxError> { let gas_limit_with_overhead = tx_gas_limit + self.tx_overhead(tx_gas_limit); // We need to ensure that we never use a gas limit that is higher than the maximum allowed let forced_gas_limit = @@ -446,13 +436,16 @@ impl<'a> GasEstimator<'a> { pub(super) async fn unadjusted_step( &self, forced_gas_limit: u64, - ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> { + ) -> Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics), SubmitTxError> { let mut tx = self.transaction.clone(); match &mut tx.common_data { ExecuteTransactionCommon::L1(l1_common_data) => { l1_common_data.gas_limit = forced_gas_limit.into(); - let required_funds = - l1_common_data.gas_limit * l1_common_data.max_fee_per_gas + tx.execute.value; + // Since `tx.execute.value` is supplied by the client and is not checked against the current balance (unlike for L2 transactions), + // we may hit an integer overflow. Ditto for protocol upgrade transactions below. + let required_funds = (l1_common_data.gas_limit * l1_common_data.max_fee_per_gas) + .checked_add(tx.execute.value) + .ok_or(SubmitTxError::MintedAmountOverflow)?; l1_common_data.to_mint = required_funds; } ExecuteTransactionCommon::L2(l2_common_data) => { @@ -460,8 +453,9 @@ impl<'a> GasEstimator<'a> { } ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { common_data.gas_limit = forced_gas_limit.into(); - let required_funds = - common_data.gas_limit * common_data.max_fee_per_gas + tx.execute.value; + let required_funds = (common_data.gas_limit * common_data.max_fee_per_gas) + .checked_add(tx.execute.value) + .ok_or(SubmitTxError::MintedAmountOverflow)?; common_data.to_mint = required_funds; } } @@ -490,10 +484,7 @@ impl<'a> GasEstimator<'a> { suggested_gas_limit: u64, estimated_fee_scale_factor: f64, ) -> Result { - let (result, tx_metrics) = self - .step(suggested_gas_limit) - .await - .context("final estimate_gas step failed")?; + let (result, tx_metrics) = self.step(suggested_gas_limit).await?; result.into_api_call_result()?; self.sender .ensure_tx_executable(&self.transaction, &tx_metrics, false)?; diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index ad8e38ef3cc..38794fe7137 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -29,7 +29,9 @@ use zksync_types::{ MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::h256_to_u256; -use zksync_vm_executor::oneshot::{CallOrExecute, EstimateGas, OneshotEnvParameters}; +use zksync_vm_executor::oneshot::{ + CallOrExecute, EstimateGas, MultiVMBaseSystemContracts, OneshotEnvParameters, +}; pub(super) use self::{gas_estimation::BinarySearchKind, result::SubmitTxError}; use self::{master_pool_sink::MasterPoolSink, result::ApiCallResult, tx_sink::TxSink}; @@ -102,15 +104,28 @@ impl SandboxExecutorOptions { operator_account: AccountTreeId, validation_computational_gas_limit: u32, ) -> anyhow::Result { + let estimate_gas_contracts = + tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_estimate_gas_blocking) + .await + .context("failed loading base contracts for gas estimation")?; + let call_contracts = + tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_eth_call_blocking) + .await + 
.context("failed loading base contracts for calls / tx execution")?; + Ok(Self { - estimate_gas: OneshotEnvParameters::for_gas_estimation(chain_id, operator_account) - .await?, - eth_call: OneshotEnvParameters::for_execution( + estimate_gas: OneshotEnvParameters::new( + Arc::new(estimate_gas_contracts), + chain_id, + operator_account, + u32::MAX, + ), + eth_call: OneshotEnvParameters::new( + Arc::new(call_contracts), chain_id, operator_account, validation_computational_gas_limit, - ) - .await?, + ), }) } @@ -280,13 +295,11 @@ impl TxSender { pub async fn submit_tx( &self, tx: L2Tx, + block_args: BlockArgs, ) -> Result<(L2TxSubmissionResult, VmExecutionResultAndLogs), SubmitTxError> { let tx_hash = tx.hash(); let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::Validate); - let mut connection = self.acquire_replica_connection().await?; - let protocol_version = connection.blocks_dal().pending_protocol_version().await?; - drop(connection); - self.validate_tx(&tx, protocol_version).await?; + self.validate_tx(&tx, block_args.protocol_version()).await?; stage_latency.observe(); let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DryRun); @@ -305,9 +318,7 @@ impl TxSender { tx: tx.clone(), }; let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; - let mut connection = self.acquire_replica_connection().await?; - let block_args = BlockArgs::pending(&mut connection).await?; - + let connection = self.acquire_replica_connection().await?; let execution_output = self .0 .executor diff --git a/core/node/api_server/src/tx_sender/result.rs b/core/node/api_server/src/tx_sender/result.rs index a49313f0dd6..e2a51ae8e9a 100644 --- a/core/node/api_server/src/tx_sender/result.rs +++ b/core/node/api_server/src/tx_sender/result.rs @@ -24,8 +24,6 @@ pub enum SubmitTxError { GasLimitIsTooBig, #[error("{0}")] Unexecutable(String), - #[error("too many transactions")] - RateLimitExceeded, #[error("server shutting down")] ServerShuttingDown, #[error("failed to include transaction in the system. reason: {0}")] @@ -49,29 +47,23 @@ pub enum SubmitTxError { that caused this error. Error description: {0}" )] UnexpectedVMBehavior(String), - #[error("pubdata price limit is too low, ensure that the price limit is correct")] - UnrealisticPubdataPriceLimit, #[error( "too many factory dependencies in the transaction. {0} provided, while only {1} allowed" )] TooManyFactoryDependencies(usize, usize), - #[error("max fee per gas higher than 2^32")] - FeePerGasTooHigh, - #[error("max fee per pubdata byte higher than 2^32")] - FeePerPubdataByteTooHigh, - /// InsufficientFundsForTransfer is returned if the transaction sender doesn't - /// have enough funds for transfer. - #[error("insufficient balance for transfer")] - InsufficientFundsForTransfer, /// IntrinsicGas is returned if the transaction is specified to use less gas /// than required to start the invocation. #[error("intrinsic gas too low")] IntrinsicGas, - /// Error returned from main node - #[error("{0}")] - ProxyError(#[from] EnrichedClientError), #[error("not enough gas to publish compressed bytecodes")] FailedToPublishCompressedBytecodes, + /// Currently only triggered during gas estimation for L1 and protocol upgrade transactions. + #[error("integer overflow computing base token amount to mint")] + MintedAmountOverflow, + + /// Error returned from main node. 
+ #[error("{0}")] + ProxyError(#[from] EnrichedClientError), /// Catch-all internal error (e.g., database error) that should not be exposed to the caller. #[error("internal error")] Internal(#[from] anyhow::Error), @@ -88,7 +80,6 @@ impl SubmitTxError { Self::ExecutionReverted(_, _) => "execution-reverted", Self::GasLimitIsTooBig => "gas-limit-is-too-big", Self::Unexecutable(_) => "unexecutable", - Self::RateLimitExceeded => "rate-limit-exceeded", Self::ServerShuttingDown => "shutting-down", Self::BootloaderFailure(_) => "bootloader-failure", Self::ValidationFailed(_) => "validation-failed", @@ -99,14 +90,11 @@ impl SubmitTxError { Self::MaxFeePerGasTooLow => "max-fee-per-gas-too-low", Self::MaxPriorityFeeGreaterThanMaxFee => "max-priority-fee-greater-than-max-fee", Self::UnexpectedVMBehavior(_) => "unexpected-vm-behavior", - Self::UnrealisticPubdataPriceLimit => "unrealistic-pubdata-price-limit", Self::TooManyFactoryDependencies(_, _) => "too-many-factory-dependencies", - Self::FeePerGasTooHigh => "gas-price-limit-too-high", - Self::FeePerPubdataByteTooHigh => "pubdata-price-limit-too-high", - Self::InsufficientFundsForTransfer => "insufficient-funds-for-transfer", Self::IntrinsicGas => "intrinsic-gas", - Self::ProxyError(_) => "proxy-error", Self::FailedToPublishCompressedBytecodes => "failed-to-publish-compressed-bytecodes", + Self::MintedAmountOverflow => "minted-amount-overflow", + Self::ProxyError(_) => "proxy-error", Self::Internal(_) => "internal", } } diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests.rs deleted file mode 100644 index 36c95fa5db0..00000000000 --- a/core/node/api_server/src/tx_sender/tests.rs +++ /dev/null @@ -1,805 +0,0 @@ -//! Tests for the transaction sender. - -use std::{collections::HashMap, time::Duration}; - -use assert_matches::assert_matches; -use test_casing::{test_casing, Product, TestCases}; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_multivm::interface::ExecutionResult; -use zksync_node_fee_model::MockBatchFeeParamsProvider; -use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; -use zksync_system_constants::CODE_ORACLE_ADDRESS; -use zksync_types::{ - api, - api::state_override::{Bytecode, OverrideAccount, OverrideState}, - get_nonce_key, - web3::keccak256, - K256PrivateKey, L1BatchNumber, L2BlockNumber, StorageLog, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use zksync_vm_executor::oneshot::MockOneshotExecutor; - -use super::{gas_estimation::GasEstimator, *}; -use crate::{ - execution_sandbox::BlockStartInfo, - testonly::{ - inflate_bytecode, read_counter_contract_bytecode, read_expensive_contract_bytecode, - read_infinite_loop_contract_bytecode, read_precompiles_contract_bytecode, TestAccount, - COUNTER_CONTRACT_ADDRESS, EXPENSIVE_CONTRACT_ADDRESS, INFINITE_LOOP_CONTRACT_ADDRESS, - LOAD_TEST_ADDRESS, PRECOMPILES_CONTRACT_ADDRESS, - }, - web3::testonly::create_test_tx_sender, -}; - -/// Initial pivot multiplier empirically sufficient for most tx types. 
-const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0; - -#[tokio::test] -async fn getting_nonce_for_account() { - let l2_chain_id = L2ChainId::default(); - let test_address = Address::repeat_byte(1); - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) - .await - .unwrap(); - // Manually insert a nonce for the address. - let nonce_key = get_nonce_key(&test_address); - let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(123)); - storage - .storage_logs_dal() - .append_storage_logs(L2BlockNumber(0), &[nonce_log]) - .await - .unwrap(); - - let tx_executor = MockOneshotExecutor::default(); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - - let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); - assert_eq!(nonce, Nonce(123)); - - // Insert another L2 block with a new nonce log. - storage - .blocks_dal() - .insert_l2_block(&create_l2_block(1)) - .await - .unwrap(); - let nonce_log = StorageLog { - value: H256::from_low_u64_be(321), - ..nonce_log - }; - storage - .storage_logs_dal() - .insert_storage_logs(L2BlockNumber(1), &[nonce_log]) - .await - .unwrap(); - - let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); - assert_eq!(nonce, Nonce(321)); - let missing_address = Address::repeat_byte(0xff); - let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); - assert_eq!(nonce, Nonce(0)); -} - -#[tokio::test] -async fn getting_nonce_for_account_after_snapshot_recovery() { - const SNAPSHOT_L2_BLOCK_NUMBER: L2BlockNumber = L2BlockNumber(42); - - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.connection().await.unwrap(); - let test_address = Address::repeat_byte(1); - let other_address = Address::repeat_byte(2); - let nonce_logs = [ - StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)), - StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)), - ]; - prepare_recovery_snapshot( - &mut storage, - L1BatchNumber(23), - SNAPSHOT_L2_BLOCK_NUMBER, - &nonce_logs, - ) - .await; - - let l2_chain_id = L2ChainId::default(); - let tx_executor = MockOneshotExecutor::default(); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - - storage - .blocks_dal() - .insert_l2_block(&create_l2_block(SNAPSHOT_L2_BLOCK_NUMBER.0 + 1)) - .await - .unwrap(); - let new_nonce_logs = vec![StorageLog::new_write_log( - get_nonce_key(&test_address), - H256::from_low_u64_be(321), - )]; - storage - .storage_logs_dal() - .insert_storage_logs(SNAPSHOT_L2_BLOCK_NUMBER + 1, &new_nonce_logs) - .await - .unwrap(); - - let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); - assert_eq!(nonce, Nonce(321)); - let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap(); - assert_eq!(nonce, Nonce(25)); - let missing_address = Address::repeat_byte(0xff); - let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); - assert_eq!(nonce, Nonce(0)); -} - -#[tokio::test] -async fn submitting_tx_requires_one_connection() { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) - .await - .unwrap(); - - let l2_chain_id = 
L2ChainId::default(); - let fee_input = MockBatchFeeParamsProvider::default() - .get_batch_fee_input_scaled(1.0, 1.0) - .await - .unwrap(); - let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = create_l2_transaction(base_fee, gas_per_pubdata); - let tx_hash = tx.hash(); - - // Manually set sufficient balance for the tx initiator. - let balance_key = storage_key_for_eth_balance(&tx.initiator_account()); - let storage_log = StorageLog::new_write_log(balance_key, u256_to_h256(U256::one() << 64)); - storage - .storage_logs_dal() - .append_storage_logs(L2BlockNumber(0), &[storage_log]) - .await - .unwrap(); - drop(storage); - - let mut tx_executor = MockOneshotExecutor::default(); - tx_executor.set_tx_responses(move |received_tx, _| { - assert_eq!(received_tx.hash(), tx_hash); - ExecutionResult::Success { output: vec![] } - }); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - - let submission_result = tx_sender.submit_tx(tx).await.unwrap(); - assert_matches!(submission_result.0, L2TxSubmissionResult::Added); - - let mut storage = pool.connection().await.unwrap(); - storage - .transactions_web3_dal() - .get_transaction_by_hash(tx_hash, l2_chain_id) - .await - .unwrap() - .expect("transaction is not persisted"); -} - -#[tokio::test] -async fn eth_call_requires_single_connection() { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - let genesis_params = GenesisParams::mock(); - insert_genesis_batch(&mut storage, &genesis_params) - .await - .unwrap(); - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) - .await - .unwrap(); - let block_id = api::BlockId::Number(api::BlockNumber::Latest); - let block_args = BlockArgs::new(&mut storage, block_id, &start_info) - .await - .unwrap(); - drop(storage); - - let tx = create_l2_transaction(10, 100); - let tx_hash = tx.hash(); - - let mut tx_executor = MockOneshotExecutor::default(); - tx_executor.set_call_responses(move |received_tx, _| { - assert_eq!(received_tx.hash(), tx_hash); - ExecutionResult::Success { - output: b"success!".to_vec(), - } - }); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender( - pool.clone(), - genesis_params.config().l2_chain_id, - tx_executor, - ) - .await; - let call_overrides = CallOverrides { - enforced_base_fee: None, - }; - let output = tx_sender - .eth_call(block_args, call_overrides, tx, None) - .await - .unwrap(); - assert_eq!(output, b"success!"); -} - -async fn create_real_tx_sender() -> TxSender { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - let genesis_params = GenesisParams::mock(); - insert_genesis_batch(&mut storage, &genesis_params) - .await - .unwrap(); - drop(storage); - - let genesis_config = genesis_params.config(); - let executor_options = SandboxExecutorOptions::new( - genesis_config.l2_chain_id, - AccountTreeId::new(genesis_config.fee_account), - u32::MAX, - ) - .await - .unwrap(); - - let pg_caches = PostgresStorageCaches::new(1, 1); - let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); - create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor) - .await - .0 -} - -#[tokio::test] -async fn initial_gas_estimation_is_somewhat_accurate() { - let tx_sender = 
create_real_tx_sender().await; - - let alice = K256PrivateKey::random(); - let transfer_value = U256::from(1_000_000_000); - let account_overrides = OverrideAccount { - balance: Some(transfer_value * 2), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); - // fee params don't matter; we adjust via `adjust_transaction_fee()` - let tx = alice.create_transfer(transfer_value, 55, 555); - - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - let initial_estimate = estimator.initialize().await.unwrap(); - assert!(initial_estimate.gas_charged_for_pubdata > 0); - assert!(initial_estimate.operator_overhead > 0); - let total_gas_charged = initial_estimate.total_gas_charged.unwrap(); - assert!( - total_gas_charged - > initial_estimate.gas_charged_for_pubdata + initial_estimate.operator_overhead, - "{initial_estimate:?}" - ); - - // Check that a transaction fails if supplied with the lower bound. - let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() - + initial_estimate.operator_overhead; - assert!(lower_bound < total_gas_charged, "{initial_estimate:?}"); - let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); - assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); - - // A slightly larger limit should work. - let initial_pivot = total_gas_charged * 64 / 63; - let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); -} - -const LOAD_TEST_CASES: TestCases = test_casing::cases! {[ - LoadnextContractExecutionParams::default(), - // No storage modification - LoadnextContractExecutionParams { - writes: 0, - events: 0, - ..LoadnextContractExecutionParams::default() - }, - // Moderately deep recursion (very deep recursion is tested separately) - LoadnextContractExecutionParams { - recursive_calls: 10, - ..LoadnextContractExecutionParams::default() - }, - // No deploys - LoadnextContractExecutionParams { - deploys: 0, - ..LoadnextContractExecutionParams::default() - }, - // Lots of deploys - LoadnextContractExecutionParams { - deploys: 10, - ..LoadnextContractExecutionParams::default() - }, -]}; - -#[test_casing(5, LOAD_TEST_CASES)] -#[tokio::test] -async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractExecutionParams) { - let alice = K256PrivateKey::random(); - // Set the array length in the load test contract to 100, so that reads don't fail. 
- let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(tx_params); - - test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; -} - -#[test_casing(2, [false, true])] -#[tokio::test] -async fn initial_estimate_for_deep_recursion(with_reads: bool) { - let alice = K256PrivateKey::random(); - let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - - // Reads are chosen because they represent the worst case. Reads don't influence the amount of pubdata; - // i.e., they don't make it easier to execute a transaction because of additional gas reserved for pubdata. - // OTOH, reads still increase the amount of computational gas used on each nested call. - // - // Initial pivot multipliers below are the smallest ones with 0.1 precision. `DEFAULT_MULTIPLIER` works for smaller - // recursion depths because the transaction emits enough pubdata to cover gas deductions due to the 63/64 rule. - let depths_and_multipliers: &[_] = if with_reads { - &[(25, DEFAULT_MULTIPLIER), (50, 1.2), (75, 1.4), (100, 1.7)] - } else { - &[ - (50, DEFAULT_MULTIPLIER), - (75, 1.2), - (100, 1.4), - (125, 1.7), - (150, 2.1), - ] - }; - for &(recursion_depth, multiplier) in depths_and_multipliers { - println!("Testing recursion depth {recursion_depth}"); - let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { - recursive_calls: recursion_depth, - reads: if with_reads { 10 } else { 0 }, - ..LoadnextContractExecutionParams::empty() - }); - test_initial_estimate(state_override.clone(), tx, multiplier).await; - } -} - -#[tokio::test] -async fn initial_estimate_for_deep_recursion_with_large_bytecode() { - let alice = K256PrivateKey::random(); - let mut contract_bytecode = get_loadnext_contract().bytecode; - inflate_bytecode(&mut contract_bytecode, 50_000); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { - recursive_calls: 100, - ..LoadnextContractExecutionParams::empty() - }); - - test_initial_estimate(state_override, tx, 1.35).await; -} - -/// Tests the lower bound and initial pivot extracted from the initial estimate (one with effectively infinite gas amount). -/// Returns the VM result for a VM run with the initial pivot. 
-async fn test_initial_estimate( - state_override: StateOverride, - tx: L2Tx, - initial_pivot_multiplier: f64, -) -> VmExecutionResultAndLogs { - let tx_sender = create_real_tx_sender().await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - let initial_estimate = estimator.initialize().await.unwrap(); - - let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() - + initial_estimate.operator_overhead; - let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); - assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); - - // A slightly larger limit should work. - let initial_pivot = - (initial_estimate.total_gas_charged.unwrap() as f64 * initial_pivot_multiplier) as u64; - let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); - vm_result -} - -async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError { - let tx_sender = create_real_tx_sender().await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - estimator.initialize().await.unwrap_err() -} - -/// Estimates both transactions with initial writes and cleanup. -#[test_casing(4, [10, 50, 200, 1_000])] -#[tokio::test] -async fn initial_estimate_for_expensive_contract(write_count: usize) { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_expensive_contract_bytecode(); - let mut contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides.clone(), - )])); - let tx = alice.create_expensive_tx(write_count); - - let vm_result = test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; - - let contract_logs = vm_result.logs.storage_logs.into_iter().filter_map(|log| { - (*log.log.key.address() == EXPENSIVE_CONTRACT_ADDRESS) - .then_some((*log.log.key.key(), log.log.value)) - }); - let contract_logs: HashMap<_, _> = contract_logs.collect(); - assert!(contract_logs.len() >= write_count, "{contract_logs:?}"); - contract_overrides.state = Some(OverrideState::StateDiff(contract_logs)); - - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides, - )])); - let tx = alice.create_expensive_cleanup_tx(); - - test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; -} - -#[tokio::test] -async fn initial_estimate_for_code_oracle_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_precompiles_contract_bytecode(); - let contract_bytecode_hash = hash_bytecode(&contract_bytecode); - let contract_keccak_hash = H256(keccak256(&contract_bytecode)); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - // Add another contract that is never executed, but has a large bytecode. 
- let huge_contact_address = Address::repeat_byte(23); - let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; - let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); - let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); - let huge_contract_overrides = OverrideAccount { - code: Some(Bytecode::new(huge_contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([ - (PRECOMPILES_CONTRACT_ADDRESS, contract_overrides), - (huge_contact_address, huge_contract_overrides), - ])); - - // Test contracts that are already decommitted when requested from the precompiles test contract. - let genesis_params = GenesisParams::mock(); - let code_oracle_bytecode = genesis_params - .system_contracts() - .iter() - .find_map(|contract| { - (*contract.account_id.address() == CODE_ORACLE_ADDRESS).then_some(&contract.bytecode) - }) - .expect("no code oracle"); - let code_oracle_bytecode_hash = hash_bytecode(code_oracle_bytecode); - let code_oracle_keccak_hash = H256(keccak256(code_oracle_bytecode)); - - let warm_bytecode_hashes = [ - (code_oracle_bytecode_hash, code_oracle_keccak_hash), - (contract_bytecode_hash, contract_keccak_hash), - ]; - let mut decomitter_stats = 0.0; - for (hash, keccak_hash) in warm_bytecode_hashes { - println!("Testing bytecode: {hash:?}"); - let tx = alice.create_code_oracle_tx(hash, keccak_hash); - let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; - let stats = &vm_result.statistics.circuit_statistic; - decomitter_stats = stats.code_decommitter.max(decomitter_stats); - } - assert!(decomitter_stats > 0.0); - - println!("Testing large bytecode"); - let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); - let vm_result = test_initial_estimate(state_override, tx, 1.05).await; - // Sanity check: the transaction should spend significantly more on decommitment compared to previous ones - let new_decomitter_stats = vm_result.statistics.circuit_statistic.code_decommitter; - assert!( - new_decomitter_stats > decomitter_stats * 1.5, - "old={decomitter_stats}, new={new_decomitter_stats}" - ); -} - -#[tokio::test] -async fn initial_estimate_with_large_free_bytecode() { - let alice = K256PrivateKey::random(); - let mut contract_bytecode = read_precompiles_contract_bytecode(); - inflate_bytecode(&mut contract_bytecode, 50_000); - let contract_bytecode_hash = hash_bytecode(&contract_bytecode); - let contract_keccak_hash = H256(keccak256(&contract_bytecode)); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([( - PRECOMPILES_CONTRACT_ADDRESS, - contract_overrides, - )])); - // Ask the test contract to decommit itself. This should refund the decommit costs, but it will be charged at first. 
- let tx = alice.create_code_oracle_tx(contract_bytecode_hash, contract_keccak_hash); - test_initial_estimate(state_override, tx, 1.05).await; -} - -#[tokio::test] -async fn revert_during_initial_estimate() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_counter_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - COUNTER_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_reverting_counter_tx(); - let err = test_initial_estimate_error(state_override, tx).await; - let SubmitTxError::ExecutionReverted(err, _) = err else { - panic!("Unexpected error: {err:?}"); - }; - assert_eq!(err, "This method always reverts"); -} - -#[tokio::test] -async fn out_of_gas_during_initial_estimate() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_infinite_loop_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - INFINITE_LOOP_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_infinite_loop_tx(); - let err = test_initial_estimate_error(state_override, tx).await; - // Unfortunately, we don't provide human-readable out-of-gas errors at the time - assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); -} - -#[tokio::test] -async fn insufficient_funds_error_for_transfer() { - let tx_sender = create_real_tx_sender().await; - - let alice = K256PrivateKey::random(); - let transfer_value = 1_000_000_000.into(); - // fee params don't matter; they should be overwritten by the estimation logic - let tx = alice.create_transfer(transfer_value, 55, 555); - let fee_scale_factor = 1.0; - // Without overrides, the transaction should fail because of insufficient balance. 
- let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - 1_000, - None, - BinarySearchKind::Full, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::InsufficientFundsForTransfer); -} - -async fn test_estimating_gas( - state_override: StateOverride, - tx: L2Tx, - acceptable_overestimation: u64, -) { - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let fee = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - BinarySearchKind::Full, - ) - .await - .unwrap(); - // Sanity-check gas limit - let gas_limit_after_full_search = u64::try_from(fee.gas_limit).unwrap(); - assert!( - (10_000..10_000_000).contains(&gas_limit_after_full_search), - "{fee:?}" - ); - - let fee = tx_sender - .get_txs_fee_in_wei( - tx.into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - BinarySearchKind::Optimized, - ) - .await - .unwrap(); - let gas_limit_after_optimized_search = u64::try_from(fee.gas_limit).unwrap(); - - let diff = gas_limit_after_full_search.abs_diff(gas_limit_after_optimized_search); - assert!( - diff <= acceptable_overestimation, - "full={gas_limit_after_full_search}, optimized={gas_limit_after_optimized_search}" - ); -} - -#[test_casing(3, [0, 100, 1_000])] -#[tokio::test] -async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { - let alice = K256PrivateKey::random(); - let transfer_value = 1_000_000_000.into(); - let account_overrides = OverrideAccount { - balance: Some(transfer_value * 2), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); - // fee params don't matter; they should be overwritten by the estimation logic - let tx = alice.create_transfer(transfer_value, 55, 555); - - test_estimating_gas(state_override, tx, acceptable_overestimation).await; -} - -#[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] -#[tokio::test] -async fn estimating_gas_for_load_test_tx( - tx_params: LoadnextContractExecutionParams, - acceptable_overestimation: u64, -) { - let alice = K256PrivateKey::random(); - // Set the array length in the load test contract to 100, so that reads don't fail. 
- let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(tx_params); - - test_estimating_gas(state_override, tx, acceptable_overestimation).await; -} - -#[test_casing(4, [10, 50, 100, 200])] -#[tokio::test] -async fn estimating_gas_for_expensive_txs(write_count: usize) { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_expensive_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides.clone(), - )])); - let tx = alice.create_expensive_tx(write_count); - - test_estimating_gas(state_override, tx, 0).await; -} - -#[tokio::test] -async fn estimating_gas_for_code_oracle_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_precompiles_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - // Add another contract that is never executed, but has a large bytecode. - let huge_contact_address = Address::repeat_byte(23); - let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; - let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); - let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); - let huge_contract_overrides = OverrideAccount { - code: Some(Bytecode::new(huge_contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([ - (PRECOMPILES_CONTRACT_ADDRESS, contract_overrides), - (huge_contact_address, huge_contract_overrides), - ])); - let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); - - test_estimating_gas(state_override, tx, 0).await; -} - -#[tokio::test] -async fn estimating_gas_for_reverting_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_counter_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - COUNTER_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_reverting_counter_tx(); - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let acceptable_overestimation = 0; - for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { - let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - binary_search_kind, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::ExecutionReverted(..)); - } -} - -#[tokio::test] -async fn estimating_gas_for_infinite_loop_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_infinite_loop_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = 
StateOverride::new(HashMap::from([( - INFINITE_LOOP_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_infinite_loop_tx(); - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let acceptable_overestimation = 0; - for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { - let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - binary_search_kind, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); - } -} diff --git a/core/node/api_server/src/tx_sender/tests/call.rs b/core/node/api_server/src/tx_sender/tests/call.rs new file mode 100644 index 00000000000..e43f55b2b9a --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/call.rs @@ -0,0 +1,253 @@ +//! Tests for `eth_call`. + +use std::collections::HashMap; + +use assert_matches::assert_matches; +use zksync_multivm::interface::ExecutionResult; +use zksync_node_test_utils::create_l2_transaction; +use zksync_types::{ + api::state_override::OverrideAccount, transaction_request::CallRequest, K256PrivateKey, +}; + +use super::*; +use crate::testonly::{decode_u256_output, Call3Result, Call3Value, StateBuilder, TestAccount}; + +#[tokio::test] +async fn eth_call_requires_single_connection() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + let genesis_params = GenesisParams::mock(); + insert_genesis_batch(&mut storage, &genesis_params) + .await + .unwrap(); + let block_args = BlockArgs::pending(&mut storage).await.unwrap(); + drop(storage); + + let tx = create_l2_transaction(10, 100); + let tx_hash = tx.hash(); + + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_call_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { + output: b"success!".to_vec(), + } + }); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender( + pool.clone(), + genesis_params.config().l2_chain_id, + tx_executor, + ) + .await; + let call_overrides = CallOverrides { + enforced_base_fee: None, + }; + let output = tx_sender + .eth_call(block_args, call_overrides, tx, None) + .await + .unwrap(); + assert_eq!(output, b"success!"); +} + +async fn test_call( + tx_sender: &TxSender, + state_override: StateOverride, + mut call: CallRequest, +) -> Result, SubmitTxError> { + call.gas = call.gas.max(Some(10_000_000.into())); + let call = L2Tx::from_request(call.into(), usize::MAX, true).unwrap(); + + let mut storage = tx_sender + .0 + .replica_connection_pool + .connection() + .await + .unwrap(); + let block_args = BlockArgs::pending(&mut storage).await.unwrap(); + drop(storage); + let call_overrides = CallOverrides { + enforced_base_fee: None, + }; + + tx_sender + .eth_call(block_args, call_overrides, call, Some(state_override)) + .await +} + +#[tokio::test] +async fn eth_call_with_balance() { + let alice = K256PrivateKey::random(); + let initial_balance = 123_456_789.into(); + let account_overrides = OverrideAccount { + balance: Some(initial_balance), + ..OverrideAccount::default() + }; + let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let call = alice.query_base_token_balance(); + let output = 
test_call(&tx_sender, state_override, call).await.unwrap(); + assert_eq!(decode_u256_output(&output), initial_balance); +} + +#[tokio::test] +async fn eth_call_with_transfer() { + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + let initial_balance = transfer_value * 5 / 3; + let state_override = StateBuilder::default() + .with_multicall3_contract() + .with_balance(alice.address(), initial_balance) + .build(); + + let transfer = alice.create_transfer(transfer_value); + let multicall = alice.multicall_with_value( + transfer_value, + &[transfer.into(), alice.query_base_token_balance().into()], + ); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call(&tx_sender, state_override, multicall) + .await + .unwrap(); + let call_results = Call3Result::parse(&output); + assert_eq!(call_results.len(), 2); + assert!( + call_results[0].success && call_results[1].success, + "{call_results:?}" + ); + assert!(call_results[0].return_data.is_empty(), "{call_results:?}"); + + let balance = call_results[1].as_u256(); + // The bootloader doesn't compute gas refunds in the call mode, so the equality is exact + assert_eq!(balance, initial_balance - transfer_value); +} + +#[tokio::test] +async fn eth_call_with_counter() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(42).build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call( + &tx_sender, + state_override.clone(), + alice.query_counter_value(), + ) + .await + .unwrap(); + assert_eq!(decode_u256_output(&output), 42.into()); + + let tx_as_call = alice.create_counter_tx(3.into(), false).into(); + let output = test_call(&tx_sender, state_override.clone(), tx_as_call) + .await + .unwrap(); + assert_eq!(decode_u256_output(&output), 45.into()); + + let tx_as_call = alice.create_counter_tx(3.into(), true).into(); + let err = test_call(&tx_sender, state_override, tx_as_call) + .await + .unwrap_err(); + assert_matches!( + err, + SubmitTxError::ExecutionReverted(msg, _) if msg.contains("This method always reverts") + ); +} + +#[tokio::test] +async fn eth_call_with_counter_transactions() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_multicall3_contract() + .with_counter_contract(0) + .build(); + + let multicall = alice.multicall_with_value( + 0.into(), + &[ + alice.create_counter_tx(1.into(), false).into(), + Call3Value::from(alice.create_counter_tx(2.into(), true)).allow_failure(), + alice.query_counter_value().into(), + alice.create_counter_tx(3.into(), false).into(), + ], + ); + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call(&tx_sender, state_override, multicall) + .await + .unwrap(); + let call_results = Call3Result::parse(&output); + + assert_eq!( + call_results + .iter() + .map(|result| result.success) + .collect::>(), + [true, false, true, true] + ); + let counter_values: Vec<_> = call_results + .iter() + .filter_map(|result| { + if !result.success { + return None; + } + Some(decode_u256_output(&result.return_data).as_u32()) + }) + .collect(); + assert_eq!(counter_values, [1, 1, 4]); +} + +#[tokio::test] +async fn eth_call_out_of_gas() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + 
.with_infinite_loop_contract() + .build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let tx_as_call = alice.create_infinite_loop_tx().into(); + let err = test_call(&tx_sender, state_override, tx_as_call) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::ExecutionReverted(..)); +} + +#[tokio::test] +async fn eth_call_with_load_test_transactions() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + // Deploys (naturally) don't work for calls, hence a separate set of test cases. + let load_test_cases_for_call = [ + LoadnextContractExecutionParams { + deploys: 0, + ..LoadnextContractExecutionParams::default() + }, + LoadnextContractExecutionParams { + deploys: 0, + recursive_calls: 20, + ..LoadnextContractExecutionParams::default() + }, + LoadnextContractExecutionParams { + reads: 100, + writes: 100, + ..LoadnextContractExecutionParams::empty() + }, + ]; + + for tx_params in load_test_cases_for_call { + println!("Executing {tx_params:?}"); + let tx_as_call = alice.create_load_test_tx(tx_params).into(); + test_call(&tx_sender, state_override.clone(), tx_as_call) + .await + .unwrap(); + } +} diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs new file mode 100644 index 00000000000..4528d9cda12 --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -0,0 +1,483 @@ +//! Tests for gas estimation (mostly with the real oneshot VM executor). + +use std::collections::HashMap; + +use assert_matches::assert_matches; +use test_casing::{test_casing, Product}; +use zksync_system_constants::CODE_ORACLE_ADDRESS; +use zksync_types::{ + api::state_override::{OverrideAccount, OverrideState}, + web3::keccak256, + K256PrivateKey, +}; +use zksync_utils::bytecode::hash_bytecode; + +use super::*; +use crate::{ + testonly::{StateBuilder, TestAccount}, + tx_sender::gas_estimation::GasEstimator, +}; + +/// Initial pivot multiplier empirically sufficient for most tx types. +const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0; + +#[tokio::test] +async fn initial_gas_estimation_is_somewhat_accurate() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + + let alice = K256PrivateKey::random(); + let transfer_value = U256::from(1_000_000_000); + let account_overrides = OverrideAccount { + balance: Some(transfer_value * 2), + ..OverrideAccount::default() + }; + let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + let tx = alice.create_transfer(transfer_value); + + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + assert!(initial_estimate.gas_charged_for_pubdata > 0); + assert!(initial_estimate.operator_overhead > 0); + let total_gas_charged = initial_estimate.total_gas_charged.unwrap(); + assert!( + total_gas_charged + > initial_estimate.gas_charged_for_pubdata + initial_estimate.operator_overhead, + "{initial_estimate:?}" + ); + + // Check that a transaction fails if supplied with the lower bound. 
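`DEFAULT_MULTIPLIER` above encodes the 63/64 rule that the comments in these tests refer to: a nested call is forwarded at most 63/64 of the remaining gas, so a limit fixed at the top of the call stack has to be inflated by roughly 64/63 per nested frame for the innermost frame to still receive the gas it needs. A small sketch of that arithmetic, with made-up numbers:

```rust
fn main() {
    // Hypothetical figures, purely to illustrate the 64/63 compensation.
    let gas_needed_innermost: f64 = 1_000_000.0;
    let nested_frames: i32 = 3;
    // Each frame holds back up to 1/64 of the remaining gas, so the top-level
    // limit grows by a factor of (64/63) per frame of nesting.
    let outer_limit = gas_needed_innermost * (64.0_f64 / 63.0).powi(nested_frames);
    println!("outer gas limit for {nested_frames} nested frames: {outer_limit:.0}");
}
```

This is also why the deep-recursion cases below need progressively larger pivot multipliers as the recursion depth grows.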
+ let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() + + initial_estimate.operator_overhead; + assert!(lower_bound < total_gas_charged, "{initial_estimate:?}"); + let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + + // A slightly larger limit should work. + let initial_pivot = total_gas_charged * 64 / 63; + let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[test_casing(5, LOAD_TEST_CASES)] +#[tokio::test] +async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractExecutionParams) { + let alice = K256PrivateKey::random(); + // Set the array length in the load test contract to 100, so that reads don't fail. + let state_override = StateBuilder::default().with_load_test_contract().build(); + let tx = alice.create_load_test_tx(tx_params); + + test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn initial_estimate_for_deep_recursion(with_reads: bool) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + + // Reads are chosen because they represent the worst case. Reads don't influence the amount of pubdata; + // i.e., they don't make it easier to execute a transaction because of additional gas reserved for pubdata. + // OTOH, reads still increase the amount of computational gas used on each nested call. + // + // Initial pivot multipliers below are the smallest ones with 0.1 precision. `DEFAULT_MULTIPLIER` works for smaller + // recursion depths because the transaction emits enough pubdata to cover gas deductions due to the 63/64 rule. + let depths_and_multipliers: &[_] = if with_reads { + &[(25, DEFAULT_MULTIPLIER), (50, 1.2), (75, 1.4), (100, 1.7)] + } else { + &[ + (50, DEFAULT_MULTIPLIER), + (75, 1.2), + (100, 1.4), + (125, 1.7), + (150, 2.1), + ] + }; + for &(recursion_depth, multiplier) in depths_and_multipliers { + println!("Testing recursion depth {recursion_depth}"); + let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { + recursive_calls: recursion_depth, + reads: if with_reads { 10 } else { 0 }, + ..LoadnextContractExecutionParams::empty() + }); + test_initial_estimate(state_override.clone(), tx, multiplier).await; + } +} + +#[tokio::test] +async fn initial_estimate_for_deep_recursion_with_large_bytecode() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_load_test_contract() + .inflate_bytecode(StateBuilder::LOAD_TEST_ADDRESS, 50_000) + .build(); + let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { + recursive_calls: 100, + ..LoadnextContractExecutionParams::empty() + }); + + test_initial_estimate(state_override, tx, 1.35).await; +} + +/// Tests the lower bound and initial pivot extracted from the initial estimate (one with effectively infinite gas amount). +/// Returns the VM result for a VM run with the initial pivot. 
+async fn test_initial_estimate( + state_override: StateOverride, + tx: L2Tx, + initial_pivot_multiplier: f64, +) -> VmExecutionResultAndLogs { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + + let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() + + initial_estimate.operator_overhead; + let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + + // A slightly larger limit should work. + let initial_pivot = + (initial_estimate.total_gas_charged.unwrap() as f64 * initial_pivot_multiplier) as u64; + let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + vm_result +} + +async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + estimator.initialize().await.unwrap_err() +} + +/// Estimates both transactions with initial writes and cleanup. +#[test_casing(4, [10, 50, 200, 1_000])] +#[tokio::test] +async fn initial_estimate_for_expensive_contract(write_count: usize) { + let alice = K256PrivateKey::random(); + let mut state_override = StateBuilder::default().with_expensive_contract().build(); + let tx = alice.create_expensive_tx(write_count); + + let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; + + let contract_logs = vm_result.logs.storage_logs.into_iter().filter_map(|log| { + (*log.log.key.address() == StateBuilder::EXPENSIVE_CONTRACT_ADDRESS) + .then_some((*log.log.key.key(), log.log.value)) + }); + let contract_logs: HashMap<_, _> = contract_logs.collect(); + assert!(contract_logs.len() >= write_count, "{contract_logs:?}"); + + state_override + .get_mut(&StateBuilder::EXPENSIVE_CONTRACT_ADDRESS) + .unwrap() + .state = Some(OverrideState::StateDiff(contract_logs)); + let tx = alice.create_expensive_cleanup_tx(); + test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; +} + +#[tokio::test] +async fn initial_estimate_for_code_oracle_tx() { + let alice = K256PrivateKey::random(); + // Add another contract that is never executed, but has a large bytecode. 
+ let huge_contact_address = Address::repeat_byte(23); + let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; + let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); + let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); + + let state_override = StateBuilder::default() + .with_precompiles_contract() + .with_contract(huge_contact_address, huge_contract_bytecode) + .build(); + + let contract_override = state_override + .get(&StateBuilder::PRECOMPILES_CONTRACT_ADDRESS) + .unwrap(); + let contract_bytecode = contract_override.code.as_ref().unwrap(); + let contract_bytecode_hash = contract_bytecode.hash(); + let contract_keccak_hash = H256(keccak256(contract_bytecode.as_ref())); + + // Test contracts that are already decommitted when requested from the precompiles test contract. + let genesis_params = GenesisParams::mock(); + let code_oracle_bytecode = genesis_params + .system_contracts() + .iter() + .find_map(|contract| { + (*contract.account_id.address() == CODE_ORACLE_ADDRESS).then_some(&contract.bytecode) + }) + .expect("no code oracle"); + let code_oracle_bytecode_hash = hash_bytecode(code_oracle_bytecode); + let code_oracle_keccak_hash = H256(keccak256(code_oracle_bytecode)); + + let warm_bytecode_hashes = [ + (code_oracle_bytecode_hash, code_oracle_keccak_hash), + (contract_bytecode_hash, contract_keccak_hash), + ]; + let mut decomitter_stats = 0.0; + for (hash, keccak_hash) in warm_bytecode_hashes { + println!("Testing bytecode: {hash:?}"); + let tx = alice.create_code_oracle_tx(hash, keccak_hash); + let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; + let stats = &vm_result.statistics.circuit_statistic; + decomitter_stats = stats.code_decommitter.max(decomitter_stats); + } + assert!(decomitter_stats > 0.0); + + println!("Testing large bytecode"); + let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); + let vm_result = test_initial_estimate(state_override, tx, 1.05).await; + // Sanity check: the transaction should spend significantly more on decommitment compared to previous ones + let new_decomitter_stats = vm_result.statistics.circuit_statistic.code_decommitter; + assert!( + new_decomitter_stats > decomitter_stats * 1.5, + "old={decomitter_stats}, new={new_decomitter_stats}" + ); +} + +#[tokio::test] +async fn initial_estimate_with_large_free_bytecode() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_precompiles_contract() + .inflate_bytecode(StateBuilder::PRECOMPILES_CONTRACT_ADDRESS, 50_000) + .build(); + let contract_override = state_override + .get(&StateBuilder::PRECOMPILES_CONTRACT_ADDRESS) + .unwrap(); + let contract_bytecode = contract_override.code.as_ref().unwrap(); + let contract_bytecode_hash = contract_bytecode.hash(); + let contract_keccak_hash = H256(keccak256(contract_bytecode.as_ref())); + + // Ask the test contract to decommit itself. This should refund the decommit costs, but it will be charged at first. 
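+ // Hence the slightly increased initial pivot multiplier (1.05) used below.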
+ let tx = alice.create_code_oracle_tx(contract_bytecode_hash, contract_keccak_hash); + test_initial_estimate(state_override, tx, 1.05).await; +} + +#[tokio::test] +async fn revert_during_initial_estimate() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + + let tx = alice.create_counter_tx(1.into(), true); + let err = test_initial_estimate_error(state_override, tx).await; + let SubmitTxError::ExecutionReverted(err, _) = err else { + panic!("Unexpected error: {err:?}"); + }; + assert_eq!(err, "This method always reverts"); +} + +#[tokio::test] +async fn out_of_gas_during_initial_estimate() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_infinite_loop_contract() + .build(); + + let tx = alice.create_infinite_loop_tx(); + let err = test_initial_estimate_error(state_override, tx).await; + // Unfortunately, we don't provide human-readable out-of-gas errors at the time + assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); +} + +#[tokio::test] +async fn insufficient_funds_error_for_transfer() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + + let alice = K256PrivateKey::random(); + let transferred_value = 1_000_000_000.into(); + let tx = alice.create_transfer(transferred_value); + let fee_scale_factor = 1.0; + // Without overrides, the transaction should fail because of insufficient balance. + let err = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + block_args, + fee_scale_factor, + 1_000, + None, + BinarySearchKind::Full, + ) + .await + .unwrap_err(); + assert_matches!( + err, + SubmitTxError::NotEnoughBalanceForFeeValue(balance, fee, value) + if balance.is_zero() && fee.is_zero() && value == transferred_value + ); +} + +async fn test_estimating_gas( + state_override: StateOverride, + tx: L2Tx, + acceptable_overestimation: u64, +) { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + + let fee_scale_factor = 1.0; + let fee = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + block_args.clone(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + BinarySearchKind::Full, + ) + .await + .unwrap(); + // Sanity-check gas limit + let gas_limit_after_full_search = u64::try_from(fee.gas_limit).unwrap(); + assert!( + (10_000..10_000_000).contains(&gas_limit_after_full_search), + "{fee:?}" + ); + + let fee = tx_sender + .get_txs_fee_in_wei( + tx.into(), + block_args, + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + BinarySearchKind::Optimized, + ) + .await + .unwrap(); + let gas_limit_after_optimized_search = u64::try_from(fee.gas_limit).unwrap(); + + let diff = gas_limit_after_full_search.abs_diff(gas_limit_after_optimized_search); + assert!( + diff <= acceptable_overestimation, + "full={gas_limit_after_full_search}, optimized={gas_limit_after_optimized_search}" + ); +} + +#[test_casing(3, [0, 100, 1_000])] +#[tokio::test] +async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + let account_overrides = OverrideAccount { + balance: Some(transfer_value * 2), + ..OverrideAccount::default() + }; + let state_override = 
StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + let tx = alice.create_transfer(transfer_value); + + test_estimating_gas(state_override, tx, acceptable_overestimation).await; +} + +#[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] +#[tokio::test] +async fn estimating_gas_for_load_test_tx( + tx_params: LoadnextContractExecutionParams, + acceptable_overestimation: u64, +) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + let tx = alice.create_load_test_tx(tx_params); + + test_estimating_gas(state_override, tx, acceptable_overestimation).await; +} + +#[test_casing(4, [10, 50, 100, 200])] +#[tokio::test] +async fn estimating_gas_for_expensive_txs(write_count: usize) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_expensive_contract().build(); + let tx = alice.create_expensive_tx(write_count); + + test_estimating_gas(state_override, tx, 0).await; +} + +#[tokio::test] +async fn estimating_gas_for_code_oracle_tx() { + let alice = K256PrivateKey::random(); + // Add another contract that is never executed, but has a large bytecode. + let huge_contact_address = Address::repeat_byte(23); + let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; + let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); + let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); + + let state_override = StateBuilder::default() + .with_precompiles_contract() + .with_contract(huge_contact_address, huge_contract_bytecode) + .build(); + let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); + + test_estimating_gas(state_override, tx, 0).await; +} + +#[tokio::test] +async fn estimating_gas_for_reverting_tx() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + + let tx = alice.create_counter_tx(1.into(), true); + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + + let fee_scale_factor = 1.0; + let acceptable_overestimation = 0; + for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { + let err = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + block_args.clone(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + binary_search_kind, + ) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::ExecutionReverted(..)); + } +} + +#[tokio::test] +async fn estimating_gas_for_infinite_loop_tx() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_infinite_loop_contract() + .build(); + + let tx = alice.create_infinite_loop_tx(); + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + + let fee_scale_factor = 1.0; + let acceptable_overestimation = 0; + for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { + let err = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + block_args.clone(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + binary_search_kind, + ) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); + } +} diff --git 
a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs new file mode 100644 index 00000000000..cacd616202d --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -0,0 +1,166 @@ +//! Tests for the transaction sender. + +use test_casing::TestCases; +use zksync_contracts::test_contracts::LoadnextContractExecutionParams; +use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; +use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_types::{get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog}; +use zksync_vm_executor::oneshot::MockOneshotExecutor; + +use super::*; +use crate::web3::testonly::create_test_tx_sender; + +mod call; +mod gas_estimation; +mod send_tx; + +const LOAD_TEST_CASES: TestCases = test_casing::cases! {[ + LoadnextContractExecutionParams::default(), + // No storage modification + LoadnextContractExecutionParams { + writes: 0, + events: 0, + ..LoadnextContractExecutionParams::default() + }, + // Moderately deep recursion (very deep recursion is tested separately) + LoadnextContractExecutionParams { + recursive_calls: 10, + ..LoadnextContractExecutionParams::default() + }, + // No deploys + LoadnextContractExecutionParams { + deploys: 0, + ..LoadnextContractExecutionParams::default() + }, + // Lots of deploys + LoadnextContractExecutionParams { + deploys: 10, + ..LoadnextContractExecutionParams::default() + }, +]}; + +#[tokio::test] +async fn getting_nonce_for_account() { + let l2_chain_id = L2ChainId::default(); + let test_address = Address::repeat_byte(1); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + // Manually insert a nonce for the address. + let nonce_key = get_nonce_key(&test_address); + let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(123)); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[nonce_log]) + .await + .unwrap(); + + let tx_executor = MockOneshotExecutor::default(); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(123)); + + // Insert another L2 block with a new nonce log. 
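+ // `get_expected_nonce` should now pick up the value from the latest block.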
+ storage + .blocks_dal() + .insert_l2_block(&create_l2_block(1)) + .await + .unwrap(); + let nonce_log = StorageLog { + value: H256::from_low_u64_be(321), + ..nonce_log + }; + storage + .storage_logs_dal() + .insert_storage_logs(L2BlockNumber(1), &[nonce_log]) + .await + .unwrap(); + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(321)); + let missing_address = Address::repeat_byte(0xff); + let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); + assert_eq!(nonce, Nonce(0)); +} + +#[tokio::test] +async fn getting_nonce_for_account_after_snapshot_recovery() { + const SNAPSHOT_L2_BLOCK_NUMBER: L2BlockNumber = L2BlockNumber(42); + + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + let test_address = Address::repeat_byte(1); + let other_address = Address::repeat_byte(2); + let nonce_logs = [ + StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)), + StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)), + ]; + prepare_recovery_snapshot( + &mut storage, + L1BatchNumber(23), + SNAPSHOT_L2_BLOCK_NUMBER, + &nonce_logs, + ) + .await; + + let l2_chain_id = L2ChainId::default(); + let tx_executor = MockOneshotExecutor::default(); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + storage + .blocks_dal() + .insert_l2_block(&create_l2_block(SNAPSHOT_L2_BLOCK_NUMBER.0 + 1)) + .await + .unwrap(); + let new_nonce_logs = vec![StorageLog::new_write_log( + get_nonce_key(&test_address), + H256::from_low_u64_be(321), + )]; + storage + .storage_logs_dal() + .insert_storage_logs(SNAPSHOT_L2_BLOCK_NUMBER + 1, &new_nonce_logs) + .await + .unwrap(); + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(321)); + let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap(); + assert_eq!(nonce, Nonce(25)); + let missing_address = Address::repeat_byte(0xff); + let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); + assert_eq!(nonce, Nonce(0)); +} + +async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { + let mut storage = pool.connection().await.unwrap(); + let genesis_params = GenesisParams::mock(); + insert_genesis_batch(&mut storage, &genesis_params) + .await + .unwrap(); + drop(storage); + + let genesis_config = genesis_params.config(); + let executor_options = SandboxExecutorOptions::new( + genesis_config.l2_chain_id, + AccountTreeId::new(genesis_config.fee_account), + u32::MAX, + ) + .await + .unwrap(); + + let pg_caches = PostgresStorageCaches::new(1, 1); + let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); + create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor) + .await + .0 +} + +async fn pending_block_args(tx_sender: &TxSender) -> BlockArgs { + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + BlockArgs::pending(&mut storage).await.unwrap() +} diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs new file mode 100644 index 00000000000..fdd63254cf0 --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -0,0 +1,300 @@ +//! Tests for sending raw transactions. 
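+//! Some tests use the mock one-shot executor; others exercise the real sandbox executor via `create_real_tx_sender`.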
+ +use assert_matches::assert_matches; +use test_casing::test_casing; +use zksync_multivm::interface::ExecutionResult; +use zksync_node_fee_model::MockBatchFeeParamsProvider; +use zksync_node_test_utils::create_l2_transaction; +use zksync_types::K256PrivateKey; + +use super::*; +use crate::testonly::{StateBuilder, TestAccount}; + +#[tokio::test] +async fn submitting_tx_requires_one_connection() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + let tx_hash = tx.hash(); + + // Manually set sufficient balance for the tx initiator. + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_tx_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { output: vec![] } + }); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let block_args = pending_block_args(&tx_sender).await; + + let submission_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(submission_result.0, L2TxSubmissionResult::Added); + + let mut storage = pool.connection().await.unwrap(); + storage + .transactions_web3_dal() + .get_transaction_by_hash(tx_hash, l2_chain_id) + .await + .unwrap() + .expect("transaction is not persisted"); +} + +#[tokio::test] +async fn nonce_validation_errors() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + drop(storage); + + let l2_chain_id = L2ChainId::default(); + let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let mut tx = create_l2_transaction(55, 555); + + tx_sender.validate_account_nonce(&tx).await.unwrap(); + // There should be some leeway with the nonce validation. 
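+ // A nonce slightly ahead of the expected one (0 at this point) is accepted; a large gap is rejected below.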
+ tx.common_data.nonce = Nonce(1); + tx_sender.validate_account_nonce(&tx).await.unwrap(); + + tx.common_data.nonce = Nonce(10_000); + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooHigh(from, _, actual) if actual == 10_000 && from == 0 + ); + + let mut storage = pool.connection().await.unwrap(); + let nonce_key = get_nonce_key(&tx.initiator_account()); + let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(42)); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[nonce_log]) + .await + .unwrap(); + drop(storage); + + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooHigh(from, _, actual) if actual == 10_000 && from == 42 + ); + + tx.common_data.nonce = Nonce(5); + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooLow(from, _, actual) if actual == 5 && from == 42 + ); +} + +#[tokio::test] +async fn fee_validation_errors() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + // Sanity check: validation should succeed with reasonable fee params. + tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap(); + + { + let mut tx = tx.clone(); + tx.common_data.fee.gas_limit = 100.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::IntrinsicGas); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.gas_limit = u64::MAX.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::GasLimitIsTooBig); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.max_fee_per_gas = 1.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::MaxFeePerGasTooLow); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.max_priority_fee_per_gas = tx.common_data.fee.max_fee_per_gas * 2; + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::MaxPriorityFeeGreaterThanMaxFee); + } +} + +#[tokio::test] +async fn sending_transfer() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + // Manually set sufficient balance for the tx initiator. 
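+ // The real sandbox executor checks balances, so the submission would fail otherwise (see `sending_transfer_with_insufficient_balance`).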
+ let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let transfer = alice.create_transfer(1_000_000_000.into()); + let (sub_result, vm_result) = tx_sender.submit_tx(transfer, block_args).await.unwrap(); + assert_matches!(sub_result, L2TxSubmissionResult::Added); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[tokio::test] +async fn sending_transfer_with_insufficient_balance() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + + let transfer = alice.create_transfer(transfer_value); + let err = tx_sender.submit_tx(transfer, block_args).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NotEnoughBalanceForFeeValue(balance, _, value) if balance.is_zero() + && value == transfer_value + ); +} + +#[tokio::test] +async fn sending_transfer_with_incorrect_signature() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut transfer = alice.create_transfer(transfer_value); + transfer.execute.value = transfer_value / 2; // This should invalidate tx signature + let err = tx_sender.submit_tx(transfer, block_args).await.unwrap_err(); + assert_matches!(err, SubmitTxError::ValidationFailed(_)); +} + +#[test_casing(5, LOAD_TEST_CASES)] +#[tokio::test] +async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParams) { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_load_test_contract() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_load_test_tx(tx_params); + let (sub_result, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(sub_result, L2TxSubmissionResult::Added); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[tokio::test] +async fn sending_reverting_transaction() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_counter_contract(0) + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_counter_tx(1.into(), true); + let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } if output.to_string().contains("This method always reverts") + ); +} + 
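+// Out-of-gas is currently surfaced as a generic revert rather than a dedicated error (cf. `out_of_gas_during_initial_estimate`).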
+#[tokio::test] +async fn sending_transaction_out_of_gas() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_infinite_loop_contract() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_infinite_loop_tx(); + let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(vm_result.result, ExecutionResult::Revert { .. }); +} diff --git a/core/node/api_server/src/utils.rs b/core/node/api_server/src/utils.rs index 6769e773dc7..c7a1134682b 100644 --- a/core/node/api_server/src/utils.rs +++ b/core/node/api_server/src/utils.rs @@ -6,9 +6,33 @@ use std::{ time::{Duration, Instant}, }; +use anyhow::Context; use zksync_dal::{Connection, Core, DalError}; +use zksync_multivm::circuit_sequencer_api_latest::boojum::ethereum_types::U256; use zksync_web3_decl::error::Web3Error; +pub(crate) fn prepare_evm_bytecode(raw: &[u8]) -> anyhow::Result> { + // EVM bytecodes are prefixed with a big-endian `U256` bytecode length. + let bytecode_len_bytes = raw.get(..32).context("length < 32")?; + let bytecode_len = U256::from_big_endian(bytecode_len_bytes); + let bytecode_len: usize = bytecode_len + .try_into() + .map_err(|_| anyhow::anyhow!("length ({bytecode_len}) overflow"))?; + let bytecode = raw.get(32..(32 + bytecode_len)).with_context(|| { + format!( + "prefixed length ({bytecode_len}) exceeds real length ({})", + raw.len() - 32 + ) + })?; + // Since slicing above succeeded, this one is safe. + let padding = &raw[(32 + bytecode_len)..]; + anyhow::ensure!( + padding.iter().all(|&b| b == 0), + "bytecode padding contains non-zero bytes" + ); + Ok(bytecode.to_vec()) +} + /// Opens a readonly transaction over the specified connection. pub(crate) async fn open_readonly_transaction<'r>( conn: &'r mut Connection<'_, Core>, @@ -66,3 +90,15 @@ macro_rules! 
report_filter { ReportFilter::new($interval, &LAST_TIMESTAMP) }}; } + +#[cfg(test)] +mod tests { + use super::*; + use crate::testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}; + + #[test] + fn preparing_evm_bytecode() { + let prepared = prepare_evm_bytecode(RAW_EVM_BYTECODE).unwrap(); + assert_eq!(prepared, PROCESSED_EVM_BYTECODE); + } +} diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs index de763526373..9f5e54a5f4f 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs @@ -37,6 +37,15 @@ impl EnNamespaceServer for EnNamespace { .map_err(|err| self.current_method().map_err(err)) } + async fn block_metadata( + &self, + block_number: L2BlockNumber, + ) -> RpcResult> { + self.block_metadata_impl(block_number) + .await + .map_err(|err| self.current_method().map_err(err)) + } + async fn sync_tokens(&self, block_number: Option) -> RpcResult> { self.sync_tokens_impl(block_number) .await diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs index 3fdba8e78ce..93f0205c77f 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs @@ -4,7 +4,7 @@ use zksync_types::{ Log, Transaction, TransactionId, TransactionReceipt, TransactionVariant, }, transaction_request::CallRequest, - web3::{Bytes, Index, SyncState}, + web3::{Bytes, Index, SyncState, U64Number}, Address, H256, U256, U64, }; use zksync_web3_decl::{ @@ -260,16 +260,20 @@ impl EthNamespaceServer for EthNamespace { async fn fee_history( &self, - block_count: U64, + block_count: U64Number, newest_block: BlockNumber, reward_percentiles: Option>, ) -> RpcResult { self.fee_history_impl( - block_count, + block_count.into(), newest_block, reward_percentiles.unwrap_or_default(), ) .await .map_err(|err| self.current_method().map_err(err)) } + + async fn max_priority_fee_per_gas(&self) -> RpcResult { + Ok(self.max_priority_fee_per_gas_impl()) + } } diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index b55c6ca5946..6ae1fec5b94 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -55,7 +55,7 @@ impl ZksNamespaceServer for ZksNamespace { } async fn get_bridge_contracts(&self) -> RpcResult { - Ok(self.get_bridge_contracts_impl()) + Ok(self.get_bridge_contracts_impl().await) } async fn l1_chain_id(&self) -> RpcResult { diff --git a/core/node/api_server/src/web3/mod.rs b/core/node/api_server/src/web3/mod.rs index bad1b493a5f..620e9185078 100644 --- a/core/node/api_server/src/web3/mod.rs +++ b/core/node/api_server/src/web3/mod.rs @@ -47,6 +47,7 @@ use self::{ use crate::{ execution_sandbox::{BlockStartInfo, VmConcurrencyBarrier}, tx_sender::TxSender, + web3::state::BridgeAddressesHandle, }; pub mod backend_jsonrpsee; @@ -143,7 +144,6 @@ struct OptionalApiParams { #[derive(Debug)] pub struct ApiServer { pool: ConnectionPool, - updaters_pool: ConnectionPool, health_updater: Arc, config: InternalApiConfig, transport: ApiTransport, @@ -153,18 +153,21 @@ pub struct ApiServer { namespaces: Vec, method_tracer: Arc, optional: OptionalApiParams, + bridge_addresses_handle: BridgeAddressesHandle, + 
sealed_l2_block_handle: SealedL2BlockNumber, } #[derive(Debug)] pub struct ApiBuilder { pool: ConnectionPool, - updaters_pool: ConnectionPool, config: InternalApiConfig, polling_interval: Duration, pruning_info_refresh_interval: Duration, // Mandatory params that must be set using builder methods. transport: Option, tx_sender: Option, + bridge_addresses_handle: Option, + sealed_l2_block_handle: Option, // Optional params that may or may not be set using builder methods. We treat `namespaces` // specially because we want to output a warning if they are not set. namespaces: Option>, @@ -178,13 +181,14 @@ impl ApiBuilder { pub fn jsonrpsee_backend(config: InternalApiConfig, pool: ConnectionPool) -> Self { Self { - updaters_pool: pool.clone(), pool, config, polling_interval: Self::DEFAULT_POLLING_INTERVAL, pruning_info_refresh_interval: Self::DEFAULT_PRUNING_INFO_REFRESH_INTERVAL, transport: None, tx_sender: None, + bridge_addresses_handle: None, + sealed_l2_block_handle: None, namespaces: None, method_tracer: Arc::new(MethodTracer::default()), optional: OptionalApiParams::default(), @@ -201,15 +205,6 @@ impl ApiBuilder { self } - /// Configures a dedicated DB pool to be used for updating different information, - /// such as last mined block number or account nonces. This pool is used to execute - /// in a background task. If not called, the main pool will be used. If the API server is under high load, - /// it may make sense to supply a single-connection pool to reduce pool contention with the API methods. - pub fn with_updaters_pool(mut self, pool: ConnectionPool) -> Self { - self.updaters_pool = pool; - self - } - pub fn with_tx_sender(mut self, tx_sender: TxSender) -> Self { self.tx_sender = Some(tx_sender); self @@ -285,6 +280,22 @@ impl ApiBuilder { self } + pub fn with_sealed_l2_block_handle( + mut self, + sealed_l2_block_handle: SealedL2BlockNumber, + ) -> Self { + self.sealed_l2_block_handle = Some(sealed_l2_block_handle); + self + } + + pub fn with_bridge_addresses_handle( + mut self, + bridge_addresses_handle: BridgeAddressesHandle, + ) -> Self { + self.bridge_addresses_handle = Some(bridge_addresses_handle); + self + } + // Intended for tests only. 
#[doc(hidden)] fn with_pub_sub_events(mut self, sender: mpsc::UnboundedSender) -> Self { @@ -312,7 +323,6 @@ impl ApiBuilder { Ok(ApiServer { pool: self.pool, health_updater: Arc::new(health_updater), - updaters_pool: self.updaters_pool, config: self.config, transport, tx_sender: self.tx_sender.context("Transaction sender not set")?, @@ -326,6 +336,12 @@ impl ApiBuilder { }), method_tracer: self.method_tracer, optional: self.optional, + sealed_l2_block_handle: self + .sealed_l2_block_handle + .context("Sealed l2 block handle not set")?, + bridge_addresses_handle: self + .bridge_addresses_handle + .context("Bridge addresses handle not set")?, }) } } @@ -335,11 +351,8 @@ impl ApiServer { self.health_updater.subscribe() } - async fn build_rpc_state( - self, - last_sealed_l2_block: SealedL2BlockNumber, - ) -> anyhow::Result { - let mut storage = self.updaters_pool.connection_tagged("api").await?; + async fn build_rpc_state(self) -> anyhow::Result { + let mut storage = self.pool.connection_tagged("api").await?; let start_info = BlockStartInfo::new(&mut storage, self.pruning_info_refresh_interval).await?; drop(storage); @@ -363,7 +376,8 @@ impl ApiServer { api_config: self.config, start_info, mempool_cache: self.optional.mempool_cache, - last_sealed_l2_block, + last_sealed_l2_block: self.sealed_l2_block_handle, + bridge_addresses_handle: self.bridge_addresses_handle, tree_api: self.optional.tree_api, }) } @@ -371,11 +385,10 @@ impl ApiServer { async fn build_rpc_module( self, pub_sub: Option, - last_sealed_l2_block: SealedL2BlockNumber, ) -> anyhow::Result> { let namespaces = self.namespaces.clone(); let zksync_network_id = self.config.l2_chain_id; - let rpc_state = self.build_rpc_state(last_sealed_l2_block).await?; + let rpc_state = self.build_rpc_state().await?; // Collect all the methods into a single RPC module. let mut rpc = RpcModule::new(()); @@ -473,21 +486,9 @@ impl ApiServer { self, stop_receiver: watch::Receiver, ) -> anyhow::Result { - // Chosen to be significantly smaller than the interval between L2 blocks, but larger than - // the latency of getting the latest sealed L2 block number from Postgres. If the API server - // processes enough requests, information about the latest sealed L2 block will be updated - // by reporting block difference metrics, so the actual update lag would be much smaller than this value. - const SEALED_L2_BLOCK_UPDATE_INTERVAL: Duration = Duration::from_millis(25); - let transport = self.transport; + let mut tasks = vec![]; - let (last_sealed_l2_block, sealed_l2_block_update_task) = SealedL2BlockNumber::new( - self.updaters_pool.clone(), - SEALED_L2_BLOCK_UPDATE_INTERVAL, - stop_receiver.clone(), - ); - - let mut tasks = vec![tokio::spawn(sealed_l2_block_update_task)]; let pub_sub = if matches!(transport, ApiTransport::WebSocket(_)) && self.namespaces.contains(&Namespace::Pubsub) { @@ -510,12 +511,8 @@ impl ApiServer { // framework it'll no longer be needed. 
let health_check = self.health_updater.subscribe(); let (local_addr_sender, local_addr) = oneshot::channel(); - let server_task = tokio::spawn(self.run_jsonrpsee_server( - stop_receiver, - pub_sub, - last_sealed_l2_block, - local_addr_sender, - )); + let server_task = + tokio::spawn(self.run_jsonrpsee_server(stop_receiver, pub_sub, local_addr_sender)); tasks.push(server_task); Ok(ApiServerHandles { @@ -584,7 +581,6 @@ impl ApiServer { self, mut stop_receiver: watch::Receiver, pub_sub: Option, - last_sealed_l2_block: SealedL2BlockNumber, local_addr_sender: oneshot::Sender, ) -> anyhow::Result<()> { let transport = self.transport; @@ -640,7 +636,7 @@ impl ApiServer { tracing::info!("Enabled extended call tracing for {transport_str} API server; this might negatively affect performance"); } - let rpc = self.build_rpc_module(pub_sub, last_sealed_l2_block).await?; + let rpc = self.build_rpc_module(pub_sub).await?; let registered_method_names = Arc::new(rpc.method_names().collect::>()); tracing::debug!( "Built RPC module for {transport_str} server with {} methods: {registered_method_names:?}", diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 71560e4ddb8..726f35ac29a 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -7,7 +7,7 @@ use zksync_types::{ BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, DebugCall, DebugCallType, ResultDebugCall, SupportedTracers, TracerConfig, }, - debug_flat_call::{Action, CallResult, DebugCallFlat}, + debug_flat_call::{Action, CallResult, CallTraceMeta, DebugCallFlat, ResultDebugCallFlat}, l2::L2Tx, transaction_request::CallRequest, web3, H256, U256, @@ -31,8 +31,7 @@ impl DebugNamespace { pub(crate) fn map_call( call: Call, - index: usize, - transaction_hash: H256, + meta: CallTraceMeta, tracer_option: TracerConfig, ) -> CallTracerResult { match tracer_option.tracer { @@ -42,14 +41,13 @@ impl DebugNamespace { )), SupportedTracers::FlatCallTracer => { let mut calls = vec![]; - let mut traces = vec![index]; + let mut traces = vec![meta.index_in_block]; Self::flatten_call( call, &mut calls, &mut traces, tracer_option.tracer_config.only_top_call, - index, - transaction_hash, + &meta, ); CallTracerResult::FlatCallTrace(calls) } @@ -89,8 +87,7 @@ impl DebugNamespace { calls: &mut Vec, trace_address: &mut Vec, only_top_call: bool, - transaction_position: usize, - transaction_hash: H256, + meta: &CallTraceMeta, ) { let subtraces = call.calls.len(); let debug_type = match call.r#type { @@ -120,22 +117,17 @@ impl DebugNamespace { result, subtraces, trace_address: trace_address.clone(), // Clone the current trace address - transaction_position, - transaction_hash, + transaction_position: meta.index_in_block, + transaction_hash: meta.tx_hash, + block_number: meta.block_number, + block_hash: meta.block_hash, r#type: DebugCallType::Call, }); if !only_top_call { for (number, call) in call.calls.into_iter().enumerate() { trace_address.push(number); - Self::flatten_call( - call, - calls, - trace_address, - false, - transaction_position, - transaction_hash, - ); + Self::flatten_call(call, calls, trace_address, false, meta); trace_address.pop(); } } @@ -158,6 +150,7 @@ impl DebugNamespace { let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; + // let block_hash = block_hash self.state. 
self.current_method() .set_block_diff(self.state.last_sealed_l2_block.diff(block_number)); @@ -172,25 +165,31 @@ impl DebugNamespace { SupportedTracers::CallTracer => CallTracerBlockResult::CallTrace( call_traces .into_iter() - .map(|(call, _, _)| ResultDebugCall { + .map(|(call, _)| ResultDebugCall { result: Self::map_default_call(call, options.tracer_config.only_top_call), }) .collect(), ), SupportedTracers::FlatCallTracer => { - let mut flat_calls = vec![]; - for (call, tx_hash, tx_index) in call_traces { - let mut traces = vec![tx_index]; - Self::flatten_call( - call, - &mut flat_calls, - &mut traces, - options.tracer_config.only_top_call, - tx_index, - tx_hash, - ); - } - CallTracerBlockResult::FlatCallTrace(flat_calls) + let res = call_traces + .into_iter() + .map(|(call, meta)| { + let mut traces = vec![meta.index_in_block]; + let mut flat_calls = vec![]; + Self::flatten_call( + call, + &mut flat_calls, + &mut traces, + options.tracer_config.only_top_call, + &meta, + ); + ResultDebugCallFlat { + tx_hash: meta.tx_hash, + result: flat_calls, + } + }) + .collect(); + CallTracerBlockResult::FlatCallTrace(res) } }; Ok(result) @@ -207,13 +206,8 @@ impl DebugNamespace { .get_call_trace(tx_hash) .await .map_err(DalError::generalize)?; - Ok(call_trace.map(|(call_trace, index_in_block)| { - Self::map_call( - call_trace, - index_in_block, - tx_hash, - options.unwrap_or_default(), - ) + Ok(call_trace.map(|(call_trace, meta)| { + Self::map_call(call_trace, meta, options.unwrap_or_default()) })) } @@ -259,7 +253,11 @@ impl DebugNamespace { }; let call_overrides = request.get_call_overrides()?; - let call = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; + let call = L2Tx::from_request( + request.into(), + MAX_ENCODED_TX_SIZE, + block_args.use_evm_emulator(), + )?; let vm_permit = self .state @@ -301,8 +299,6 @@ impl DebugNamespace { )) } }; - // It's a call request, it's safe to keep it zero - let hash = H256::zero(); let call = Call::new_high_level( call.common_data.fee.gas_limit.as_u64(), result.vm.statistics.gas_used, @@ -312,6 +308,12 @@ impl DebugNamespace { revert_reason, result.call_traces, ); - Ok(Self::map_call(call, 0, hash, options)) + let number = block_args.resolved_block_number(); + let meta = CallTraceMeta { + block_number: number.0, + // It's a call request, it's safe to everything as default + ..Default::default() + }; + Ok(Self::map_call(call, meta, options)) } } diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index a412c064fac..a09a0cb92fc 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -1,5 +1,6 @@ use anyhow::Context as _; use zksync_config::{configs::EcosystemContracts, GenesisConfig}; +use zksync_consensus_roles::validator; use zksync_dal::{CoreDal, DalError}; use zksync_types::{ api::en, protocol_version::ProtocolSemanticVersion, tokens::TokenInfo, Address, L1BatchNumber, @@ -86,6 +87,36 @@ impl EnNamespace { ))) } + #[tracing::instrument(skip(self))] + pub async fn block_metadata_impl( + &self, + block_number: L2BlockNumber, + ) -> Result, Web3Error> { + let Some(meta) = self + .state + .acquire_connection() + .await? + // unwrap is ok, because we start outermost transaction. + .transaction_builder() + .unwrap() + // run readonly transaction to perform consistent reads. + .set_readonly() + .build() + .await + .context("TransactionBuilder::build()")? 
+ .consensus_dal() + .block_metadata(validator::BlockNumber(block_number.0.into())) + .await? + else { + return Ok(None); + }; + Ok(Some(en::BlockMetadata( + zksync_protobuf::serde::Serialize + .proto_fmt(&meta, serde_json::value::Serializer) + .unwrap(), + ))) + } + pub(crate) fn current_method(&self) -> &MethodTracer { &self.state.current_method } @@ -177,6 +208,10 @@ impl EnNamespace { genesis_commitment: Some(genesis_batch.metadata.commitment), bootloader_hash: Some(genesis_batch.header.base_system_contracts_hashes.bootloader), default_aa_hash: Some(genesis_batch.header.base_system_contracts_hashes.default_aa), + evm_emulator_hash: genesis_batch + .header + .base_system_contracts_hashes + .evm_emulator, l1_chain_id: self.state.api_config.l1_chain_id, sl_chain_id: Some(self.state.api_config.l1_chain_id.into()), l2_chain_id: self.state.api_config.l2_chain_id, diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 65de6cee7fa..ee37cb989f1 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -12,15 +12,16 @@ use zksync_types::{ web3::{self, Bytes, SyncInfo, SyncState}, AccountTreeId, L2BlockNumber, StorageKey, H256, L2_BASE_TOKEN_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{bytecode::BytecodeMarker, u256_to_h256}; use zksync_web3_decl::{ error::Web3Error, types::{Address, Block, Filter, FilterChanges, Log, U64}, }; use crate::{ + execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, - utils::open_readonly_transaction, + utils::{open_readonly_transaction, prepare_evm_bytecode}, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, state::RpcState, TypedFilter}, }; @@ -77,7 +78,12 @@ impl EthNamespace { drop(connection); let call_overrides = request.get_call_overrides()?; - let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?; + let tx = L2Tx::from_request( + request.into(), + self.state.api_config.max_tx_size, + block_args.use_evm_emulator(), + )?; + // It is assumed that the previous checks has already enforced that the `max_fee_per_gas` is at most u64. let call_result: Vec = self .state @@ -107,10 +113,13 @@ impl EthNamespace { let is_eip712 = request_with_gas_per_pubdata_overridden .eip712_meta .is_some(); - + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); let mut tx: L2Tx = L2Tx::from_request( request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, + block_args.use_evm_emulator(), )?; // The user may not include the proper transaction type during the estimation of @@ -136,6 +145,7 @@ impl EthNamespace { .tx_sender .get_txs_fee_in_wei( tx.into(), + block_args, scale_factor, acceptable_overestimation as u64, state_override, @@ -387,7 +397,22 @@ impl EthNamespace { .get_contract_code_unchecked(address, block_number) .await .map_err(DalError::generalize)?; - Ok(contract_code.unwrap_or_default().into()) + let Some(contract_code) = contract_code else { + return Ok(Bytes::default()); + }; + // Check if the bytecode is an EVM bytecode, and if so, pre-process it correspondingly. 
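+ // Stored EVM bytecodes carry a 32-byte big-endian length prefix and zero padding; `prepare_evm_bytecode` strips both.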
+ let marker = BytecodeMarker::new(contract_code.bytecode_hash); + let prepared_bytecode = if marker == Some(BytecodeMarker::Evm) { + prepare_evm_bytecode(&contract_code.bytecode).with_context(|| { + format!( + "malformed EVM bytecode at address {address:?}, hash = {:?}", + contract_code.bytecode_hash + ) + })? + } else { + contract_code.bytecode + }; + Ok(prepared_bytecode.into()) } pub fn chain_id_impl(&self) -> U64 { @@ -618,10 +643,15 @@ impl EthNamespace { } pub async fn send_raw_transaction_impl(&self, tx_bytes: Bytes) -> Result { - let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let (mut tx, hash) = self + .state + .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; submit_result.map(|_| hash).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); @@ -653,7 +683,7 @@ impl EthNamespace { pub async fn fee_history_impl( &self, - block_count: U64, + block_count: u64, newest_block: BlockNumber, reward_percentiles: Vec, ) -> Result { @@ -661,10 +691,7 @@ impl EthNamespace { .set_block_id(BlockId::Number(newest_block)); // Limit `block_count`. - let block_count = block_count - .as_u64() - .min(self.state.api_config.fee_history_limit) - .max(1); + let block_count = block_count.clamp(1, self.state.api_config.fee_history_limit); let mut connection = self.state.acquire_connection().await?; let newest_l2_block = self @@ -836,6 +863,11 @@ impl EthNamespace { } }) } + + pub fn max_priority_fee_per_gas_impl(&self) -> U256 { + // ZKsync does not require priority fee. + 0u64.into() + } } // Bogus methods. diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index 34010785c52..f8b374b35ba 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, convert::TryInto}; +use std::collections::HashMap; use anyhow::Context as _; use once_cell::sync::Lazy; @@ -37,6 +37,7 @@ use zksync_web3_decl::{ }; use crate::{ + execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, RpcState}, @@ -81,16 +82,21 @@ impl ZksNamespace { eip712_meta.gas_per_pubdata = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE); } + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); let mut tx = L2Tx::from_request( request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, + block_args.use_evm_emulator(), )?; // When we're estimating fee, we are trying to deduce values related to fee, so we should // not consider provided ones. 
tx.common_data.fee.max_priority_fee_per_gas = 0u64.into(); tx.common_data.fee.gas_per_pubdata_limit = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE); - self.estimate_fee(tx.into(), state_override).await + self.estimate_fee(tx.into(), block_args, state_override) + .await } pub async fn estimate_l1_to_l2_gas_impl( @@ -107,17 +113,25 @@ impl ZksNamespace { } } - let tx: L1Tx = request_with_gas_per_pubdata_overridden - .try_into() - .map_err(Web3Error::SerializationError)?; - - let fee = self.estimate_fee(tx.into(), state_override).await?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let tx = L1Tx::from_request( + request_with_gas_per_pubdata_overridden, + block_args.use_evm_emulator(), + ) + .map_err(Web3Error::SerializationError)?; + + let fee = self + .estimate_fee(tx.into(), block_args, state_override) + .await?; Ok(fee.gas_limit) } async fn estimate_fee( &self, tx: Transaction, + block_args: BlockArgs, state_override: Option, ) -> Result { let scale_factor = self.state.api_config.estimate_gas_scale_factor; @@ -130,6 +144,7 @@ impl ZksNamespace { .tx_sender .get_txs_fee_in_wei( tx, + block_args, scale_factor, acceptable_overestimation as u64, state_override, @@ -150,8 +165,8 @@ impl ZksNamespace { self.state.api_config.l2_testnet_paymaster_addr } - pub fn get_bridge_contracts_impl(&self) -> BridgeAddresses { - self.state.api_config.bridge_addresses.clone() + pub async fn get_bridge_contracts_impl(&self) -> BridgeAddresses { + self.state.bridge_addresses_handle.read().await } pub fn l1_chain_id_impl(&self) -> U64 { @@ -1153,10 +1168,15 @@ impl ZksNamespace { &self, tx_bytes: Bytes, ) -> Result<(H256, VmExecutionResultAndLogs), Web3Error> { - let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let (mut tx, hash) = self + .state + .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; submit_result.map(|result| (hash, result.1)).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index 2ebc1b0c9fa..ff9f7af4a87 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -4,13 +4,13 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::{Duration, Instant}, + time::Instant, }; use anyhow::Context as _; use futures::TryFutureExt; use lru::LruCache; -use tokio::sync::{watch, Mutex}; +use tokio::sync::{Mutex, RwLock}; use vise::GaugeGuard; use zksync_config::{ configs::{api::Web3JsonRpcConfig, ContractsConfig}, @@ -195,51 +195,16 @@ impl InternalApiConfig { /// Thread-safe updatable information about the last sealed L2 block number. /// /// The information may be temporarily outdated and thus should only be used where this is OK -/// (e.g., for metrics reporting). The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`] -/// and on an interval specified when creating an instance. -#[derive(Debug, Clone)] -pub(crate) struct SealedL2BlockNumber(Arc); +/// (e.g., for metrics reporting). 
The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`]. +#[derive(Debug, Clone, Default)] +pub struct SealedL2BlockNumber(Arc); impl SealedL2BlockNumber { - /// Creates a handle to the last sealed L2 block number together with a task that will update - /// it on a schedule. - pub fn new( - connection_pool: ConnectionPool, - update_interval: Duration, - stop_receiver: watch::Receiver, - ) -> (Self, impl Future>) { - let this = Self(Arc::default()); - let number_updater = this.clone(); - - let update_task = async move { - loop { - if *stop_receiver.borrow() { - tracing::debug!("Stopping latest sealed L2 block updates"); - return Ok(()); - } - - let mut connection = connection_pool.connection_tagged("api").await.unwrap(); - let Some(last_sealed_l2_block) = - connection.blocks_dal().get_sealed_l2_block_number().await? - else { - tokio::time::sleep(update_interval).await; - continue; - }; - drop(connection); - - number_updater.update(last_sealed_l2_block); - tokio::time::sleep(update_interval).await; - } - }; - - (this, update_task) - } - /// Potentially updates the last sealed L2 block number by comparing it to the provided /// sealed L2 block number (not necessarily the last one). /// /// Returns the last sealed L2 block number after the update. - fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { + pub fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { let prev_value = self .0 .fetch_max(maybe_newer_l2_block_number.0, Ordering::Relaxed); @@ -253,7 +218,7 @@ impl SealedL2BlockNumber { /// Returns the difference between the latest L2 block number and the resolved L2 block number /// from `block_args`. - pub fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { + pub(crate) fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { // We compute the difference in any case, since it may update the stored value. let diff = self.diff(block_args.resolved_block_number()); @@ -265,6 +230,23 @@ impl SealedL2BlockNumber { } } +#[derive(Debug, Clone)] +pub struct BridgeAddressesHandle(Arc>); + +impl BridgeAddressesHandle { + pub fn new(bridge_addresses: api::BridgeAddresses) -> Self { + Self(Arc::new(RwLock::new(bridge_addresses))) + } + + pub async fn update(&self, bridge_addresses: api::BridgeAddresses) { + *self.0.write().await = bridge_addresses; + } + + pub async fn read(&self) -> api::BridgeAddresses { + self.0.read().await.clone() + } +} + /// Holder for the data required for the API to be functional. 
#[derive(Debug, Clone)] pub(crate) struct RpcState { @@ -280,15 +262,23 @@ pub(crate) struct RpcState { pub(super) start_info: BlockStartInfo, pub(super) mempool_cache: Option, pub(super) last_sealed_l2_block: SealedL2BlockNumber, + pub(super) bridge_addresses_handle: BridgeAddressesHandle, } impl RpcState { - pub fn parse_transaction_bytes(&self, bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> { + pub fn parse_transaction_bytes( + &self, + bytes: &[u8], + block_args: &BlockArgs, + ) -> Result<(L2Tx, H256), Web3Error> { let chain_id = self.api_config.l2_chain_id; let (tx_request, hash) = api::TransactionRequest::from_bytes(bytes, chain_id)?; - Ok(( - L2Tx::from_request(tx_request, self.api_config.max_tx_size)?, + L2Tx::from_request( + tx_request, + self.api_config.max_tx_size, + block_args.use_evm_emulator(), + )?, hash, )) } diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 93309fc09cf..2d642b9a04b 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -13,7 +13,10 @@ use zksync_types::L2ChainId; use zksync_vm_executor::oneshot::MockOneshotExecutor; use super::{metrics::ApiTransportLabel, *}; -use crate::{execution_sandbox::SandboxExecutor, tx_sender::TxSenderConfig}; +use crate::{ + execution_sandbox::SandboxExecutor, + tx_sender::{SandboxExecutorOptions, TxSenderConfig}, +}; const TEST_TIMEOUT: Duration = Duration::from_secs(90); const POLL_INTERVAL: Duration = Duration::from_millis(50); @@ -103,6 +106,7 @@ pub struct TestServerBuilder { pool: ConnectionPool, api_config: InternalApiConfig, tx_executor: MockOneshotExecutor, + executor_options: Option, method_tracer: Arc, } @@ -113,6 +117,7 @@ impl TestServerBuilder { api_config, pool, tx_executor: MockOneshotExecutor::default(), + executor_options: None, method_tracer: Arc::default(), } } @@ -131,19 +136,17 @@ impl TestServerBuilder { self } + #[must_use] + pub fn with_executor_options(mut self, options: SandboxExecutorOptions) -> Self { + self.executor_options = Some(options); + self + } + /// Builds an HTTP server. pub async fn build_http(self, stop_receiver: watch::Receiver) -> ApiServerHandles { - spawn_server( - ApiTransportLabel::Http, - self.api_config, - self.pool, - None, - self.tx_executor, - self.method_tracer, - stop_receiver, - ) - .await - .0 + self.spawn_server(ApiTransportLabel::Http, None, stop_receiver) + .await + .0 } /// Builds a WS server. 
@@ -152,60 +155,73 @@ impl TestServerBuilder { websocket_requests_per_minute_limit: Option, stop_receiver: watch::Receiver, ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { - spawn_server( + self.spawn_server( ApiTransportLabel::Ws, - self.api_config, - self.pool, websocket_requests_per_minute_limit, - self.tx_executor, - self.method_tracer, stop_receiver, ) .await } -} -async fn spawn_server( - transport: ApiTransportLabel, - api_config: InternalApiConfig, - pool: ConnectionPool, - websocket_requests_per_minute_limit: Option, - tx_executor: MockOneshotExecutor, - method_tracer: Arc, - stop_receiver: watch::Receiver, -) -> (ApiServerHandles, mpsc::UnboundedReceiver) { - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, vm_barrier) = - create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor).await; - let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); - - let mut namespaces = Namespace::DEFAULT.to_vec(); - namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); - - let server_builder = match transport { - ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), - ApiTransportLabel::Ws => { - let mut builder = ApiBuilder::jsonrpsee_backend(api_config, pool) - .ws(0) - .with_subscriptions_limit(100); - if let Some(websocket_requests_per_minute_limit) = websocket_requests_per_minute_limit { - builder = builder - .with_websocket_requests_per_minute_limit(websocket_requests_per_minute_limit); + async fn spawn_server( + self, + transport: ApiTransportLabel, + websocket_requests_per_minute_limit: Option, + stop_receiver: watch::Receiver, + ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { + let Self { + tx_executor, + executor_options, + pool, + api_config, + method_tracer, + } = self; + + let tx_executor = if let Some(options) = executor_options { + SandboxExecutor::custom_mock(tx_executor, options) + } else { + SandboxExecutor::mock(tx_executor).await + }; + let (tx_sender, vm_barrier) = + create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor).await; + let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); + + let mut namespaces = Namespace::DEFAULT.to_vec(); + namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = + BridgeAddressesHandle::new(api_config.bridge_addresses.clone()); + + let server_builder = match transport { + ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), + ApiTransportLabel::Ws => { + let mut builder = ApiBuilder::jsonrpsee_backend(api_config, pool) + .ws(0) + .with_subscriptions_limit(100); + if let Some(websocket_requests_per_minute_limit) = + websocket_requests_per_minute_limit + { + builder = builder.with_websocket_requests_per_minute_limit( + websocket_requests_per_minute_limit, + ); + } + builder } - builder - } - }; - let server_handles = server_builder - .with_polling_interval(POLL_INTERVAL) - .with_tx_sender(tx_sender) - .with_vm_barrier(vm_barrier) - .with_pub_sub_events(pub_sub_events_sender) - .with_method_tracer(method_tracer) - .enable_api_namespaces(namespaces) - .build() - .expect("Unable to build API server") - .run(stop_receiver) - .await - .expect("Failed spawning JSON-RPC server"); - (server_handles, pub_sub_events_receiver) + }; + let server_handles = server_builder + .with_polling_interval(POLL_INTERVAL) + 
.with_tx_sender(tx_sender) + .with_vm_barrier(vm_barrier) + .with_pub_sub_events(pub_sub_events_sender) + .with_method_tracer(method_tracer) + .enable_api_namespaces(namespaces) + .with_sealed_l2_block_handle(sealed_l2_block_handle) + .with_bridge_addresses_handle(bridge_addresses_handle) + .build() + .expect("Unable to build API server") + .run(stop_receiver) + .await + .expect("Failed spawning JSON-RPC server"); + (server_handles, pub_sub_events_receiver) + } } diff --git a/core/node/api_server/src/web3/tests/debug.rs b/core/node/api_server/src/web3/tests/debug.rs index 4f021b777ae..28a22511fa9 100644 --- a/core/node/api_server/src/web3/tests/debug.rs +++ b/core/node/api_server/src/web3/tests/debug.rs @@ -139,32 +139,27 @@ impl HttpTest for TraceBlockFlatTest { .await? .unwrap_flat(); - // A transaction with 2 nested calls will convert into 3 Flattened calls. - // Also in this test, all tx have the same # of nested calls - assert_eq!( - block_traces.len(), - tx_results.len() * (tx_results[0].call_traces.len() + 1) - ); + assert_eq!(block_traces.len(), tx_results.len()); + + let tx_traces = &block_traces.first().unwrap().result; // First tx has 2 nested calls, thus 2 sub-traces - assert_eq!(block_traces[0].subtraces, 2); - assert_eq!(block_traces[0].trace_address, [0]); + assert_eq!(tx_traces[0].subtraces, 2); + assert_eq!(tx_traces[0].trace_address, [0]); // Second flat-call (fist nested call) do not have nested calls - assert_eq!(block_traces[1].subtraces, 0); - assert_eq!(block_traces[1].trace_address, [0, 0]); + assert_eq!(tx_traces[1].subtraces, 0); + assert_eq!(tx_traces[1].trace_address, [0, 0]); - let top_level_call_indexes = [0, 3, 6]; + let top_level_call_indexes = [0, 1, 2]; let top_level_traces = top_level_call_indexes .iter() .map(|&i| block_traces[i].clone()); for (top_level_trace, tx_result) in top_level_traces.zip(&tx_results) { - assert_eq!(top_level_trace.action.from, Address::zero()); - assert_eq!(top_level_trace.action.to, BOOTLOADER_ADDRESS); - assert_eq!( - top_level_trace.action.gas, - tx_result.transaction.gas_limit() - ); + let trace = top_level_trace.result.first().unwrap(); + assert_eq!(trace.action.from, Address::zero()); + assert_eq!(trace.action.to, BOOTLOADER_ADDRESS); + assert_eq!(trace.action.gas, tx_result.transaction.gas_limit()); } // TODO: test inner calls } diff --git a/core/node/api_server/src/web3/tests/filters.rs b/core/node/api_server/src/web3/tests/filters.rs index 7342ce7e979..c865526815d 100644 --- a/core/node/api_server/src/web3/tests/filters.rs +++ b/core/node/api_server/src/web3/tests/filters.rs @@ -23,7 +23,7 @@ impl HttpTest for BasicFilterChangesTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } @@ -109,7 +109,7 @@ impl HttpTest for LogFilterChangesTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 632e263c653..27932931880 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -16,6 +16,7 @@ use zksync_config::{ }, GenesisConfig, }; +use zksync_contracts::BaseSystemContracts; use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, CoreDal}; use zksync_multivm::interface::{ TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, 
VmEvent, @@ -31,17 +32,22 @@ use zksync_system_constants::{ }; use zksync_types::{ api, - block::{pack_block_info, L2BlockHeader}, + block::{pack_block_info, L2BlockHasher, L2BlockHeader}, + fee_model::{BatchFeeInput, FeeParams}, get_nonce_key, l2::L2Tx, storage::get_code_key, + system_contracts::get_system_smart_contracts, tokens::{TokenInfo, TokenMetadata}, tx::IncludedTxLocation, utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance}, AccountTreeId, Address, L1BatchNumber, Nonce, ProtocolVersionId, StorageKey, StorageLog, H256, U256, U64, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{ + bytecode::{hash_bytecode, hash_evm_bytecode}, + u256_to_h256, +}; use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::{ client::{Client, DynClient, L2}, @@ -50,7 +56,7 @@ use zksync_web3_decl::{ http_client::HttpClient, rpc_params, types::{ - error::{ErrorCode, OVERSIZED_RESPONSE_CODE}, + error::{ErrorCode, INVALID_PARAMS_CODE, OVERSIZED_RESPONSE_CODE}, ErrorObjectOwned, }, }, @@ -58,7 +64,11 @@ use zksync_web3_decl::{ }; use super::*; -use crate::web3::testonly::TestServerBuilder; +use crate::{ + testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}, + tx_sender::SandboxExecutorOptions, + web3::testonly::TestServerBuilder, +}; mod debug; mod filters; @@ -134,13 +144,18 @@ async fn setting_response_size_limits() { trait HttpTest: Send + Sync { /// Prepares the storage before the server is started. The default implementation performs genesis. fn storage_initialization(&self) -> StorageInitialization { - StorageInitialization::Genesis + StorageInitialization::genesis() } fn transaction_executor(&self) -> MockOneshotExecutor { MockOneshotExecutor::default() } + /// Allows to override sandbox executor options. + fn executor_options(&self) -> Option { + None + } + fn method_tracer(&self) -> Arc { Arc::default() } @@ -157,7 +172,9 @@ trait HttpTest: Send + Sync { /// Storage initialization strategy. #[derive(Debug)] enum StorageInitialization { - Genesis, + Genesis { + evm_emulator: bool, + }, Recovery { logs: Vec, factory_deps: HashMap>, @@ -168,6 +185,16 @@ impl StorageInitialization { const SNAPSHOT_RECOVERY_BATCH: L1BatchNumber = L1BatchNumber(23); const SNAPSHOT_RECOVERY_BLOCK: L2BlockNumber = L2BlockNumber(23); + const fn genesis() -> Self { + Self::Genesis { + evm_emulator: false, + } + } + + const fn genesis_with_evm() -> Self { + Self::Genesis { evm_emulator: true } + } + fn empty_recovery() -> Self { Self::Recovery { logs: vec![], @@ -181,12 +208,29 @@ impl StorageInitialization { storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { match self { - Self::Genesis => { - let params = GenesisParams::load_genesis_params(GenesisConfig { + Self::Genesis { evm_emulator } => { + let mut config = GenesisConfig { l2_chain_id: network_config.zksync_network_id, ..mock_genesis_config() - }) + }; + let mut base_system_contracts = BaseSystemContracts::load_from_disk(); + if evm_emulator { + config.evm_emulator_hash = Some(config.default_aa_hash.unwrap()); + base_system_contracts.evm_emulator = + Some(base_system_contracts.default_aa.clone()); + } else { + assert!(config.evm_emulator_hash.is_none()); + } + + let params = GenesisParams::from_genesis_config( + config, + base_system_contracts, + // We cannot load system contracts with EVM emulator yet because these contracts are missing. + // This doesn't matter for tests because the EVM emulator won't be invoked. 
+ get_system_smart_contracts(false), + ) .unwrap(); + if storage.blocks_dal().is_genesis_needed().await? { insert_genesis_batch(storage, ¶ms).await?; } @@ -245,11 +289,13 @@ async fn test_http_server(test: impl HttpTest) { let genesis = GenesisConfig::for_tests(); let mut api_config = InternalApiConfig::new(&web3_config, &contracts_config, &genesis); api_config.filters_disabled = test.filters_disabled(); - let mut server_handles = TestServerBuilder::new(pool.clone(), api_config) + let mut server_builder = TestServerBuilder::new(pool.clone(), api_config) .with_tx_executor(test.transaction_executor()) - .with_method_tracer(test.method_tracer()) - .build_http(stop_receiver) - .await; + .with_method_tracer(test.method_tracer()); + if let Some(executor_options) = test.executor_options() { + server_builder = server_builder.with_executor_options(executor_options); + } + let mut server_handles = server_builder.build_http(stop_receiver).await; let local_addr = server_handles.wait_until_ready().await; let client = Client::http(format!("http://{local_addr}/").parse().unwrap()) @@ -428,6 +474,10 @@ async fn store_events( Ok((tx_location, events)) } +fn scaled_sensible_fee_input(scale: f64) -> BatchFeeInput { + FeeParams::sensible_v1_default().scale(scale, scale) +} + #[derive(Debug)] struct HttpServerBasicsTest; @@ -625,7 +675,7 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { fn storage_initialization(&self) -> StorageInitialization { let address = Address::repeat_byte(1); let code_key = get_code_key(&address); - let code_hash = H256::repeat_byte(2); + let code_hash = hash_bytecode(&[0; 32]); let balance_key = storage_key_for_eth_balance(&address); let logs = vec![ StorageLog::new_write_log(code_key, code_hash), @@ -1102,3 +1152,241 @@ impl HttpTest for GenesisConfigTest { async fn tracing_genesis_config() { test_http_server(GenesisConfigTest).await; } + +#[derive(Debug)] +struct GetBytecodeTest; + +impl GetBytecodeTest { + async fn insert_evm_bytecode( + connection: &mut Connection<'_, Core>, + at_block: L2BlockNumber, + address: Address, + ) -> anyhow::Result<()> { + let evm_bytecode_hash = hash_evm_bytecode(RAW_EVM_BYTECODE); + let code_log = StorageLog::new_write_log(get_code_key(&address), evm_bytecode_hash); + connection + .storage_logs_dal() + .append_storage_logs(at_block, &[code_log]) + .await?; + + let factory_deps = HashMap::from([(evm_bytecode_hash, RAW_EVM_BYTECODE.to_vec())]); + connection + .factory_deps_dal() + .insert_factory_deps(at_block, &factory_deps) + .await?; + Ok(()) + } +} + +#[async_trait] +impl HttpTest for GetBytecodeTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let genesis_evm_address = Address::repeat_byte(1); + let mut connection = pool.connection().await?; + Self::insert_evm_bytecode(&mut connection, L2BlockNumber(0), genesis_evm_address).await?; + + for contract in get_system_smart_contracts(false) { + let bytecode = client + .get_code(*contract.account_id.address(), None) + .await?; + assert_eq!(bytecode.0, contract.bytecode); + } + + let bytecode = client.get_code(genesis_evm_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + + let latest_block_variants = [ + api::BlockNumber::Pending, + api::BlockNumber::Latest, + api::BlockNumber::Committed, + ]; + let latest_block_variants = latest_block_variants.map(api::BlockIdVariant::BlockNumber); + + let genesis_block_variants = [ + api::BlockIdVariant::BlockNumber(api::BlockNumber::Earliest), + 
api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(0.into())), + api::BlockIdVariant::BlockHashObject(api::BlockHashObject { + block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + }), + ]; + for at_block in latest_block_variants + .into_iter() + .chain(genesis_block_variants) + { + println!("Testing {at_block:?} with genesis EVM code, latest block: 0"); + let bytecode = client.get_code(genesis_evm_address, Some(at_block)).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + + // Create another block with an EVM bytecode. + let new_bytecode_address = Address::repeat_byte(2); + let mut connection = pool.connection().await?; + let block_header = store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + Self::insert_evm_bytecode(&mut connection, L2BlockNumber(1), new_bytecode_address).await?; + + let bytecode = client.get_code(genesis_evm_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + let bytecode = client.get_code(new_bytecode_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + + let new_block_variants = [ + api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(1.into())), + api::BlockIdVariant::BlockHashObject(api::BlockHashObject { + block_hash: block_header.hash, + }), + ]; + for at_block in latest_block_variants.into_iter().chain(new_block_variants) { + println!("Testing {at_block:?} with new EVM code, latest block: 1"); + let bytecode = client + .get_code(new_bytecode_address, Some(at_block)) + .await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + for at_block in genesis_block_variants { + println!("Testing {at_block:?} with new EVM code, latest block: 1"); + let bytecode = client + .get_code(new_bytecode_address, Some(at_block)) + .await?; + assert!(bytecode.0.is_empty()); + } + + for at_block in latest_block_variants + .into_iter() + .chain(new_block_variants) + .chain(genesis_block_variants) + { + println!("Testing {at_block:?} with genesis EVM code, latest block: 1"); + let bytecode = client.get_code(genesis_evm_address, Some(at_block)).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + Ok(()) + } +} + +#[tokio::test] +async fn getting_bytecodes() { + test_http_server(GetBytecodeTest).await; +} + +#[derive(Debug)] +struct FeeHistoryTest; + +#[async_trait] +impl HttpTest for FeeHistoryTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut connection = pool.connection().await?; + let block1 = L2BlockHeader { + batch_fee_input: scaled_sensible_fee_input(1.0), + base_fee_per_gas: 100, + ..create_l2_block(1) + }; + store_custom_l2_block(&mut connection, &block1, &[]).await?; + let block2 = L2BlockHeader { + batch_fee_input: scaled_sensible_fee_input(2.0), + base_fee_per_gas: 200, + ..create_l2_block(2) + }; + store_custom_l2_block(&mut connection, &block2, &[]).await?; + + let all_pubdata_prices = [ + 0, + block1.batch_fee_input.fair_pubdata_price(), + block2.batch_fee_input.fair_pubdata_price(), + ] + .map(U256::from); + + let history = client + .fee_history(1_000.into(), api::BlockNumber::Latest, Some(vec![])) + .await?; + assert_eq!(history.inner.oldest_block, 0.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [0, 100, 200, 200].map(U256::from) // The latest value is duplicated + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices); + // Values below are not filled. 
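The fee-history assertions here and below pin down the response shape: `oldest_block` is clamped by the requested count, and `base_fee_per_gas` carries one entry per returned block plus a trailing entry that repeats the last returned value. A hypothetical helper (not the server implementation) that reproduces the arrays checked in this test:

    /// Expected `(oldest_block, base_fee_per_gas)` for a query of at most `count`
    /// blocks ending at `newest`, given per-block base fees.
    fn expected_base_fees(per_block: &[u64], newest: usize, count: usize) -> (usize, Vec<u64>) {
        let oldest = newest.saturating_sub(count.saturating_sub(1));
        let mut fees = per_block[oldest..=newest].to_vec();
        // The entry for the block after `newest` repeats the last returned value.
        fees.push(*fees.last().unwrap());
        (oldest, fees)
    }

    fn main() {
        let per_block = [0, 100, 200]; // base fees of blocks 0..=2 in the test
        assert_eq!(expected_base_fees(&per_block, 2, 1_000), (0, vec![0, 100, 200, 200]));
        assert_eq!(expected_base_fees(&per_block, 1, 1_000), (0, vec![0, 100, 100]));
        assert_eq!(expected_base_fees(&per_block, 2, 2), (1, vec![100, 200, 200]));
        assert_eq!(expected_base_fees(&per_block, 1, 1), (1, vec![100, 100]));
    }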
+ assert_eq!(history.inner.gas_used_ratio, [0.0; 3]); + assert_eq!(history.inner.base_fee_per_blob_gas, [U256::zero(); 4]); + assert_eq!(history.inner.blob_gas_used_ratio, [0.0; 3]); + + // Check supplying hexadecimal block count + let hex_history: api::FeeHistory = client + .request( + "eth_feeHistory", + rpc_params!["0xaa", "latest", [] as [f64; 0]], + ) + .await?; + assert_eq!(hex_history, history); + + // ...and explicitly decimal count (which should've been supplied in the first call) for exhaustiveness + let dec_history: api::FeeHistory = client + .request( + "eth_feeHistory", + rpc_params![1_000, "latest", [] as [f64; 0]], + ) + .await?; + assert_eq!(dec_history, history); + + // Check partial histories: blocks 0..=1 + let history = client + .fee_history( + 1_000.into(), + api::BlockNumber::Number(1.into()), + Some(vec![]), + ) + .await?; + assert_eq!(history.inner.oldest_block, 0.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [0, 100, 100].map(U256::from) + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[..2]); + + // Blocks 1..=2 + let history = client + .fee_history(2.into(), api::BlockNumber::Latest, Some(vec![])) + .await?; + assert_eq!(history.inner.oldest_block, 1.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [100, 200, 200].map(U256::from) + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[1..]); + + // Blocks 1..=1 + let history = client + .fee_history(1.into(), api::BlockNumber::Number(1.into()), Some(vec![])) + .await?; + assert_eq!(history.inner.oldest_block, 1.into()); + assert_eq!(history.inner.base_fee_per_gas, [100, 100].map(U256::from)); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[1..2]); + + // Non-existing newest block. + let err = client + .fee_history( + 1000.into(), + api::BlockNumber::Number(100.into()), + Some(vec![]), + ) + .await + .unwrap_err(); + assert_matches!( + err, + ClientError::Call(err) if err.code() == INVALID_PARAMS_CODE + ); + Ok(()) + } +} + +#[tokio::test] +async fn getting_fee_history() { + test_http_server(FeeHistoryTest).await; +} diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs index 1d425f8b951..e814081afa0 100644 --- a/core/node/api_server/src/web3/tests/unstable.rs +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -27,14 +27,9 @@ impl HttpTest for GetTeeProofsTest { assert!(proof.is_empty()); - let mut storage = pool.connection().await.unwrap(); - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(batch_no) - .await?; - let pubkey = vec![0xDE, 0xAD, 0xBE, 0xEF]; let attestation = vec![0xC0, 0xFF, 0xEE]; + let mut storage = pool.connection().await.unwrap(); let mut tee_proof_generation_dal = storage.tee_proof_generation_dal(); tee_proof_generation_dal .save_attestation(&pubkey, &attestation) diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index d8086c6c6ad..45128f579cd 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -9,20 +9,21 @@ use std::{ }; use api::state_override::{OverrideAccount, StateOverride}; +use test_casing::test_casing; +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_multivm::interface::{ - ExecutionResult, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, + ExecutionResult, OneshotEnv, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, }; -use zksync_node_fee_model::BatchFeeModelInputProvider; use 
zksync_types::{ - api::ApiStorageLog, - fee_model::{BatchFeeInput, FeeParams}, - get_intrinsic_constants, - transaction_request::CallRequest, - K256PrivateKey, L2ChainId, PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, - U256, + api::ApiStorageLog, fee_model::BatchFeeInput, get_intrinsic_constants, + transaction_request::CallRequest, K256PrivateKey, L2ChainId, PackedEthSignature, + StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, }; use zksync_utils::u256_to_h256; -use zksync_vm_executor::oneshot::MockOneshotExecutor; +use zksync_vm_executor::oneshot::{ + BaseSystemContractsProvider, ContractsKind, MockOneshotExecutor, OneshotEnvParameters, + ResolvedBlockInfo, +}; use zksync_web3_decl::namespaces::DebugNamespaceClient; use super::*; @@ -42,11 +43,7 @@ impl ExpectedFeeInput { fn expect_for_block(&self, number: api::BlockNumber, scale: f64) { *self.0.lock().unwrap() = match number { api::BlockNumber::Number(number) => create_l2_block(number.as_u32()).batch_fee_input, - _ => ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - scale, - scale, - ), + _ => scaled_sensible_fee_input(scale), }; } @@ -69,6 +66,59 @@ impl ExpectedFeeInput { } } +/// Mock base contracts provider. Necessary to use with EVM emulator because bytecode of the real emulator is not available yet. +#[derive(Debug)] +struct BaseContractsWithMockEvmEmulator(BaseSystemContracts); + +impl Default for BaseContractsWithMockEvmEmulator { + fn default() -> Self { + let mut contracts = BaseSystemContracts::load_from_disk(); + contracts.evm_emulator = Some(contracts.default_aa.clone()); + Self(contracts) + } +} + +#[async_trait] +impl BaseSystemContractsProvider for BaseContractsWithMockEvmEmulator { + async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result { + assert!(block_info.use_evm_emulator()); + Ok(self.0.clone()) + } +} + +fn executor_options_with_evm_emulator() -> SandboxExecutorOptions { + let base_contracts = Arc::::default(); + SandboxExecutorOptions { + estimate_gas: OneshotEnvParameters::new( + base_contracts.clone(), + L2ChainId::default(), + AccountTreeId::default(), + u32::MAX, + ), + eth_call: OneshotEnvParameters::new( + base_contracts, + L2ChainId::default(), + AccountTreeId::default(), + u32::MAX, + ), + } +} + +/// Fetches base contract hashes from the genesis block. +async fn genesis_contract_hashes( + connection: &mut Connection<'_, Core>, +) -> anyhow::Result { + Ok(connection + .blocks_dal() + .get_l2_block_header(L2BlockNumber(0)) + .await? + .context("no genesis block")? + .base_system_contracts_hashes) +} + #[derive(Debug, Default)] struct CallTest { fee_input: ExpectedFeeInput, @@ -165,28 +215,108 @@ impl HttpTest for CallTest { // Check that the method handler fetches fee inputs for recent blocks. To do that, we create a new block // with a large fee input; it should be loaded by `ApiFeeInputProvider` and override the input provided by the wrapped mock provider. 
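The mock contracts provider above is plain trait-object injection: the sandbox executor options hold an `Arc` of a provider trait, and tests substitute an implementation that returns doctored contracts. A generic sketch of that wiring, assuming the async-trait, anyhow, and tokio crates; `ContractsProvider` and `Contracts` are invented stand-ins for the zksync-era traits:

    use std::sync::Arc;

    use async_trait::async_trait;

    /// Stand-in for `BaseSystemContracts`; the tests only care that the emulator slot is filled.
    #[derive(Debug, Clone)]
    struct Contracts {
        evm_emulator: Option<Vec<u8>>,
    }

    /// Stand-in for `BaseSystemContractsProvider`.
    #[async_trait]
    trait ContractsProvider: Send + Sync {
        async fn contracts(&self) -> anyhow::Result<Contracts>;
    }

    /// Mock that hands out a fixed bytecode as the "EVM emulator", mirroring how the
    /// test reuses the default AA bytecode while the real emulator is unavailable.
    #[derive(Debug)]
    struct MockEvmEmulatorProvider(Contracts);

    #[async_trait]
    impl ContractsProvider for MockEvmEmulatorProvider {
        async fn contracts(&self) -> anyhow::Result<Contracts> {
            Ok(self.0.clone())
        }
    }

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let provider: Arc<dyn ContractsProvider> = Arc::new(MockEvmEmulatorProvider(Contracts {
            evm_emulator: Some(vec![0xfe]), // placeholder bytecode
        }));
        // Code under test sees only the trait object, so swapping providers needs no other changes.
        assert!(provider.contracts().await?.evm_emulator.is_some());
        Ok(())
    }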
let mut block_header = create_l2_block(2); - block_header.batch_fee_input = - ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - 2.5, - 2.5, - ); + block_header.batch_fee_input = scaled_sensible_fee_input(2.5); store_custom_l2_block(&mut connection, &block_header, &[]).await?; // Fee input is not scaled further as per `ApiFeeInputProvider` implementation self.fee_input.expect_custom(block_header.batch_fee_input); - let call_request = CallTest::call_request(b"block=3"); - let call_result = client.call(call_request.clone(), None, None).await?; + let call_request = Self::call_request(b"block=3"); + let call_result = client.call(call_request, None, None).await?; assert_eq!(call_result.0, b"output"); + let call_request_without_target = CallRequest { + to: None, + ..Self::call_request(b"block=3") + }; + let err = client + .call(call_request_without_target, None, None) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) } } +fn assert_null_to_address_error(error: &ClientError) { + if let ClientError::Call(error) = error { + assert_eq!(error.code(), 3); + assert!(error.message().contains("toAddressIsNull"), "{error:?}"); + assert!(error.data().is_none(), "{error:?}"); + } else { + panic!("Unexpected error: {error:?}"); + } +} + #[tokio::test] async fn call_method_basics() { test_http_server(CallTest::default()).await; } +fn evm_emulator_responses(tx: &Transaction, env: &OneshotEnv) -> ExecutionResult { + assert!(env + .system + .base_system_smart_contracts + .evm_emulator + .is_some()); + match tx.execute.calldata.as_slice() { + b"no_target" => assert_eq!(tx.recipient_account(), None), + _ => assert!(tx.recipient_account().is_some()), + } + ExecutionResult::Success { + output: b"output".to_vec(), + } +} + +#[derive(Debug)] +struct CallTestWithEvmEmulator; + +#[async_trait] +impl HttpTest for CallTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_call_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. 
+ let mut connection = pool.connection().await?; + let block_header = L2BlockHeader { + base_system_contracts_hashes: genesis_contract_hashes(&mut connection).await?, + ..create_l2_block(1) + }; + store_custom_l2_block(&mut connection, &block_header, &[]).await?; + + let call_result = client.call(CallTest::call_request(&[]), None, None).await?; + assert_eq!(call_result.0, b"output"); + + let call_request_without_target = CallRequest { + to: None, + ..CallTest::call_request(b"no_target") + }; + let call_result = client.call(call_request_without_target, None, None).await?; + assert_eq!(call_result.0, b"output"); + Ok(()) + } +} + +#[tokio::test] +async fn call_method_with_evm_emulator() { + test_http_server(CallTestWithEvmEmulator).await; +} + #[derive(Debug, Default)] struct CallTestAfterSnapshotRecovery { fee_input: ExpectedFeeInput, @@ -257,16 +387,20 @@ struct SendRawTransactionTest { } impl SendRawTransactionTest { - fn transaction_bytes_and_hash() -> (Vec, H256) { + fn transaction_bytes_and_hash(include_to: bool) -> (Vec, H256) { let private_key = Self::private_key(); let tx_request = api::TransactionRequest { chain_id: Some(L2ChainId::default().as_u64()), from: Some(private_key.address()), - to: Some(Address::repeat_byte(2)), + to: include_to.then(|| Address::repeat_byte(2)), value: 123_456.into(), gas: (get_intrinsic_constants().l2_tx_intrinsic_gas * 2).into(), gas_price: StateKeeperConfig::for_tests().minimal_l2_gas_price.into(), - input: vec![1, 2, 3, 4].into(), + input: if include_to { + vec![1, 2, 3, 4].into() + } else { + b"no_target".to_vec().into() + }, ..api::TransactionRequest::default() }; let data = tx_request.get_rlp().unwrap(); @@ -301,7 +435,7 @@ impl HttpTest for SendRawTransactionTest { factory_deps: HashMap::default(), } } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } @@ -313,7 +447,7 @@ impl HttpTest for SendRawTransactionTest { L2BlockNumber(1) }; tx_executor.set_tx_responses(move |tx, env| { - assert_eq!(tx.hash(), Self::transaction_bytes_and_hash().1); + assert_eq!(tx.hash(), Self::transaction_bytes_and_hash(true).1); assert_eq!(env.l1_batch.first_l2_block.number, pending_block.0); ExecutionResult::Success { output: vec![] } }); @@ -334,7 +468,7 @@ impl HttpTest for SendRawTransactionTest { .await?; } - let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash(); + let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash(true); let send_result = client.send_raw_transaction(tx_bytes.into()).await?; assert_eq!(send_result, tx_hash); Ok(()) @@ -357,6 +491,90 @@ async fn send_raw_transaction_after_snapshot_recovery() { .await; } +#[derive(Debug)] +struct SendRawTransactionWithoutToAddressTest; + +#[async_trait] +impl HttpTest for SendRawTransactionWithoutToAddressTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut storage = pool.connection().await?; + storage + .storage_logs_dal() + .append_storage_logs( + L2BlockNumber(0), + &[SendRawTransactionTest::balance_storage_log()], + ) + .await?; + + let (tx_bytes, _) = SendRawTransactionTest::transaction_bytes_and_hash(false); + let err = client + .send_raw_transaction(tx_bytes.into()) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) + } +} + +#[tokio::test] +async fn send_raw_transaction_fails_without_to_address() { + test_http_server(SendRawTransactionWithoutToAddressTest).await; +} + +#[derive(Debug)] +struct SendRawTransactionTestWithEvmEmulator; + +#[async_trait] +impl HttpTest for 
SendRawTransactionTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_tx_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + // Manually set sufficient balance for the transaction account. + let mut storage = pool.connection().await?; + storage + .storage_logs_dal() + .append_storage_logs( + L2BlockNumber(0), + &[SendRawTransactionTest::balance_storage_log()], + ) + .await?; + + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(true); + let send_result = client.send_raw_transaction(tx_bytes.into()).await?; + assert_eq!(send_result, tx_hash); + + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(false); + let send_result = client.send_raw_transaction(tx_bytes.into()).await?; + assert_eq!(send_result, tx_hash); + Ok(()) + } +} + +#[tokio::test] +async fn send_raw_transaction_with_evm_emulator() { + test_http_server(SendRawTransactionTestWithEvmEmulator).await; +} + #[derive(Debug)] struct SendTransactionWithDetailedOutputTest; @@ -405,7 +623,7 @@ impl SendTransactionWithDetailedOutputTest { impl HttpTest for SendTransactionWithDetailedOutputTest { fn transaction_executor(&self) -> MockOneshotExecutor { let mut tx_executor = MockOneshotExecutor::default(); - let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(); + let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(true); let vm_execution_logs = VmExecutionLogs { storage_logs: self.storage_logs(), events: self.vm_events(), @@ -423,6 +641,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { logs: vm_execution_logs.clone(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, } }); tx_executor @@ -443,7 +662,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { ) .await?; - let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(); + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(true); let send_result = client .send_raw_transaction_with_detailed_output(tx_bytes.into()) .await?; @@ -562,12 +781,7 @@ impl HttpTest for TraceCallTest { // Check that the method handler fetches fee inputs for recent blocks. To do that, we create a new block // with a large fee input; it should be loaded by `ApiFeeInputProvider` and override the input provided by the wrapped mock provider. 
let mut block_header = create_l2_block(2); - block_header.batch_fee_input = - ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - 3.0, - 3.0, - ); + block_header.batch_fee_input = scaled_sensible_fee_input(3.0); store_custom_l2_block(&mut connection, &block_header, &[]).await?; // Fee input is not scaled further as per `ApiFeeInputProvider` implementation self.fee_input.expect_custom(block_header.batch_fee_input); @@ -575,6 +789,16 @@ impl HttpTest for TraceCallTest { let call_result = client.trace_call(call_request.clone(), None, None).await?; Self::assert_debug_call(&call_request, &call_result.unwrap_default()); + let call_request_without_target = CallRequest { + to: None, + ..CallTest::call_request(b"block=3") + }; + let err = client + .call(call_request_without_target, None, None) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) } } @@ -651,16 +875,96 @@ async fn trace_call_after_snapshot_recovery() { test_http_server(TraceCallTestAfterSnapshotRecovery::default()).await; } +#[derive(Debug)] +struct TraceCallTestWithEvmEmulator; + +#[async_trait] +impl HttpTest for TraceCallTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_call_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. + let mut connection = pool.connection().await?; + let block_header = L2BlockHeader { + base_system_contracts_hashes: genesis_contract_hashes(&mut connection).await?, + ..create_l2_block(1) + }; + store_custom_l2_block(&mut connection, &block_header, &[]).await?; + + client + .trace_call(CallTest::call_request(&[]), None, None) + .await?; + + let call_request_without_target = CallRequest { + to: None, + ..CallTest::call_request(b"no_target") + }; + client + .trace_call(call_request_without_target, None, None) + .await?; + Ok(()) + } +} + +#[tokio::test] +async fn trace_call_method_with_evm_emulator() { + test_http_server(TraceCallTestWithEvmEmulator).await; +} + +#[derive(Debug, Clone, Copy)] +enum EstimateMethod { + EthEstimateGas, + ZksEstimateFee, + ZksEstimateGasL1ToL2, +} + +impl EstimateMethod { + const ALL: [Self; 3] = [ + Self::EthEstimateGas, + Self::ZksEstimateFee, + Self::ZksEstimateGasL1ToL2, + ]; + + async fn query(self, client: &DynClient, req: CallRequest) -> Result { + match self { + Self::EthEstimateGas => client.estimate_gas(req, None, None).await, + Self::ZksEstimateFee => client + .estimate_fee(req, None) + .await + .map(|fee| fee.gas_limit), + Self::ZksEstimateGasL1ToL2 => client.estimate_gas_l1_to_l2(req, None).await, + } + } +} + #[derive(Debug)] struct EstimateGasTest { gas_limit_threshold: Arc, + method: EstimateMethod, snapshot_recovery: bool, } impl EstimateGasTest { - fn new(snapshot_recovery: bool) -> Self { + fn new(method: EstimateMethod, snapshot_recovery: bool) -> Self { Self { gas_limit_threshold: Arc::default(), + method, snapshot_recovery, } } @@ -681,9 +985,12 @@ impl HttpTest for EstimateGasTest { L2BlockNumber(1) }; let gas_limit_threshold = self.gas_limit_threshold.clone(); + let should_set_nonce = !matches!(self.method, 
EstimateMethod::ZksEstimateGasL1ToL2); tx_executor.set_tx_responses(move |tx, env| { assert_eq!(tx.execute.calldata(), [] as [u8; 0]); - assert_eq!(tx.nonce(), Some(Nonce(0))); + if should_set_nonce { + assert_eq!(tx.nonce(), Some(Nonce(0))); + } assert_eq!(env.l1_batch.first_l2_block.number, pending_block_number.0); let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst); @@ -706,8 +1013,9 @@ impl HttpTest for EstimateGasTest { let l2_transaction = create_l2_transaction(10, 100); for threshold in [10_000, 50_000, 100_000, 1_000_000] { self.gas_limit_threshold.store(threshold, Ordering::Relaxed); - let output = client - .estimate_gas(l2_transaction.clone().into(), None, None) + let output = self + .method + .query(client, l2_transaction.clone().into()) .await?; assert!( output >= U256::from(threshold), @@ -732,19 +1040,17 @@ impl HttpTest for EstimateGasTest { let mut call_request = CallRequest::from(l2_transaction); call_request.from = Some(SendRawTransactionTest::private_key().address()); call_request.value = Some(1_000_000.into()); - client - .estimate_gas(call_request.clone(), None, None) - .await?; + + self.method.query(client, call_request.clone()).await?; call_request.value = Some(U256::max_value()); - let error = client - .estimate_gas(call_request, None, None) - .await - .unwrap_err(); + let error = self.method.query(client, call_request).await.unwrap_err(); if let ClientError::Call(error) = error { let error_msg = error.message(); + // L1 and L2 transactions have differing error messages in this case. assert!( - error_msg.to_lowercase().contains("insufficient"), + error_msg.to_lowercase().contains("insufficient") + || error_msg.to_lowercase().contains("overflow"), "{error_msg}" ); } else { @@ -754,14 +1060,16 @@ impl HttpTest for EstimateGasTest { } } +#[test_casing(3, EstimateMethod::ALL)] #[tokio::test] -async fn estimate_gas_basics() { - test_http_server(EstimateGasTest::new(false)).await; +async fn estimate_gas_basics(method: EstimateMethod) { + test_http_server(EstimateGasTest::new(method, false)).await; } +#[test_casing(3, EstimateMethod::ALL)] #[tokio::test] -async fn estimate_gas_after_snapshot_recovery() { - test_http_server(EstimateGasTest::new(true)).await; +async fn estimate_gas_after_snapshot_recovery(method: EstimateMethod) { + test_http_server(EstimateGasTest::new(method, true)).await; } #[derive(Debug)] @@ -818,9 +1126,7 @@ impl HttpTest for EstimateGasWithStateOverrideTest { if let ClientError::Call(error) = error { let error_msg = error.message(); assert!( - error_msg - .to_lowercase() - .contains("insufficient balance for transfer"), + error_msg.to_lowercase().contains("insufficient funds"), "{error_msg}" ); } else { @@ -832,6 +1138,87 @@ impl HttpTest for EstimateGasWithStateOverrideTest { #[tokio::test] async fn estimate_gas_with_state_override() { - let inner = EstimateGasTest::new(false); + let inner = EstimateGasTest::new(EstimateMethod::EthEstimateGas, false); test_http_server(EstimateGasWithStateOverrideTest { inner }).await; } + +#[derive(Debug)] +struct EstimateGasWithoutToAddressTest { + method: EstimateMethod, +} + +#[async_trait] +impl HttpTest for EstimateGasWithoutToAddressTest { + async fn test( + &self, + client: &DynClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut l2_transaction = create_l2_transaction(10, 100); + l2_transaction.execute.contract_address = None; + l2_transaction.common_data.signature = vec![]; // Remove invalidated signature so that it doesn't trip estimation logic + let err = self + .method + 
.query(client, l2_transaction.into()) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) + } +} + +#[test_casing(3, EstimateMethod::ALL)] +#[tokio::test] +async fn estimate_gas_fails_without_to_address(method: EstimateMethod) { + test_http_server(EstimateGasWithoutToAddressTest { method }).await; +} + +#[derive(Debug)] +struct EstimateGasTestWithEvmEmulator { + method: EstimateMethod, +} + +#[async_trait] +impl HttpTest for EstimateGasTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_tx_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let call_request = CallRequest { + from: Some(Address::repeat_byte(1)), + to: Some(Address::repeat_byte(2)), + ..CallRequest::default() + }; + self.method.query(client, call_request).await?; + + let call_request = CallRequest { + from: Some(Address::repeat_byte(1)), + to: None, + data: Some(b"no_target".to_vec().into()), + ..CallRequest::default() + }; + self.method.query(client, call_request).await?; + Ok(()) + } +} + +#[test_casing(3, EstimateMethod::ALL)] +#[tokio::test] +async fn estimate_gas_with_evm_emulator(method: EstimateMethod) { + test_http_server(EstimateGasTestWithEvmEmulator { method }).await; +} diff --git a/core/node/api_server/src/web3/tests/ws.rs b/core/node/api_server/src/web3/tests/ws.rs index 28b2e2beb55..008747a63bc 100644 --- a/core/node/api_server/src/web3/tests/ws.rs +++ b/core/node/api_server/src/web3/tests/ws.rs @@ -147,7 +147,7 @@ async fn notifiers_start_after_snapshot_recovery() { trait WsTest: Send + Sync { /// Prepares the storage before the server is started. The default implementation performs genesis. fn storage_initialization(&self) -> StorageInitialization { - StorageInitialization::Genesis + StorageInitialization::genesis() } async fn test( @@ -234,7 +234,7 @@ impl WsTest for BasicSubscriptionsTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } @@ -403,7 +403,7 @@ impl WsTest for LogSubscriptionsTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } diff --git a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs index 0199b06ebd6..0922101e59d 100644 --- a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs +++ b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs @@ -6,7 +6,7 @@ use std::{ }; use anyhow::Context; -use bigdecimal::{num_bigint::ToBigInt, BigDecimal, Zero}; +use bigdecimal::{BigDecimal, Zero}; use zksync_config::BaseTokenAdjusterConfig; use zksync_eth_client::{BoundEthInterface, CallFunctionArgs, Options}; use zksync_node_fee_model::l1_gas_price::TxParamsProvider; @@ -57,7 +57,7 @@ impl BaseTokenL1Behaviour { self.update_last_persisted_l1_ratio(prev_ratio.clone()); tracing::info!( "Fetched current base token ratio from the L1: {}", - prev_ratio.to_bigint().unwrap() + prev_ratio ); prev_ratio }; @@ -71,7 +71,7 @@ impl BaseTokenL1Behaviour { "Skipping L1 update. 
current_ratio {}, previous_ratio {}, deviation {}", current_ratio, prev_ratio, - deviation.to_bigint().unwrap() + deviation ); return Ok(()); } @@ -98,7 +98,7 @@ impl BaseTokenL1Behaviour { new_ratio.denominator.get(), base_fee_per_gas, priority_fee_per_gas, - deviation.to_bigint().unwrap() + deviation ); METRICS .l1_gas_used diff --git a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs index 220f100e5dc..785c9c4dfd7 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs @@ -90,6 +90,9 @@ impl BaseTokenRatioPersister { result: OperationResult::Success, }] .observe(start_time.elapsed()); + METRICS + .ratio + .set((ratio.numerator.get() as f64) / (ratio.denominator.get() as f64)); return Ok(ratio); } Err(err) => { diff --git a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs index e16ea16ff0f..b613e5219dd 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs @@ -81,7 +81,7 @@ impl DBBaseTokenRatioProvider { // Though the DB should be populated very soon after the server starts, it is possible // to have no ratios in the DB right after genesis. Having initial ratios in the DB // from the genesis stage will eliminate this possibility. - tracing::error!("No latest price found in the database. Using default ratio."); + tracing::warn!("No latest price found in the database. Using default ratio."); BaseTokenConversionRatio::default() } Err(err) => anyhow::bail!("Failed to get latest base token ratio: {:?}", err), diff --git a/core/node/base_token_adjuster/src/metrics.rs b/core/node/base_token_adjuster/src/metrics.rs index d84e4da0c0c..17a48c1b5c3 100644 --- a/core/node/base_token_adjuster/src/metrics.rs +++ b/core/node/base_token_adjuster/src/metrics.rs @@ -18,6 +18,7 @@ pub(crate) struct OperationResultLabels { #[metrics(prefix = "base_token_adjuster")] pub(crate) struct BaseTokenAdjusterMetrics { pub l1_gas_used: Gauge, + pub ratio: Gauge, #[metrics(buckets = Buckets::LATENCIES)] pub external_price_api_latency: Family>, #[metrics(buckets = Buckets::LATENCIES)] diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index e54b0490aa1..b2c4ee6465f 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -59,7 +59,6 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora l1_tx_count: 0, l2_tx_count: 0, fee_account_address: Address::default(), - pubdata_params: Default::default(), base_fee_per_gas: 0, batch_fee_input: Default::default(), gas_per_pubdata_limit: 0, @@ -68,6 +67,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; storage .blocks_dal() @@ -88,6 +88,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora system_logs: vec![], protocol_version: Some(ProtocolVersionId::latest()), pubdata_input: None, + fee_address: Default::default(), }; storage .blocks_dal() diff --git a/core/node/commitment_generator/Cargo.toml b/core/node/commitment_generator/Cargo.toml index 5ec8410124f..1f4645414cb 100644 --- a/core/node/commitment_generator/Cargo.toml +++ 
b/core/node/commitment_generator/Cargo.toml @@ -20,6 +20,7 @@ zksync_utils.workspace = true zksync_eth_client.workspace = true zksync_contracts.workspace = true zksync_multivm.workspace = true +zksync_system_constants.workspace = true circuit_sequencer_api_1_4_0.workspace = true circuit_sequencer_api_1_4_1.workspace = true circuit_sequencer_api_1_5_0.workspace = true diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs index d592845e6df..294b6c50985 100644 --- a/core/node/commitment_generator/src/lib.rs +++ b/core/node/commitment_generator/src/lib.rs @@ -10,7 +10,7 @@ use zksync_multivm::zk_evm_latest::ethereum_types::U256; use zksync_types::{ blob::num_blobs_required, commitment::{ - AuxCommitments, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, + AuxCommitments, BlobHash, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, L1BatchCommitment, L1BatchCommitmentArtifacts, L1BatchCommitmentMode, }, writes::{InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord}, @@ -180,6 +180,7 @@ impl CommitmentGenerator { rollup_root_hash: tree_data.hash, bootloader_code_hash: header.base_system_contracts_hashes.bootloader, default_aa_code_hash: header.base_system_contracts_hashes.default_aa, + evm_emulator_code_hash: header.base_system_contracts_hashes.evm_emulator, protocol_version, }; let touched_slots = connection @@ -266,7 +267,7 @@ impl CommitmentGenerator { } state_diffs.sort_unstable_by_key(|rec| (rec.address, rec.key)); - let (blob_commitments, blob_linear_hashes) = if protocol_version.is_post_1_4_2() { + let blob_hashes = if protocol_version.is_post_1_4_2() { let pubdata_input = header.pubdata_input.with_context(|| { format!("`pubdata_input` is missing for L1 batch #{l1_batch_number}") })?; @@ -280,28 +281,35 @@ impl CommitmentGenerator { pubdata_input, ); - (commitments, linear_hashes) + commitments + .into_iter() + .zip(linear_hashes) + .map(|(commitment, linear_hash)| BlobHash { + commitment, + linear_hash, + }) + .collect::>() } else { - ( - vec![H256::zero(); num_blobs_required(&protocol_version)], - vec![H256::zero(); num_blobs_required(&protocol_version)], - ) + vec![Default::default(); num_blobs_required(&protocol_version)] }; - let mut connection = self - .connection_pool - .connection_tagged("commitment_generator") - .await?; - let aggregated_root = read_aggregation_root(&mut connection, l1_batch_number).await?; + let aggregation_root = if protocol_version.is_pre_gateway() { + H256::zero() + } else { + let mut connection = self + .connection_pool + .connection_tagged("commitment_generator") + .await?; + read_aggregation_root(&mut connection, l1_batch_number).await? + }; CommitmentInput::PostBoojum { common, system_logs: header.system_logs, state_diffs, aux_commitments, - blob_commitments, - blob_linear_hashes, - aggregated_root, + blob_hashes, + aggregation_root, } }; @@ -380,13 +388,10 @@ impl CommitmentGenerator { (L1BatchCommitmentMode::Rollup, _) => { // Do nothing } - ( - L1BatchCommitmentMode::Validium, - CommitmentInput::PostBoojum { - blob_commitments, .. - }, - ) => { - blob_commitments.fill(H256::zero()); + (L1BatchCommitmentMode::Validium, CommitmentInput::PostBoojum { blob_hashes, .. 
}) => { + for hashes in blob_hashes { + hashes.commitment = H256::zero(); + } } (L1BatchCommitmentMode::Validium, _) => { /* Do nothing */ } } @@ -396,14 +401,9 @@ impl CommitmentGenerator { match (self.commitment_mode, &mut commitment.auxiliary_output) { ( L1BatchCommitmentMode::Validium, - L1BatchAuxiliaryOutput::PostBoojum { - blob_linear_hashes, - blob_commitments, - .. - }, + L1BatchAuxiliaryOutput::PostBoojum { blob_hashes, .. }, ) => { - blob_linear_hashes.fill(H256::zero()); - blob_commitments.fill(H256::zero()); + blob_hashes.fill(Default::default()); } _ => { /* Do nothing */ } } diff --git a/core/node/commitment_generator/src/utils.rs b/core/node/commitment_generator/src/utils.rs index 9ed6682733c..d405a1256a2 100644 --- a/core/node/commitment_generator/src/utils.rs +++ b/core/node/commitment_generator/src/utils.rs @@ -19,6 +19,7 @@ use zk_evm_1_5_0::{ use zksync_dal::{Connection, Core, CoreDal}; use zksync_l1_contract_interface::i_executor::commit::kzg::ZK_SYNC_BYTES_PER_BLOB; use zksync_multivm::{interface::VmEvent, utils::get_used_bootloader_memory_bytes}; +use zksync_system_constants::message_root::{AGG_TREE_HEIGHT_KEY, AGG_TREE_NODES_KEY}; use zksync_types::{ vm::VmVersion, web3::keccak256, @@ -249,9 +250,11 @@ pub(crate) fn pubdata_to_blob_linear_hashes( // Now, we need to calculate the linear hashes of the blobs. // Firstly, let's pad the pubdata to the size of the blob. if pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { - let padding = - vec![0u8; ZK_SYNC_BYTES_PER_BLOB - pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB]; - pubdata_input.extend(padding); + pubdata_input.resize( + pubdata_input.len() + + (ZK_SYNC_BYTES_PER_BLOB - pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB), + 0, + ); } let mut result = vec![H256::zero(); blobs_required]; @@ -270,12 +273,6 @@ pub(crate) async fn read_aggregation_root( connection: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, ) -> anyhow::Result { - // Position of `FullTree::_height` in `MessageRoot`'s storage layout. - const AGG_TREE_HEIGHT_KEY: usize = 3; - - // Position of `FullTree::nodes` in `MessageRoot`'s storage layout. - const AGG_TREE_NODES_KEY: usize = 5; - let (_, last_l2_block) = connection .blocks_dal() .get_l2_block_range_of_l1_batch(l1_batch_number) @@ -284,7 +281,7 @@ pub(crate) async fn read_aggregation_root( let agg_tree_height_slot = StorageKey::new( AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS), - u256_to_h256(AGG_TREE_HEIGHT_KEY.into()), + H256::from_low_u64_be(AGG_TREE_HEIGHT_KEY as u64), ); let agg_tree_height = connection diff --git a/core/node/consensus/src/batch.rs b/core/node/consensus/src/batch.rs deleted file mode 100644 index 08246c4e5c0..00000000000 --- a/core/node/consensus/src/batch.rs +++ /dev/null @@ -1,275 +0,0 @@ -//! L1 Batch representation for sending over p2p network. -use anyhow::Context as _; -use zksync_concurrency::{ctx, error::Wrap as _}; -use zksync_consensus_roles::validator; -use zksync_dal::consensus_dal::Payload; -use zksync_l1_contract_interface::i_executor; -use zksync_metadata_calculator::api_server::{TreeApiClient, TreeEntryWithProof}; -use zksync_system_constants as constants; -use zksync_types::{ - abi, - block::{unpack_block_info, L2BlockHasher}, - AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, Transaction, H256, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::storage::ConnectionPool; - -/// Commitment to the last block of a batch. 
-pub(crate) struct LastBlockCommit { - /// Hash of the `StoredBatchInfo` which is stored on L1. - /// The hashed `StoredBatchInfo` contains a `root_hash` of the L2 state, - /// which contains state of the `SystemContext` contract, - /// which contains enough data to reconstruct the hash - /// of the last L2 block of the batch. - pub(crate) info: H256, -} - -/// Witness proving what is the last block of a batch. -/// Contains the hash and the number of the last block. -pub(crate) struct LastBlockWitness { - info: i_executor::structures::StoredBatchInfo, - protocol_version: ProtocolVersionId, - - current_l2_block_info: TreeEntryWithProof, - tx_rolling_hash: TreeEntryWithProof, - l2_block_hash_entry: TreeEntryWithProof, -} - -/// Commitment to an L1 batch. -pub(crate) struct L1BatchCommit { - pub(crate) number: L1BatchNumber, - pub(crate) this_batch: LastBlockCommit, - pub(crate) prev_batch: LastBlockCommit, -} - -/// L1Batch with witness that can be -/// verified against `L1BatchCommit`. -pub struct L1BatchWithWitness { - pub(crate) blocks: Vec, - pub(crate) this_batch: LastBlockWitness, - pub(crate) prev_batch: LastBlockWitness, -} - -impl LastBlockWitness { - /// Address of the SystemContext contract. - fn system_context_addr() -> AccountTreeId { - AccountTreeId::new(constants::SYSTEM_CONTEXT_ADDRESS) - } - - /// Storage key of the `SystemContext.current_l2_block_info` field. - fn current_l2_block_info_key() -> U256 { - StorageKey::new( - Self::system_context_addr(), - constants::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ) - .hashed_key_u256() - } - - /// Storage key of the `SystemContext.tx_rolling_hash` field. - fn tx_rolling_hash_key() -> U256 { - StorageKey::new( - Self::system_context_addr(), - constants::SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ) - .hashed_key_u256() - } - - /// Storage key of the entry of the `SystemContext.l2BlockHash[]` array, corresponding to l2 - /// block with number i. - fn l2_block_hash_entry_key(i: L2BlockNumber) -> U256 { - let key = h256_to_u256(constants::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION) - + U256::from(i.0) % U256::from(constants::SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES); - StorageKey::new(Self::system_context_addr(), u256_to_h256(key)).hashed_key_u256() - } - - /// Loads a `LastBlockWitness` from storage. - async fn load( - ctx: &ctx::Ctx, - n: L1BatchNumber, - pool: &ConnectionPool, - tree: &dyn TreeApiClient, - ) -> ctx::Result { - let mut conn = pool.connection(ctx).await.wrap("pool.connection()")?; - let batch = conn - .batch(ctx, n) - .await - .wrap("batch()")? - .context("batch not in storage")?; - - let proofs = tree - .get_proofs( - n, - vec![ - Self::current_l2_block_info_key(), - Self::tx_rolling_hash_key(), - ], - ) - .await - .context("get_proofs()")?; - if proofs.len() != 2 { - return Err(anyhow::format_err!("proofs.len()!=2").into()); - } - let current_l2_block_info = proofs[0].clone(); - let tx_rolling_hash = proofs[1].clone(); - let (block_number, _) = unpack_block_info(current_l2_block_info.value.as_bytes().into()); - let prev = L2BlockNumber( - block_number - .checked_sub(1) - .context("L2BlockNumber underflow")? 
- .try_into() - .context("L2BlockNumber overflow")?, - ); - let proofs = tree - .get_proofs(n, vec![Self::l2_block_hash_entry_key(prev)]) - .await - .context("get_proofs()")?; - if proofs.len() != 1 { - return Err(anyhow::format_err!("proofs.len()!=1").into()); - } - let l2_block_hash_entry = proofs[0].clone(); - Ok(Self { - info: i_executor::structures::StoredBatchInfo::from(&batch), - protocol_version: batch - .header - .protocol_version - .context("missing protocol_version")?, - - current_l2_block_info, - tx_rolling_hash, - l2_block_hash_entry, - }) - } - - /// Verifies the proof against the commit and returns the hash - /// of the last L2 block. - pub(crate) fn verify(&self, comm: &LastBlockCommit) -> anyhow::Result<(L2BlockNumber, H256)> { - // Verify info. - anyhow::ensure!(comm.info == self.info.hash()); - - // Check the protocol version. - anyhow::ensure!( - self.protocol_version >= ProtocolVersionId::Version13, - "unsupported protocol version" - ); - - let (block_number, block_timestamp) = - unpack_block_info(self.current_l2_block_info.value.as_bytes().into()); - let prev = L2BlockNumber( - block_number - .checked_sub(1) - .context("L2BlockNumber underflow")? - .try_into() - .context("L2BlockNumber overflow")?, - ); - - // Verify merkle paths. - self.current_l2_block_info - .verify(Self::current_l2_block_info_key(), self.info.batch_hash) - .context("invalid merkle path for current_l2_block_info")?; - self.tx_rolling_hash - .verify(Self::tx_rolling_hash_key(), self.info.batch_hash) - .context("invalid merkle path for tx_rolling_hash")?; - self.l2_block_hash_entry - .verify(Self::l2_block_hash_entry_key(prev), self.info.batch_hash) - .context("invalid merkle path for l2_block_hash entry")?; - - let block_number = L2BlockNumber(block_number.try_into().context("block_number overflow")?); - // Derive hash of the last block - Ok(( - block_number, - L2BlockHasher::hash( - block_number, - block_timestamp, - self.l2_block_hash_entry.value, - self.tx_rolling_hash.value, - self.protocol_version, - ), - )) - } - - /// Last L2 block of the batch. - pub fn last_block(&self) -> validator::BlockNumber { - let (n, _) = unpack_block_info(self.current_l2_block_info.value.as_bytes().into()); - validator::BlockNumber(n) - } -} - -impl L1BatchWithWitness { - /// Loads an `L1BatchWithWitness` from storage. - pub(crate) async fn load( - ctx: &ctx::Ctx, - number: L1BatchNumber, - pool: &ConnectionPool, - tree: &dyn TreeApiClient, - ) -> ctx::Result { - let prev_batch = LastBlockWitness::load(ctx, number - 1, pool, tree) - .await - .with_wrap(|| format!("LastBlockWitness::make({})", number - 1))?; - let this_batch = LastBlockWitness::load(ctx, number, pool, tree) - .await - .with_wrap(|| format!("LastBlockWitness::make({number})"))?; - let mut conn = pool.connection(ctx).await.wrap("connection()")?; - let this = Self { - blocks: conn - .payloads( - ctx, - std::ops::Range { - start: prev_batch.last_block() + 1, - end: this_batch.last_block() + 1, - }, - ) - .await - .wrap("payloads()")?, - prev_batch, - this_batch, - }; - Ok(this) - } - - /// Verifies the L1Batch and witness against the commitment. 
- /// WARNING: the following fields of the payload are not currently verified: - /// * `l1_gas_price` - /// * `l2_fair_gas_price` - /// * `fair_pubdata_price` - /// * `virtual_blocks` - /// * `operator_address` - /// * `protocol_version` (present both in payload and witness, but neither has a commitment) - pub(crate) fn verify(&self, comm: &L1BatchCommit) -> anyhow::Result<()> { - let (last_number, last_hash) = self.this_batch.verify(&comm.this_batch)?; - let (mut prev_number, mut prev_hash) = self.prev_batch.verify(&comm.prev_batch)?; - anyhow::ensure!( - self.prev_batch - .info - .batch_number - .checked_add(1) - .context("batch_number overflow")? - == u64::from(comm.number.0) - ); - anyhow::ensure!(self.this_batch.info.batch_number == u64::from(comm.number.0)); - for (i, b) in self.blocks.iter().enumerate() { - anyhow::ensure!(b.l1_batch_number == comm.number); - anyhow::ensure!(b.protocol_version == self.this_batch.protocol_version); - anyhow::ensure!(b.last_in_batch == (i + 1 == self.blocks.len())); - prev_number += 1; - let mut hasher = L2BlockHasher::new(prev_number, b.timestamp, prev_hash); - for t in &b.transactions { - // Reconstruct transaction by converting it back and forth to `abi::Transaction`. - // This allows us to verify that the transaction actually matches the transaction - // hash. - // TODO: make consensus payload contain `abi::Transaction` instead. - // TODO: currently the payload doesn't contain the block number, which is - // annoying. Consider adding it to payload. - let t2: Transaction = abi::Transaction::try_from(t.clone())?.try_into()?; - anyhow::ensure!(t == &t2); - hasher.push_tx_hash(t.hash()); - } - prev_hash = hasher.finalize(self.this_batch.protocol_version); - anyhow::ensure!(prev_hash == b.hash); - } - anyhow::ensure!(prev_hash == last_hash); - anyhow::ensure!(prev_number == last_number); - Ok(()) - } -} diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index 3584d533f66..4ad7a551ab4 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -169,7 +169,6 @@ pub(super) fn executor( server_addr: cfg.server_addr, public_addr: net::Host(cfg.public_addr.0.clone()), max_payload_size: cfg.max_payload_size, - max_batch_size: cfg.max_batch_size, node_key: node_key(secrets) .context("node_key")? .context("missing node_key")?, @@ -184,6 +183,5 @@ pub(super) fn executor( gossip_static_outbound, rpc, debug_page, - batch_poll_interval: time::Duration::seconds(1), }) } diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index e4be8d9d687..5e9aadc8f37 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -4,7 +4,7 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_consensus_storage::{BlockStore, PersistentBlockStore as _}; use zksync_dal::consensus_dal; use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; @@ -21,6 +21,10 @@ use crate::{ storage::{self, ConnectionPool}, }; +/// If less than TEMPORARY_FETCHER_THRESHOLD certificates are missing, +/// the temporary fetcher will stop fetching blocks. +pub(crate) const TEMPORARY_FETCHER_THRESHOLD: u64 = 10; + /// External node. 
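// A minimal sketch, not part of the patch: TEMPORARY_FETCHER_THRESHOLD bounds how long
// the external node keeps backfilling blocks over JSON RPC. Fetching continues only while
// more than `threshold` certificates are still missing; the names below are illustrative
// stand-ins for the store/queue state used by `temporary_block_fetcher`.
fn should_keep_fetching(next_persisted: u64, next_to_queue: u64, threshold: u64) -> bool {
    // `next_persisted` is the first block without a persisted certificate,
    // `next_to_queue` is the next block the payload queue expects.
    next_persisted + threshold < next_to_queue
}

fn main() {
    // With the threshold of 10 used in the patch, fetching stops once fewer
    // than 10 certificates are missing.
    assert!(should_keep_fetching(0, 100, 10));
    assert!(!should_keep_fetching(95, 100, 10));
}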
pub(super) struct EN { pub(super) pool: ConnectionPool, @@ -32,8 +36,13 @@ impl EN { /// Task running a consensus node for the external node. /// It may be a validator, but it cannot be a leader (cannot propose blocks). /// - /// NOTE: Before starting the consensus node it fetches all the blocks + /// If `enable_pregenesis` is false, + /// before starting the consensus node it fetches all the blocks /// older than consensus genesis from the main node using json RPC. + /// NOTE: currently `enable_pregenesis` is hardcoded to `false` in `era.rs`. + /// True is used only in tests. Once the `block_metadata` RPC is enabled everywhere + /// this flag should be removed and fetching pregenesis blocks will always be done + /// over the gossip network. pub async fn run( self, ctx: &ctx::Ctx, @@ -41,6 +50,7 @@ impl EN { cfg: ConsensusConfig, secrets: ConsensusSecrets, build_version: Option, + enable_pregenesis: bool, ) -> anyhow::Result<()> { let attester = config::attester_key(&secrets).context("attester_key")?; @@ -72,13 +82,15 @@ impl EN { drop(conn); // Fetch blocks before the genesis. - self.fetch_blocks( - ctx, - &mut payload_queue, - Some(global_config.genesis.first_block), - ) - .await - .wrap("fetch_blocks()")?; + if !enable_pregenesis { + self.fetch_blocks( + ctx, + &mut payload_queue, + Some(global_config.genesis.first_block), + ) + .await + .wrap("fetch_blocks()")?; + } // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. @@ -88,7 +100,12 @@ impl EN { let old = old; loop { if let Ok(new) = self.fetch_global_config(ctx).await { - if new != old { + // We verify the transition here to work around the situation + // where `consenus_global_config()` RPC fails randomly and fallback + // to `consensus_genesis()` RPC activates. + if new != old + && consensus_dal::verify_config_transition(&old, &new).is_ok() + { return Err(anyhow::format_err!( "global config changed: old {old:?}, new {new:?}" ) @@ -102,21 +119,35 @@ impl EN { // Run consensus component. // External nodes have a payload queue which they use to fetch data from the main node. - let (store, runner) = Store::new(ctx, self.pool.clone(), Some(payload_queue)) - .await - .wrap("Store::new()")?; + let (store, runner) = Store::new( + ctx, + self.pool.clone(), + Some(payload_queue), + Some(self.client.clone()), + ) + .await + .wrap("Store::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + // Run the temporary fetcher until the certificates are backfilled. + // Temporary fetcher should be removed once json RPC syncing is fully deprecated. + s.spawn_bg({ + let store = store.clone(); + async { + let store = store; + self.temporary_block_fetcher(ctx, &store).await?; + tracing::info!( + "temporary block fetcher finished, switching to p2p fetching only" + ); + Ok(()) + } + }); + let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); - let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) - .await - .wrap("BatchStore::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); - let attestation = Arc::new(attestation::Controller::new(attester)); s.spawn_bg(self.run_attestation_controller( ctx, @@ -127,7 +158,6 @@ impl EN { let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, build_version)?, block_store, - batch_store, validator: config::validator_key(&secrets) .context("validator_key")? 
.map(|key| executor::Validator { @@ -192,7 +222,11 @@ impl EN { let mut next = attester::BatchNumber(0); loop { let status = loop { - match self.fetch_attestation_status(ctx).await { + match self + .fetch_attestation_status(ctx) + .await + .wrap("fetch_attestation_status()") + { Err(err) => tracing::warn!("{err:#}"), Ok(status) => { if status.genesis != cfg.genesis.hash() { @@ -210,10 +244,13 @@ impl EN { "waiting for hash of batch {:?}", status.next_batch_to_attest ); - let hash = self - .pool - .wait_for_batch_hash(ctx, status.next_batch_to_attest) - .await?; + let hash = consensus_dal::batch_hash( + &self + .pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?, + ); let Some(committee) = registry .attester_committee_for( ctx, @@ -348,8 +385,42 @@ impl EN { } } + /// Fetches blocks from the main node directly, until the certificates + /// are backfilled. This allows for smooth transition from json RPC to p2p block syncing. + pub(crate) async fn temporary_block_fetcher( + &self, + ctx: &ctx::Ctx, + store: &Store, + ) -> ctx::Result<()> { + const MAX_CONCURRENT_REQUESTS: usize = 30; + scope::run!(ctx, |ctx, s| async { + let (send, mut recv) = ctx::channel::bounded(MAX_CONCURRENT_REQUESTS); + s.spawn(async { + let Some(mut next) = store.next_block(ctx).await? else { + return Ok(()); + }; + while store.persisted().borrow().next().0 + TEMPORARY_FETCHER_THRESHOLD < next.0 { + let n = L2BlockNumber(next.0.try_into().context("overflow")?); + self.sync_state.wait_for_main_node_block(ctx, n).await?; + send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; + next = next.next(); + } + drop(send); + Ok(()) + }); + while let Ok(block) = recv.recv_or_disconnected(ctx).await? { + store + .queue_next_fetched_block(ctx, block.join(ctx).await?) + .await + .wrap("queue_next_fetched_block()")?; + } + Ok(()) + }) + .await + } + /// Fetches blocks from the main node in range `[cursor.next()..end)`. - pub(super) async fn fetch_blocks( + async fn fetch_blocks( &self, ctx: &ctx::Ctx, queue: &mut storage::PayloadQueue, @@ -363,7 +434,7 @@ impl EN { s.spawn(async { let send = send; while end.map_or(true, |end| next < end) { - let n = L2BlockNumber(next.0.try_into().unwrap()); + let n = L2BlockNumber(next.0.try_into().context("overflow")?); self.sync_state.wait_for_main_node_block(ctx, n).await?; send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; next = next.next(); @@ -372,7 +443,7 @@ impl EN { }); while end.map_or(true, |end| queue.next() < end) { let block = recv.recv(ctx).await?.join(ctx).await?; - queue.send(block).await?; + queue.send(block).await.context("queue.send()")?; } Ok(()) }) @@ -381,7 +452,8 @@ impl EN { if first < queue.next() { self.pool .wait_for_payload(ctx, queue.next().prev().unwrap()) - .await?; + .await + .wrap("wait_for_payload()")?; } Ok(()) } diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 3150f839680..916b7cdd89a 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -59,8 +59,18 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - en.run(ctx, actions, cfg, secrets, Some(build_version)) - .await + // We will enable it once the main node on all envs supports + // `block_metadata()` JSON RPC method. 
+ let enable_pregenesis = false; + en.run( + ctx, + actions, + cfg, + secrets, + Some(build_version), + enable_pregenesis, + ) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index ff9cdf86528..8bf078120aa 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -6,10 +6,6 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; mod abi; -// Currently `batch` module is only used in tests, -// but will be used in production once batch syncing is implemented in consensus. -#[allow(unused)] -mod batch; mod config; mod en; pub mod era; diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index f80bfe58954..2a280b2f161 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -5,12 +5,12 @@ use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_consensus_storage::BlockStore; use zksync_dal::consensus_dal; use crate::{ config, registry, - storage::{ConnectionPool, InsertCertificateError, Store}, + storage::{ConnectionPool, Store}, }; /// Task running a consensus validator for the main node. @@ -43,7 +43,7 @@ pub async fn run_main_node( } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. - let (store, runner) = Store::new(ctx, pool.clone(), None) + let (store, runner) = Store::new(ctx, pool.clone(), None, None) .await .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); @@ -67,11 +67,6 @@ pub async fn run_main_node( .wrap("BlockStore::new()")?; s.spawn_bg(runner.run(ctx)); - let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) - .await - .wrap("BatchStore::new()")?; - s.spawn_bg(runner.run(ctx)); - let attestation = Arc::new(attestation::Controller::new(attester)); s.spawn_bg(run_attestation_controller( ctx, @@ -83,7 +78,6 @@ pub async fn run_main_node( let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, - batch_store, validator: Some(executor::Validator { key: validator_key, replica_store: Box::new(store.clone()), @@ -135,9 +129,10 @@ async fn run_attestation_controller( "waiting for hash of batch {:?}", status.next_batch_to_attest ); - let hash = pool - .wait_for_batch_hash(ctx, status.next_batch_to_attest) + let info = pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) .await?; + let hash = consensus_dal::batch_hash(&info); let Some(committee) = registry .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) .await @@ -184,10 +179,7 @@ async fn run_attestation_controller( .wrap("connection()")? 
.insert_batch_certificate(ctx, &qc) .await - .map_err(|err| match err { - InsertCertificateError::Canceled(err) => ctx::Error::Canceled(err), - InsertCertificateError::Inner(err) => ctx::Error::Internal(err.into()), - })?; + .wrap("insert_batch_certificate()")?; } } .await; diff --git a/core/node/consensus/src/registry/abi.rs b/core/node/consensus/src/registry/abi.rs index 55cc7f9264f..57c65b10ce5 100644 --- a/core/node/consensus/src/registry/abi.rs +++ b/core/node/consensus/src/registry/abi.rs @@ -19,7 +19,8 @@ impl AsRef for ConsensusRegistry { } impl ConsensusRegistry { - const FILE: &'static str = "contracts/l2-contracts/artifacts-zk/contracts/ConsensusRegistry.sol/ConsensusRegistry.json"; + const FILE: &'static str = + "contracts/l2-contracts/artifacts-zk/contracts/ConsensusRegistry.sol/ConsensusRegistry.json"; /// Loads bytecode of the contract. #[cfg(test)] diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs index 773a1fbbee7..89afc20e1d5 100644 --- a/core/node/consensus/src/registry/tests.rs +++ b/core/node/consensus/src/registry/tests.rs @@ -1,5 +1,5 @@ use rand::Rng as _; -use zksync_concurrency::{ctx, scope}; +use zksync_concurrency::{ctx, scope, time}; use zksync_consensus_roles::{attester, validator::testonly::Setup}; use zksync_test_account::Account; use zksync_types::ProtocolVersionId; @@ -7,7 +7,9 @@ use zksync_types::ProtocolVersionId; use super::*; use crate::storage::ConnectionPool; -// Test checking that parsing logic matches the abi specified in the json file. +const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + +/// Test checking that parsing logic matches the abi specified in the json file. #[test] fn test_consensus_registry_abi() { zksync_concurrency::testonly::abort_on_panic(); @@ -73,10 +75,12 @@ async fn test_attester_committee() { node.push_block(&txs).await; node.seal_batch().await; - pool.wait_for_batch(ctx, node.last_batch()).await?; + pool.wait_for_batch_info(ctx, node.last_batch(), POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // Read the attester committee using the vm. 
- let batch = attester::BatchNumber(node.last_batch().0.into()); + let batch = attester::BatchNumber(node.last_batch().0); assert_eq!( Some(committee), registry diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 0f9d7c8527f..6ec5794e968 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -1,18 +1,18 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; -use zksync_consensus_crypto::keccak256::Keccak256; use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; -use zksync_consensus_storage::{self as storage, BatchStoreState}; -use zksync_dal::{consensus_dal, consensus_dal::Payload, Core, CoreDal, DalError}; +use zksync_consensus_storage as storage; +use zksync_dal::{ + consensus_dal::{AttestationStatus, BlockMetadata, GlobalConfig, Payload}, + Core, CoreDal, DalError, +}; use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState}; use zksync_state_keeper::io::common::IoCursor; -use zksync_types::{ - commitment::L1BatchWithMetadata, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, -}; +use zksync_types::{fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber}; use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; -use super::{InsertCertificateError, PayloadQueue}; +use super::PayloadQueue; use crate::config; /// Context-aware `zksync_dal::ConnectionPool` wrapper. @@ -54,24 +54,24 @@ impl ConnectionPool { /// Waits for the `number` L1 batch hash. #[tracing::instrument(skip_all)] - pub async fn wait_for_batch_hash( + pub async fn wait_for_batch_info( &self, ctx: &ctx::Ctx, number: attester::BatchNumber, - ) -> ctx::Result { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + interval: time::Duration, + ) -> ctx::Result { loop { - if let Some(hash) = self + if let Some(info) = self .connection(ctx) .await .wrap("connection()")? - .batch_hash(ctx, number) + .batch_info(ctx, number) .await - .with_wrap(|| format!("batch_hash({number})"))? + .with_wrap(|| format!("batch_info({number})"))? { - return Ok(hash); + return Ok(info); } - ctx.sleep(POLL_INTERVAL).await?; + ctx.sleep(interval).await?; } } } @@ -109,16 +109,23 @@ impl<'a> Connection<'a> { .map_err(DalError::generalize)?) } - /// Wrapper for `consensus_dal().block_payloads()`. - pub async fn payloads( + pub async fn batch_info( &mut self, ctx: &ctx::Ctx, - numbers: std::ops::Range, - ) -> ctx::Result> { + n: attester::BatchNumber, + ) -> ctx::Result> { + Ok(ctx.wait(self.0.consensus_dal().batch_info(n)).await??) + } + + /// Wrapper for `consensus_dal().block_metadata()`. + pub async fn block_metadata( + &mut self, + ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result> { Ok(ctx - .wait(self.0.consensus_dal().block_payloads(numbers)) - .await? - .map_err(DalError::generalize)?) + .wait(self.0.consensus_dal().block_metadata(number)) + .await??) } /// Wrapper for `consensus_dal().block_certificate()`. @@ -138,7 +145,7 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, cert: &validator::CommitQC, - ) -> Result<(), InsertCertificateError> { + ) -> Result<(), super::InsertCertificateError> { Ok(ctx .wait(self.0.consensus_dal().insert_block_certificate(cert)) .await??) 
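// A standalone sketch, not part of the patch, of the poll-and-sleep pattern that
// `wait_for_batch_info` follows now that the poll interval is supplied by the caller:
// query, return on `Some`, otherwise sleep for the interval and retry. The helper and
// its names are illustrative; the real method works on `attester::BatchNumber` and the
// consensus DAL connection.
use std::{future::Future, time::Duration};

async fn wait_for<T, F, Fut>(mut query: F, interval: Duration) -> T
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Option<T>>,
{
    loop {
        if let Some(value) = query().await {
            return value;
        }
        tokio::time::sleep(interval).await;
    }
}

#[tokio::main]
async fn main() {
    let mut polls = 0u32;
    let value = wait_for(
        || {
            polls += 1;
            let current = polls;
            async move { (current >= 3).then_some(current) }
        },
        Duration::from_millis(100),
    )
    .await;
    assert_eq!(value, 3);
}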
@@ -151,20 +158,10 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, cert: &attester::BatchQC, - ) -> Result<(), InsertCertificateError> { - use consensus_dal::InsertCertificateError as E; - let want_hash = self - .batch_hash(ctx, cert.message.number) - .await - .wrap("batch_hash()")? - .ok_or(E::MissingPayload)?; - if want_hash != cert.message.hash { - return Err(E::PayloadMismatch.into()); - } + ) -> ctx::Result<()> { Ok(ctx .wait(self.0.consensus_dal().insert_batch_certificate(cert)) - .await? - .map_err(E::Other)?) + .await??) } /// Wrapper for `consensus_dal().upsert_attester_committee()`. @@ -203,37 +200,6 @@ impl<'a> Connection<'a> { .context("sqlx")?) } - /// Wrapper for `consensus_dal().batch_hash()`. - pub async fn batch_hash( - &mut self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - let n = L1BatchNumber(number.0.try_into().context("overflow")?); - let Some(meta) = ctx - .wait(self.0.blocks_dal().get_l1_batch_metadata(n)) - .await? - .context("get_l1_batch_metadata()")? - else { - return Ok(None); - }; - Ok(Some(attester::BatchHash(Keccak256::from_bytes( - StoredBatchInfo::from(&meta).hash().0, - )))) - } - - /// Wrapper for `blocks_dal().get_l1_batch_metadata()`. - pub async fn batch( - &mut self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.blocks_dal().get_l1_batch_metadata(number)) - .await? - .context("get_l1_batch_metadata()")?) - } - /// Wrapper for `FetcherCursor::new()`. pub async fn new_payload_queue( &mut self, @@ -249,10 +215,7 @@ impl<'a> Connection<'a> { } /// Wrapper for `consensus_dal().global_config()`. - pub async fn global_config( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { + pub async fn global_config(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { Ok(ctx.wait(self.0.consensus_dal().global_config()).await??) } @@ -260,7 +223,7 @@ impl<'a> Connection<'a> { pub async fn try_update_global_config( &mut self, ctx: &ctx::Ctx, - cfg: &consensus_dal::GlobalConfig, + cfg: &GlobalConfig, ) -> ctx::Result<()> { Ok(ctx .wait(self.0.consensus_dal().try_update_global_config(cfg)) @@ -273,14 +236,14 @@ impl<'a> Connection<'a> { Ok(ctx.wait(self.0.consensus_dal().next_block()).await??) } - /// Wrapper for `consensus_dal().block_certificates_range()`. + /// Wrapper for `consensus_dal().block_store_state()`. #[tracing::instrument(skip_all)] - pub(crate) async fn block_certificates_range( + pub(crate) async fn block_store_state( &mut self, ctx: &ctx::Ctx, ) -> ctx::Result { Ok(ctx - .wait(self.0.consensus_dal().block_certificates_range()) + .wait(self.0.consensus_dal().block_store_state()) .await??) } @@ -305,7 +268,7 @@ impl<'a> Connection<'a> { } tracing::info!("Performing a hard fork of consensus."); - let new = consensus_dal::GlobalConfig { + let new = GlobalConfig { genesis: validator::GenesisRaw { chain_id: spec.chain_id, fork_number: old.as_ref().map_or(validator::ForkNumber(0), |old| { @@ -334,38 +297,35 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, number: validator::BlockNumber, - ) -> ctx::Result> { - let Some(justification) = self - .block_certificate(ctx, number) - .await - .wrap("block_certificate()")? - else { + ) -> ctx::Result> { + let Some(payload) = self.payload(ctx, number).await.wrap("payload()")? else { return Ok(None); }; - let payload = self - .payload(ctx, number) + if let Some(justification) = self + .block_certificate(ctx, number) .await - .wrap("payload()")? 
- .context("L2 block disappeared from storage")?; - - Ok(Some(validator::FinalBlock { - payload: payload.encode(), - justification, - })) - } + .wrap("block_certificate()")? + { + return Ok(Some( + validator::FinalBlock { + payload: payload.encode(), + justification, + } + .into(), + )); + } - /// Wrapper for `blocks_dal().get_sealed_l1_batch_number()`. - #[tracing::instrument(skip_all)] - pub async fn get_last_batch_number( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.blocks_dal().get_sealed_l1_batch_number()) - .await? - .context("get_sealed_l1_batch_number()")? - .map(|nr| attester::BatchNumber(nr.0 as u64))) + Ok(Some( + validator::PreGenesisBlock { + number, + payload: payload.encode(), + // We won't use justification until it is possible to verify + // payload against the L1 batch commitment. + justification: validator::Justification(vec![]), + } + .into(), + )) } /// Wrapper for `blocks_dal().get_l2_block_range_of_l1_batch()`. @@ -388,83 +348,11 @@ impl<'a> Connection<'a> { })) } - /// Construct the [attester::SyncBatch] for a given batch number. - pub async fn get_batch( - &mut self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - let Some((min, max)) = self - .get_l2_block_range_of_l1_batch(ctx, number) - .await - .context("get_l2_block_range_of_l1_batch()")? - else { - return Ok(None); - }; - - let payloads = self.payloads(ctx, min..max).await.wrap("payloads()")?; - let payloads = payloads.into_iter().map(|p| p.encode()).collect(); - - // TODO: Fill out the proof when we have the stateless L1 batch validation story finished. - // It is supposed to be a Merkle proof that the rolling hash of the batch has been included - // in the L1 system contract state tree. It is *not* the Ethereum state root hash, so producing - // it can be done without an L1 client, which is only required for validation. - let batch = attester::SyncBatch { - number, - payloads, - proof: Vec::new(), - }; - - Ok(Some(batch)) - } - - /// Construct the [storage::BatchStoreState] which contains the earliest batch and the last available [attester::SyncBatch]. - #[tracing::instrument(skip_all)] - pub async fn batches_range(&mut self, ctx: &ctx::Ctx) -> ctx::Result { - let first = self - .0 - .blocks_dal() - .get_earliest_l1_batch_number() - .await - .context("get_earliest_l1_batch_number()")?; - - let first = if first.is_some() { - first - } else { - self.0 - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await - .context("get_earliest_l1_batch_number()")? - .map(|s| s.l1_batch_number) - }; - - // TODO: In the future when we start filling in the `SyncBatch::proof` field, - // we can only run `get_batch` expecting `Some` result on numbers where the - // L1 state root hash is already available, so that we can produce some - // Merkle proof that the rolling hash of the L2 blocks in the batch has - // been included in the L1 state tree. At that point we probably can't - // call `get_last_batch_number` here, but something that indicates that - // the hashes/commitments on the L1 batch are ready and the thing has - // been included in L1; that potentially requires an API client as well. - let last = self - .get_last_batch_number(ctx) - .await - .context("get_last_batch_number()")?; - - Ok(BatchStoreState { - first: first - .map(|n| attester::BatchNumber(n.0 as u64)) - .unwrap_or(attester::BatchNumber(0)), - last, - }) - } - /// Wrapper for `consensus_dal().attestation_status()`. 
pub async fn attestation_status( &mut self, ctx: &ctx::Ctx, - ) -> ctx::Result> { + ) -> ctx::Result> { Ok(ctx .wait(self.0.consensus_dal().attestation_status()) .await? diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 96a47f5abe7..154509e97b1 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -1,15 +1,18 @@ use std::sync::Arc; use anyhow::Context as _; -use tokio::sync::watch::Sender; use tracing::Instrument; use zksync_concurrency::{ctx, error::Wrap as _, scope, sync, time}; use zksync_consensus_bft::PayloadManager; -use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; -use zksync_consensus_storage::{self as storage, BatchStoreState}; +use zksync_consensus_roles::validator; +use zksync_consensus_storage::{self as storage}; use zksync_dal::consensus_dal::{self, Payload}; use zksync_node_sync::fetcher::{FetchedBlock, FetchedTransaction}; use zksync_types::L2BlockNumber; +use zksync_web3_decl::{ + client::{DynClient, L2}, + namespaces::EnNamespaceClient as _, +}; use super::{Connection, PayloadQueue}; use crate::storage::{ConnectionPool, InsertCertificateError}; @@ -35,7 +38,7 @@ fn to_fetched_block( l1_gas_price: payload.l1_gas_price, l2_fair_gas_price: payload.l2_fair_gas_price, fair_pubdata_price: payload.fair_pubdata_price, - pubdata_params: payload.pubdata_params.unwrap_or_default(), + pubdata_params: payload.pubdata_params, virtual_blocks: payload.virtual_blocks, operator_address: payload.operator_address, transactions: payload @@ -47,7 +50,7 @@ fn to_fetched_block( } /// Wrapper of `ConnectionPool` implementing `ReplicaStore`, `PayloadManager`, -/// `PersistentBlockStore` and `PersistentBatchStore`. +/// `PersistentBlockStore`. /// /// Contains queues to save Quorum Certificates received over gossip to the store /// as and when the payload they are over becomes available. @@ -60,8 +63,8 @@ pub(crate) struct Store { block_certificates: ctx::channel::UnboundedSender, /// Range of L2 blocks for which we have a QC persisted. blocks_persisted: sync::watch::Receiver, - /// Range of L1 batches we have persisted. - batches_persisted: sync::watch::Receiver, + /// Main node client. None if this node is the main node. 
+ client: Option>>, } struct PersistedBlockState(sync::watch::Sender); @@ -70,7 +73,6 @@ struct PersistedBlockState(sync::watch::Sender); pub struct StoreRunner { pool: ConnectionPool, blocks_persisted: PersistedBlockState, - batches_persisted: sync::watch::Sender, block_certificates: ctx::channel::UnboundedReceiver, } @@ -79,22 +81,15 @@ impl Store { ctx: &ctx::Ctx, pool: ConnectionPool, payload_queue: Option, + client: Option>>, ) -> ctx::Result<(Store, StoreRunner)> { let mut conn = pool.connection(ctx).await.wrap("connection()")?; // Initial state of persisted blocks - let blocks_persisted = conn - .block_certificates_range(ctx) - .await - .wrap("block_certificates_range()")?; - - // Initial state of persisted batches - let batches_persisted = conn.batches_range(ctx).await.wrap("batches_range()")?; - + let blocks_persisted = conn.block_store_state(ctx).await.wrap("blocks_range()")?; drop(conn); let blocks_persisted = sync::watch::channel(blocks_persisted).0; - let batches_persisted = sync::watch::channel(batches_persisted).0; let (block_certs_send, block_certs_recv) = ctx::channel::unbounded(); Ok(( @@ -103,12 +98,11 @@ impl Store { block_certificates: block_certs_send, block_payloads: Arc::new(sync::Mutex::new(payload_queue)), blocks_persisted: blocks_persisted.subscribe(), - batches_persisted: batches_persisted.subscribe(), + client, }, StoreRunner { pool, blocks_persisted: PersistedBlockState(blocks_persisted), - batches_persisted, block_certificates: block_certs_recv, }, )) @@ -118,6 +112,30 @@ impl Store { async fn conn(&self, ctx: &ctx::Ctx) -> ctx::Result { self.pool.connection(ctx).await.wrap("connection") } + + /// Number of the next block to queue. + pub(crate) async fn next_block( + &self, + ctx: &ctx::Ctx, + ) -> ctx::OrCanceled> { + Ok(sync::lock(ctx, &self.block_payloads) + .await? + .as_ref() + .map(|p| p.next())) + } + + /// Queues the next block. + pub(crate) async fn queue_next_fetched_block( + &self, + ctx: &ctx::Ctx, + block: FetchedBlock, + ) -> ctx::Result<()> { + let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async(); + if let Some(payloads) = &mut *payloads { + payloads.send(block).await.context("payloads.send()")?; + } + Ok(()) + } } impl PersistedBlockState { @@ -126,7 +144,7 @@ impl PersistedBlockState { /// If `persisted.first` is moved forward, it means that blocks have been pruned. /// If `persisted.last` is moved forward, it means that new blocks with certificates have been /// persisted. - #[tracing::instrument(skip_all, fields(first = %new.first, last = ?new.last.as_ref().map(|l| l.message.proposal.number)))] + #[tracing::instrument(skip_all, fields(first = %new.first, next = ?new.next()))] fn update(&self, new: storage::BlockStoreState) { self.0.send_if_modified(|p| { if &new == p { @@ -140,10 +158,11 @@ impl PersistedBlockState { }); } - /// Checks if the given certificate is exactly the next one that should - /// be persisted. + /// Checks if the given certificate should be eventually persisted. + /// Only certificates block store state is a range of blocks for which we already have + /// certificates and we need certs only for the later ones. fn should_be_persisted(&self, cert: &validator::CommitQC) -> bool { - self.0.borrow().next() == cert.header().number + self.0.borrow().next() <= cert.header().number } /// Appends the `cert` to `persisted` range. 
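// A toy comparison, not part of the patch, of the old and new persistence predicates
// described above, reduced to plain block numbers instead of `BlockStoreState`:
// previously only the certificate for exactly the next block was accepted; now any
// certificate at or beyond the persisted range is kept until its predecessors arrive.
fn should_persist_old(next_persisted: u64, cert_number: u64) -> bool {
    next_persisted == cert_number
}

fn should_persist_new(next_persisted: u64, cert_number: u64) -> bool {
    next_persisted <= cert_number
}

fn main() {
    // A certificate two blocks ahead of the persisted range used to be dropped;
    // with the relaxed check it is retained and applied once the gap closes.
    assert!(!should_persist_old(5, 7));
    assert!(should_persist_new(5, 7));
}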
@@ -153,7 +172,7 @@ impl PersistedBlockState { if p.next() != cert.header().number { return false; } - p.last = Some(cert); + p.last = Some(storage::Last::Final(cert)); true }); } @@ -164,7 +183,6 @@ impl StoreRunner { let StoreRunner { pool, blocks_persisted, - batches_persisted, mut block_certificates, } = self; @@ -177,13 +195,13 @@ impl StoreRunner { ) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(1); - let range = pool + let state = pool .connection(ctx) .await? - .block_certificates_range(ctx) + .block_store_state(ctx) .await - .wrap("block_certificates_range()")?; - blocks_persisted.update(range); + .wrap("block_store_state()")?; + blocks_persisted.update(state); ctx.sleep(POLL_INTERVAL).await?; Ok(()) @@ -196,60 +214,6 @@ impl StoreRunner { } }); - #[tracing::instrument(skip_all, fields(l1_batch = %next_batch_number))] - async fn gossip_sync_batches_iteration( - ctx: &ctx::Ctx, - pool: &ConnectionPool, - next_batch_number: &mut BatchNumber, - batches_persisted: &Sender, - ) -> ctx::Result<()> { - const POLL_INTERVAL: time::Duration = time::Duration::seconds(1); - - let mut conn = pool.connection(ctx).await?; - if let Some(last_batch_number) = conn - .get_last_batch_number(ctx) - .await - .wrap("last_batch_number()")? - { - if last_batch_number >= *next_batch_number { - let range = conn.batches_range(ctx).await.wrap("batches_range()")?; - *next_batch_number = last_batch_number.next(); - tracing::info_span!("batches_persisted_send").in_scope(|| { - batches_persisted.send_replace(range); - }); - } - } - ctx.sleep(POLL_INTERVAL).await?; - - Ok(()) - } - - // NOTE: Running this update loop will trigger the gossip of `SyncBatches` which is currently - // pointless as there is no proof and we have to ignore them. We can disable it, but bear in - // mind that any node which gossips the availability will cause pushes and pulls in the consensus. - s.spawn::<()>(async { - // Loop updating `batches_persisted` whenever a new L1 batch is available in the database. - // We have to do this because the L1 batch is produced as L2 blocks are executed, - // which can happen on a different machine or in a different process, so we can't rely on some - // DAL method updating this memory construct. However I'm not sure that `BatchStoreState` - // really has to contain the full blown last batch, or whether it could have for example - // just the number of it. We can't just use the `attester::BatchQC`, which would make it - // analogous to the `BlockStoreState`, because the `SyncBatch` mechanism is for catching - // up with L1 batches from peers _without_ the QC, based on L1 inclusion proofs instead. - // Nevertheless since the `SyncBatch` contains all transactions for all L2 blocks, - // we can try to make it less frequent by querying just the last batch number first. 
- let mut next_batch_number = { batches_persisted.borrow().next() }; - loop { - gossip_sync_batches_iteration( - ctx, - &pool, - &mut next_batch_number, - &batches_persisted, - ) - .await?; - } - }); - #[tracing::instrument(skip_all)] async fn insert_block_certificates_iteration( ctx: &ctx::Ctx, @@ -291,9 +255,7 @@ impl StoreRunner { Err(InsertCertificateError::Canceled(err)) => { return Err(ctx::Error::Canceled(err)) } - Err(InsertCertificateError::Inner(err)) => { - return Err(ctx::Error::Internal(anyhow::Error::from(err))) - } + Err(err) => Err(err).context("insert_block_certificate()")?, } } @@ -340,7 +302,7 @@ impl storage::PersistentBlockStore for Store { &self, ctx: &ctx::Ctx, number: validator::BlockNumber, - ) -> ctx::Result { + ) -> ctx::Result { Ok(self .conn(ctx) .await? @@ -349,6 +311,41 @@ impl storage::PersistentBlockStore for Store { .context("not found")?) } + async fn verify_pregenesis_block( + &self, + ctx: &ctx::Ctx, + block: &validator::PreGenesisBlock, + ) -> ctx::Result<()> { + // We simply ask the main node for the payload hash and compare it against the received + // payload. + let meta = match &self.client { + None => self + .conn(ctx) + .await? + .block_metadata(ctx, block.number) + .await? + .context("metadata not in storage")?, + Some(client) => { + let meta = ctx + .wait(client.block_metadata(L2BlockNumber( + block.number.0.try_into().context("overflow")?, + ))) + .await? + .context("block_metadata()")? + .context("metadata not available")?; + zksync_protobuf::serde::Deserialize { + deny_unknown_fields: false, + } + .proto_fmt(&meta.0) + .context("deserialize()")? + } + }; + if meta.payload_hash != block.payload.hash() { + return Err(anyhow::format_err!("payload hash mismatch").into()); + } + Ok(()) + } + /// If actions queue is set (and the block has not been stored yet), /// the block will be translated into a sequence of actions. /// The received actions should be fed @@ -357,19 +354,21 @@ impl storage::PersistentBlockStore for Store { /// `store_next_block()` call will wait synchronously for the L2 block. /// Once the L2 block is observed in storage, `store_next_block()` will store a cert for this /// L2 block. - async fn queue_next_block( - &self, - ctx: &ctx::Ctx, - block: validator::FinalBlock, - ) -> ctx::Result<()> { + async fn queue_next_block(&self, ctx: &ctx::Ctx, block: validator::Block) -> ctx::Result<()> { let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async(); + let (p, j) = match &block { + validator::Block::Final(block) => (&block.payload, Some(&block.justification)), + validator::Block::PreGenesis(block) => (&block.payload, None), + }; if let Some(payloads) = &mut *payloads { payloads - .send(to_fetched_block(block.number(), &block.payload).context("to_fetched_block")?) + .send(to_fetched_block(block.number(), p).context("to_fetched_block")?) .await - .context("payload_queue.send()")?; + .context("payloads.send()")?; + } + if let Some(justification) = j { + self.block_certificates.send(justification.clone()); } - self.block_certificates.send(block.justification); Ok(()) } } @@ -456,43 +455,3 @@ impl PayloadManager for Store { Ok(()) } } - -#[async_trait::async_trait] -impl storage::PersistentBatchStore for Store { - /// Range of batches persisted in storage. - fn persisted(&self) -> sync::watch::Receiver { - self.batches_persisted.clone() - } - - /// Returns the batch with the given number. - async fn get_batch( - &self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - self.conn(ctx) - .await? 
- .get_batch(ctx, number) - .await - .wrap("get_batch") - } - - /// Queue the batch to be persisted in storage. - /// - /// The caller [BatchStore] ensures that this is only called when the batch is the next expected one. - async fn queue_next_batch( - &self, - _ctx: &ctx::Ctx, - _batch: attester::SyncBatch, - ) -> ctx::Result<()> { - // Currently the gossiping of `SyncBatch` and the `BatchStoreState` is unconditionally started by the `Network::run_stream` in consensus, - // and as long as any node reports new batches available by updating the `PersistentBatchStore::persisted` here, the other nodes - // will start pulling the corresponding batches, which will end up being passed to this method. - // If we return an error here or panic, it will stop the whole consensus task tree due to the way scopes work, so instead just return immediately. - // In the future we have to validate the proof agains the L1 state root hash, which IIUC we can't do just yet. - - // Err(anyhow::format_err!("unimplemented: queue_next_batch should not be called until we have the stateless L1 batch story completed.").into()) - - Ok(()) - } -} diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index 65c464d98b9..2aed011d23c 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -7,8 +7,8 @@ use zksync_dal::CoreDal as _; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{recover, snapshot, Snapshot}; use zksync_types::{ - commitment::L1BatchWithMetadata, protocol_version::ProtocolSemanticVersion, - system_contracts::get_system_smart_contracts, L1BatchNumber, L2BlockNumber, ProtocolVersionId, + protocol_version::ProtocolSemanticVersion, system_contracts::get_system_smart_contracts, + L1BatchNumber, L2BlockNumber, ProtocolVersionId, }; use super::{Connection, ConnectionPool}; @@ -57,7 +57,7 @@ pub(crate) fn mock_genesis_params(protocol_version: ProtocolVersionId) -> Genesi GenesisParams::from_genesis_config( cfg, BaseSystemContracts::load_from_disk(), - get_system_smart_contracts(), + get_system_smart_contracts(false), ) .unwrap() } @@ -102,28 +102,6 @@ impl ConnectionPool { Ok(()) } - /// Waits for the `number` L1 batch. - pub async fn wait_for_batch( - &self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); - loop { - if let Some(payload) = self - .connection(ctx) - .await - .wrap("connection()")? - .batch(ctx, number) - .await - .wrap("batch()")? - { - return Ok(payload); - } - ctx.sleep(POLL_INTERVAL).await?; - } - } - /// Takes a storage snapshot at the last sealed L1 batch. pub(crate) async fn snapshot(&self, ctx: &ctx::Ctx) -> ctx::Result { let mut conn = self.connection(ctx).await.wrap("connection()")?; @@ -152,21 +130,32 @@ impl ConnectionPool { Self(pool) } - /// Waits for `want_last` block to have certificate then fetches all L2 blocks with certificates. - pub async fn wait_for_block_certificates( + /// Waits for `want_last` block then fetches all L2 blocks with certificates. + pub async fn wait_for_blocks( &self, ctx: &ctx::Ctx, want_last: validator::BlockNumber, - ) -> ctx::Result> { - self.wait_for_block_certificate(ctx, want_last).await?; + ) -> ctx::Result> { + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); + let state = loop { + let state = self + .connection(ctx) + .await + .wrap("connection()")? 
+ .block_store_state(ctx) + .await + .wrap("block_store_state()")?; + tracing::info!("state.next() = {}", state.next()); + if state.next() > want_last { + break state; + } + ctx.sleep(POLL_INTERVAL).await?; + }; + + assert_eq!(want_last.next(), state.next()); let mut conn = self.connection(ctx).await.wrap("connection()")?; - let range = conn - .block_certificates_range(ctx) - .await - .wrap("certificates_range()")?; - assert_eq!(want_last.next(), range.next()); - let mut blocks: Vec = vec![]; - for i in range.first.0..range.next().0 { + let mut blocks: Vec = vec![]; + for i in state.first.0..state.next().0 { let i = validator::BlockNumber(i); let block = conn.block(ctx, i).await.context("block()")?.unwrap(); blocks.push(block); @@ -174,13 +163,13 @@ impl ConnectionPool { Ok(blocks) } - /// Same as `wait_for_certificates`, but additionally verifies all the blocks against genesis. - pub async fn wait_for_block_certificates_and_verify( + /// Same as `wait_for_blocks`, but additionally verifies all certificates. + pub async fn wait_for_blocks_and_verify_certs( &self, ctx: &ctx::Ctx, want_last: validator::BlockNumber, - ) -> ctx::Result> { - let blocks = self.wait_for_block_certificates(ctx, want_last).await?; + ) -> ctx::Result> { + let blocks = self.wait_for_blocks(ctx, want_last).await?; let cfg = self .connection(ctx) .await @@ -190,7 +179,9 @@ impl ConnectionPool { .wrap("genesis()")? .context("genesis is missing")?; for block in &blocks { - block.verify(&cfg.genesis).context(block.number())?; + if let validator::Block::Final(block) = block { + block.verify(&cfg.genesis).context(block.number())?; + } } Ok(blocks) } @@ -228,19 +219,11 @@ impl ConnectionPool { let registry = registry::Registry::new(cfg.genesis.clone(), self.clone()).await; for i in first.0..want_last.0 { let i = attester::BatchNumber(i); - let hash = conn - .batch_hash(ctx, i) - .await - .wrap("batch_hash()")? - .context("hash missing")?; let cert = conn .batch_certificate(ctx, i) .await .wrap("batch_certificate")? .context("cert missing")?; - if cert.message.hash != hash { - return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into()); - } let committee = registry .attester_committee_for(ctx, registry_addr, i) .await @@ -255,28 +238,30 @@ impl ConnectionPool { pub async fn prune_batches( &self, ctx: &ctx::Ctx, - last_batch: L1BatchNumber, + last_batch: attester::BatchNumber, ) -> ctx::Result<()> { let mut conn = self.connection(ctx).await.context("connection()")?; - let (_, last_block) = ctx - .wait( - conn.0 - .blocks_dal() - .get_l2_block_range_of_l1_batch(last_batch), - ) - .await? - .context("get_l2_block_range_of_l1_batch()")? - .context("batch not found")?; - conn.0 - .pruning_dal() - .soft_prune_batches_range(last_batch, last_block) - .await - .context("soft_prune_batches_range()")?; - conn.0 - .pruning_dal() - .hard_prune_batches_range(last_batch, last_block) + let (_, last_block) = conn + .get_l2_block_range_of_l1_batch(ctx, last_batch) .await - .context("hard_prune_batches_range()")?; + .wrap("get_l2_block_range_of_l1_batch()")? + .context("batch not found")?; + let last_batch = L1BatchNumber(last_batch.0.try_into().context("oveflow")?); + let last_block = L2BlockNumber(last_block.0.try_into().context("oveflow")?); + ctx.wait( + conn.0 + .pruning_dal() + .soft_prune_batches_range(last_batch, last_block), + ) + .await? + .context("soft_prune_batches_range()")?; + ctx.wait( + conn.0 + .pruning_dal() + .hard_prune_batches_range(last_batch, last_block), + ) + .await? 
+ .context("hard_prune_batches_range()")?; Ok(()) } } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 1996928b26e..db433665e57 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -16,10 +16,7 @@ use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_network as network; use zksync_consensus_roles::{attester, validator, validator::testonly::Setup}; use zksync_dal::{CoreDal, DalError}; -use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; -use zksync_metadata_calculator::{ - LazyAsyncTreeReader, MetadataCalculator, MetadataCalculatorConfig, -}; +use zksync_metadata_calculator::{MetadataCalculator, MetadataCalculatorConfig}; use zksync_node_api_server::web3::{state::InternalApiConfig, testonly::TestServerBuilder}; use zksync_node_genesis::GenesisParams; use zksync_node_sync::{ @@ -49,9 +46,8 @@ use zksync_types::{ use zksync_web3_decl::client::{Client, DynClient, L2}; use crate::{ - batch::{L1BatchCommit, L1BatchWithWitness, LastBlockCommit}, en, - storage::ConnectionPool, + storage::{ConnectionPool, Store}, }; /// Fake StateKeeper for tests. @@ -70,7 +66,6 @@ pub(super) struct StateKeeper { sync_state: SyncState, addr: sync::watch::Receiver>, pool: ConnectionPool, - tree_reader: LazyAsyncTreeReader, } #[derive(Clone)] @@ -78,6 +73,7 @@ pub(super) struct ConfigSet { net: network::Config, pub(super) config: config::ConsensusConfig, pub(super) secrets: config::ConsensusSecrets, + pub(super) enable_pregenesis: bool, } impl ConfigSet { @@ -87,11 +83,17 @@ impl ConfigSet { config: make_config(&net, None), secrets: make_secrets(&net, None), net, + enable_pregenesis: self.enable_pregenesis, } } } -pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) -> Vec { +pub(super) fn new_configs( + rng: &mut impl Rng, + setup: &Setup, + seed_peers: usize, + pregenesis: bool, +) -> Vec { let net_cfgs = network::testonly::new_configs(rng, setup, 0); let genesis_spec = config::GenesisSpec { chain_id: setup.genesis.chain_id.0.try_into().unwrap(), @@ -131,6 +133,7 @@ pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) config: make_config(&net, Some(genesis_spec.clone())), secrets: make_secrets(&net, setup.attester_keys.get(i).cloned()), net, + enable_pregenesis: pregenesis, }) .collect() } @@ -154,6 +157,7 @@ fn make_config( genesis_spec: Option, ) -> config::ConsensusConfig { config::ConsensusConfig { + port: Some(cfg.server_addr.port()), server_addr: *cfg.server_addr, public_addr: config::Host(cfg.public_addr.0.clone()), max_payload_size: usize::MAX, @@ -248,7 +252,6 @@ impl StateKeeper { let metadata_calculator = MetadataCalculator::new(config, None, pool.0.clone()) .await .context("MetadataCalculator::new()")?; - let tree_reader = metadata_calculator.tree_reader(); Ok(( Self { protocol_version, @@ -261,7 +264,6 @@ impl StateKeeper { sync_state: sync_state.clone(), addr: addr.subscribe(), pool: pool.clone(), - tree_reader, }, StateKeeperRunner { actions_queue, @@ -370,51 +372,14 @@ impl StateKeeper { } /// Batch of the `last_block`. - pub fn last_batch(&self) -> L1BatchNumber { - self.last_batch + pub fn last_batch(&self) -> attester::BatchNumber { + attester::BatchNumber(self.last_batch.0.into()) } /// Last L1 batch that has been sealed and will have /// metadata computed eventually. 
- pub fn last_sealed_batch(&self) -> L1BatchNumber { - self.last_batch - (!self.batch_sealed) as u32 - } - - /// Loads a commitment to L1 batch directly from the database. - // TODO: ideally, we should rather fake fetching it from Ethereum. - // We can use `zksync_eth_client::clients::MockEthereum` for that, - // which implements `EthInterface`. It should be enough to use - // `MockEthereum.with_call_handler()`. - pub async fn load_batch_commit( - &self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result { - // TODO: we should mock the `eth_sender` as well. - let mut conn = self.pool.connection(ctx).await?; - let this = conn.batch(ctx, number).await?.context("missing batch")?; - let prev = conn - .batch(ctx, number - 1) - .await? - .context("missing batch")?; - Ok(L1BatchCommit { - number, - this_batch: LastBlockCommit { - info: StoredBatchInfo::from(&this).hash(), - }, - prev_batch: LastBlockCommit { - info: StoredBatchInfo::from(&prev).hash(), - }, - }) - } - - /// Loads an `L1BatchWithWitness`. - pub async fn load_batch_with_witness( - &self, - ctx: &ctx::Ctx, - n: L1BatchNumber, - ) -> ctx::Result { - L1BatchWithWitness::load(ctx, n, &self.pool, &self.tree_reader).await + pub fn last_sealed_batch(&self) -> attester::BatchNumber { + attester::BatchNumber((self.last_batch.0 - (!self.batch_sealed) as u32).into()) } /// Connects to the json RPC endpoint exposed by the state keeper. @@ -456,6 +421,40 @@ impl StateKeeper { .await } + pub async fn run_temporary_fetcher( + self, + ctx: &ctx::Ctx, + client: Box>, + ) -> ctx::Result<()> { + scope::run!(ctx, |ctx, s| async { + let payload_queue = self + .pool + .connection(ctx) + .await + .wrap("connection()")? + .new_payload_queue(ctx, self.actions_sender, self.sync_state.clone()) + .await + .wrap("new_payload_queue()")?; + let (store, runner) = Store::new( + ctx, + self.pool.clone(), + Some(payload_queue), + Some(client.clone()), + ) + .await + .wrap("Store::new()")?; + s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + en::EN { + pool: self.pool.clone(), + client, + sync_state: self.sync_state.clone(), + } + .temporary_block_fetcher(ctx, &store) + .await + }) + .await + } + /// Runs consensus node for the external node. 
pub async fn run_consensus( self, @@ -474,6 +473,7 @@ impl StateKeeper { cfgs.config, cfgs.secrets, cfgs.net.build_version, + cfgs.enable_pregenesis, ) .await } @@ -571,7 +571,9 @@ impl StateKeeperRunner { self.pool.0.clone(), Some(ethabi::Address::repeat_byte(11)), 5, - ); + ) + .await + .unwrap(); let io = ExternalIO::new( self.pool.0.clone(), @@ -678,7 +680,9 @@ impl StateKeeperRunner { self.pool.0.clone(), Some(ethabi::Address::repeat_byte(11)), 5, - ); + ) + .await + .unwrap(); let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone()); let io = ExternalIO::new( diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index 35d849ae616..2701a986e9e 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use rand::Rng as _; -use test_casing::test_casing; +use test_casing::{test_casing, Product}; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_consensus_roles::{ @@ -9,10 +9,10 @@ use zksync_consensus_roles::{ }; use zksync_dal::consensus_dal; use zksync_test_account::Account; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; +use zksync_types::ProtocolVersionId; use zksync_web3_decl::namespaces::EnNamespaceClient as _; -use super::VERSIONS; +use super::{POLL_INTERVAL, PREGENESIS, VERSIONS}; use crate::{ mn::run_main_node, registry::{testonly, Registry}, @@ -34,13 +34,13 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); // Setup nontrivial genesis. - while sk.last_sealed_batch() < L1BatchNumber(3) { + while sk.last_sealed_batch() < attester::BatchNumber(3) { sk.push_random_blocks(rng, account, 10).await; } let mut setup = SetupSpec::new(rng, 3); setup.first_block = sk.last_block(); let first_batch = sk.last_batch(); - let setup = Setup::from(setup); + let setup = Setup::from_spec(rng, setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; conn.try_update_global_config( ctx, @@ -54,7 +54,9 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { .wrap("try_update_global_config()")?; // Make sure that the first_batch is actually sealed. sk.seal_batch().await; - pool.wait_for_batch(ctx, first_batch).await?; + pool.wait_for_batch_info(ctx, first_batch, POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // Connect to API endpoint. let api = sk.connect(ctx).await?; @@ -77,18 +79,18 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { let status = fetch_status().await?; assert_eq!( status.next_batch_to_attest, - attester::BatchNumber(first_batch.0.into()) + attester::BatchNumber(first_batch.0) ); tracing::info!("Insert a cert"); { let mut conn = pool.connection(ctx).await?; let number = status.next_batch_to_attest; - let hash = conn.batch_hash(ctx, number).await?.unwrap(); + let info = conn.batch_info(ctx, number).await?.unwrap(); let gcfg = conn.global_config(ctx).await?.unwrap(); let m = attester::Batch { number, - hash, + hash: consensus_dal::batch_hash(&info), genesis: gcfg.genesis.hash(), }; let mut sigs = attester::MultiSig::default(); @@ -124,9 +126,9 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Test running a couple of attesters (which are also validators). // Main node is expected to collect all certificates. // External nodes are expected to just vote for the batch. 
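// For illustration only, not part of the patch: the parameter matrix that the
// `test_casing(4, Product((VERSIONS, PREGENESIS)))` attribute on the test below expands
// to. Each protocol version is now exercised both with and without pregenesis support;
// the string stand-ins are not the real constants.
fn main() {
    let versions = ["latest", "next"];
    let pregenesis = [false, true];
    for version in versions {
        for pre in pregenesis {
            println!("test case: version = {version}, pregenesis = {pre}");
        }
    }
}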
-#[test_casing(2, VERSIONS)] +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_multiple_attesters(version: ProtocolVersionId) { +async fn test_multiple_attesters(version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 4; zksync_concurrency::testonly::abort_on_panic(); @@ -135,7 +137,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId) { let account = &mut Account::random(); let to_fund = &[account.address]; let setup = Setup::new(rng, 4); - let mut cfgs = new_configs(rng, &setup, NODES); + let mut cfgs = new_configs(rng, &setup, NODES, pregenesis); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?; @@ -235,7 +237,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId) { } tracing::info!("Wait for the batches to be attested"); - let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); + let want_last = attester::BatchNumber(validator.last_sealed_batch().0); validator_pool .wait_for_batch_certificates_and_verify(ctx, want_last, Some(registry_addr)) .await?; diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs deleted file mode 100644 index f0cae7f2c02..00000000000 --- a/core/node/consensus/src/tests/batch.rs +++ /dev/null @@ -1,124 +0,0 @@ -use test_casing::{test_casing, Product}; -use zksync_concurrency::{ctx, scope}; -use zksync_consensus_roles::validator; -use zksync_test_account::Account; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; - -use super::{FROM_SNAPSHOT, VERSIONS}; -use crate::{storage::ConnectionPool, testonly}; - -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test] -async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let pool = ConnectionPool::test(from_snapshot, version).await; - let account = &mut Account::random(); - - // Fill storage with unsigned L2 blocks and L1 batches in a way that the - // last L1 batch is guaranteed to have some L2 blocks executed in it. - scope::run!(ctx, |ctx, s| async { - // Start state keeper. - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - - for _ in 0..3 { - for _ in 0..2 { - sk.push_random_block(rng, account).await; - } - sk.seal_batch().await; - } - sk.push_random_block(rng, account).await; - - pool.wait_for_payload(ctx, sk.last_block()).await?; - - Ok(()) - }) - .await - .unwrap(); - - // Now we can try to retrieve the batch. - scope::run!(ctx, |ctx, _s| async { - let mut conn = pool.connection(ctx).await?; - let batches = conn.batches_range(ctx).await?; - let last = batches.last.expect("last is set"); - let (min, max) = conn - .get_l2_block_range_of_l1_batch(ctx, last) - .await? - .unwrap(); - - let last_batch = conn - .get_batch(ctx, last) - .await? 
- .expect("last batch can be retrieved"); - - assert_eq!( - last_batch.payloads.len(), - (max.0 - min.0) as usize, - "all block payloads present" - ); - - let first_payload = last_batch - .payloads - .first() - .expect("last batch has payloads"); - - let want_payload = conn.payload(ctx, min).await?.expect("payload is in the DB"); - let want_payload = want_payload.encode(); - - assert_eq!( - first_payload, &want_payload, - "first payload is the right number" - ); - - anyhow::Ok(()) - }) - .await - .unwrap(); -} - -/// Tests that generated L1 batch witnesses can be verified successfully. -/// TODO: add tests for verification failures. -#[test_casing(2, VERSIONS)] -#[tokio::test] -async fn test_batch_witness(version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let account = &mut Account::random(); - let to_fund = &[account.address]; - - scope::run!(ctx, |ctx, s| async { - let pool = ConnectionPool::from_genesis(version).await; - let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run_real(ctx, to_fund)); - - tracing::info!("analyzing storage"); - { - let mut conn = pool.connection(ctx).await.unwrap(); - let mut n = validator::BlockNumber(0); - while let Some(p) = conn.payload(ctx, n).await? { - tracing::info!("block[{n}] = {p:?}"); - n = n + 1; - } - } - - // Seal a bunch of batches. - node.push_random_blocks(rng, account, 10).await; - node.seal_batch().await; - pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; - // We can verify only 2nd batch onward, because - // batch witness verifies parent of the last block of the - // previous batch (and 0th batch contains only 1 block). - for n in 2..=node.last_sealed_batch().0 { - let n = L1BatchNumber(n); - let batch_with_witness = node.load_batch_with_witness(ctx, n).await?; - let commit = node.load_batch_commit(ctx, n).await?; - batch_with_witness.verify(&commit)?; - } - Ok(()) - }) - .await - .unwrap(); -} diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 52abe3c810c..8da17cfba8a 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -2,29 +2,121 @@ use anyhow::Context as _; use rand::Rng as _; use test_casing::{test_casing, Product}; use tracing::Instrument as _; -use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_config::configs::consensus as config; use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_roles::{ node, validator, validator::testonly::{Setup, SetupSpec}, }; -use zksync_consensus_storage::BlockStore; +use zksync_consensus_storage::{BlockStore, PersistentBlockStore}; use zksync_dal::consensus_dal; use zksync_test_account::Account; use zksync_types::ProtocolVersionId; +use zksync_web3_decl::namespaces::EnNamespaceClient as _; use crate::{ + en::TEMPORARY_FETCHER_THRESHOLD, mn::run_main_node, storage::{ConnectionPool, Store}, testonly, }; mod attestation; -mod batch; const VERSIONS: [ProtocolVersionId; 2] = [ProtocolVersionId::latest(), ProtocolVersionId::next()]; const FROM_SNAPSHOT: [bool; 2] = [true, false]; +const PREGENESIS: [bool; 2] = [true, false]; +const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + +#[test_casing(2, VERSIONS)] +#[tokio::test] +async fn test_verify_pregenesis_block(version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = 
&ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let mut setup = SetupSpec::new(rng, 3); + setup.first_block = validator::BlockNumber(1000); + let setup = Setup::from_spec(rng, setup); + let cfg = consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + seed_peers: [].into(), + }; + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Start state keeper."); + let pool = ConnectionPool::test(/*from_snapshot=*/ false, version).await; + pool.connection(ctx) + .await + .wrap("connection()")? + .try_update_global_config(ctx, &cfg) + .await + .wrap("try_update_global_config()")?; + let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + + tracing::info!("Populate storage with a bunch of blocks."); + sk.push_random_blocks(rng, account, 5).await; + sk.seal_batch().await; + let blocks: Vec<_> = pool + .wait_for_blocks(ctx, sk.last_block()) + .await + .context("wait_for_blocks()")? + .into_iter() + .map(|b| match b { + validator::Block::PreGenesis(b) => b, + _ => panic!(), + }) + .collect(); + assert!(!blocks.is_empty()); + + tracing::info!("Create another store"); + let pool = ConnectionPool::test(/*from_snapshot=*/ false, version).await; + pool.connection(ctx) + .await + .wrap("connection()")? + .try_update_global_config(ctx, &cfg) + .await + .wrap("try_update_global_config()")?; + let (store, runner) = Store::new( + ctx, + pool.clone(), + None, + Some(sk.connect(ctx).await.unwrap()), + ) + .await + .unwrap(); + s.spawn_bg(runner.run(ctx)); + + tracing::info!("All the blocks from the main node should be valid."); + for b in &blocks { + store.verify_pregenesis_block(ctx, b).await.unwrap(); + } + tracing::info!("Malformed blocks should not be valid"); + for b in &blocks { + let mut p = consensus_dal::Payload::decode(&b.payload).unwrap(); + // Arbitrary small change. + p.timestamp = rng.gen(); + store + .verify_pregenesis_block( + ctx, + &validator::PreGenesisBlock { + number: b.number, + justification: b.justification.clone(), + payload: p.encode(), + }, + ) + .await + .unwrap_err(); + } + + Ok(()) + }) + .await + .unwrap(); +} #[test_casing(2, VERSIONS)] #[tokio::test] @@ -36,7 +128,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { let account = &mut Account::random(); // Fill storage with unsigned L2 blocks. - // Fetch a suffix of blocks that we will generate (fake) certs for. + // Fetch a suffix of blocks that we will generate certs for. let want = scope::run!(ctx, |ctx, s| async { // Start state keeper. let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; @@ -44,8 +136,9 @@ async fn test_validator_block_store(version: ProtocolVersionId) { sk.push_random_blocks(rng, account, 10).await; pool.wait_for_payload(ctx, sk.last_block()).await?; let mut setup = SetupSpec::new(rng, 3); - setup.first_block = validator::BlockNumber(4); - let mut setup = Setup::from(setup); + setup.first_block = validator::BlockNumber(0); + setup.first_pregenesis_block = setup.first_block; + let mut setup = Setup::from_spec(rng, setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; conn.try_update_global_config( ctx, @@ -75,7 +168,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // Insert blocks one by one and check the storage state. 
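These tests replace `wait_for_batch`/`wait_for_block_certificates` call sites with `wait_for_batch_info(ctx, batch, POLL_INTERVAL)` and `wait_for_blocks(ctx, block)`, driven by the new `POLL_INTERVAL` constant (500 ms). The helper bodies live in the test utilities and are not part of this patch; below is a minimal sketch of the polling shape they presumably have, with `batch_info` as a hypothetical stand-in for the DB lookup and plain `tokio` in place of `zksync_concurrency`'s `ctx` handling:

```rust
use std::time::Duration;

/// Hypothetical stand-in for the DB lookup done by the real helper; the actual
/// implementation queries Postgres for the batch / block data.
async fn batch_info(_number: u64) -> Option<String> {
    None
}

/// Poll until the data shows up, sleeping `poll_interval` between attempts. This mirrors
/// the `wait_for_batch_info(ctx, batch, POLL_INTERVAL)` call sites, minus the cancellation
/// handling that `zksync_concurrency` contexts provide.
async fn wait_for_batch_info(number: u64, poll_interval: Duration) -> String {
    loop {
        if let Some(info) = batch_info(number).await {
            return info;
        }
        tokio::time::sleep(poll_interval).await;
    }
}

#[tokio::main]
async fn main() {
    // Times out here because the stand-in lookup never succeeds.
    let _ = tokio::time::timeout(
        Duration::from_millis(50),
        wait_for_batch_info(1, Duration::from_millis(10)),
    )
    .await;
}
```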
for (i, block) in want.iter().enumerate() { scope::run!(ctx, |ctx, s| async { - let (store, runner) = Store::new(ctx, pool.clone(), None).await.unwrap(); + let (store, runner) = Store::new(ctx, pool.clone(), None, None).await.unwrap(); s.spawn_bg(runner.run(ctx)); let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())).await.unwrap(); @@ -85,10 +178,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { .wait_until_persisted(ctx, block.number()) .await .unwrap(); - let got = pool - .wait_for_block_certificates(ctx, block.number()) - .await - .unwrap(); + let got = pool.wait_for_blocks(ctx, block.number()).await.unwrap(); assert_eq!(want[..=i], got); Ok(()) }) @@ -100,14 +190,14 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // In the current implementation, consensus certificates are created asynchronously // for the L2 blocks constructed by the StateKeeper. This means that consensus actor // is effectively just back filling the consensus certificates for the L2 blocks in storage. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_validator(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -149,9 +239,9 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Verify all certificates"); pool - .wait_for_block_certificates_and_verify(ctx, sk.last_block()) + .wait_for_blocks_and_verify_certs(ctx, sk.last_block()) .await - .context("wait_for_block_certificates_and_verify()")?; + .context("wait_for_blocks_and_verify_certs()")?; Ok(()) }) .await @@ -164,14 +254,14 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { } // Test running a validator node and 2 full nodes recovered from different snapshots. -#[test_casing(2, VERSIONS)] +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { +async fn test_nodes_from_various_snapshots(version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -226,15 +316,15 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { tracing::info!("produce more blocks and compare storages"); validator.push_random_blocks(rng, account, 5).await; let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; // node stores should be suffixes for validator store. 
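The comment closing the hunk above states the invariant these tests assert via the renamed `wait_for_blocks_and_verify_certs` helper: a node's fetched block range must be a suffix of the validator's. A tiny self-contained illustration of that suffix check, mirroring the `assert_eq!(want[want.len() - got.len()..], got[..])` comparisons in the surrounding hunks:

```rust
fn is_suffix<T: PartialEq>(want: &[T], got: &[T]) -> bool {
    got.len() <= want.len() && want[want.len() - got.len()..] == got[..]
}

fn main() {
    let validator_blocks = vec![1, 2, 3, 4, 5];
    let node_blocks = vec![3, 4, 5]; // node recovered from a later snapshot
    assert!(is_suffix(&validator_blocks, &node_blocks));
    assert!(!is_suffix(&validator_blocks, &[2, 3]));
}
```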
for got in [ node_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?, node_pool2 - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?, ] { assert_eq!(want[want.len() - got.len()..], got[..]); @@ -245,14 +335,14 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { .unwrap(); } -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let mut validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let mut validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -304,12 +394,12 @@ async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; assert_eq!( want, node_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await? ); Ok(()) @@ -322,16 +412,16 @@ async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { // Test running a validator node and a couple of full nodes. // Validator is producing signed blocks and fetchers are expected to fetch // them directly or indirectly. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 2; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); // topology: @@ -391,13 +481,15 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { // Note that block from before and after genesis have to be fetched. validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); + tracing::info!("Waiting for the validator to produce block {want_last}."); let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; + tracing::info!("Waiting for the nodes to fetch block {want_last}."); for pool in &node_pools { assert_eq!( want, - pool.wait_for_block_certificates_and_verify(ctx, want_last) + pool.wait_for_blocks_and_verify_certs(ctx, want_last) .await? 
); } @@ -408,16 +500,16 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { } // Test running external node (non-leader) validators. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 3; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); - let cfgs = testonly::new_configs(rng, &setup, 1); + let cfgs = testonly::new_configs(rng, &setup, 1, pregenesis); let account = &mut Account::random(); // Run all nodes in parallel. @@ -475,12 +567,12 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { main_node.push_random_blocks(rng, account, 5).await; let want_last = main_node.last_block(); let want = main_node_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; for pool in &ext_node_pools { assert_eq!( want, - pool.wait_for_block_certificates_and_verify(ctx, want_last) + pool.wait_for_blocks_and_verify_certs(ctx, want_last) .await? ); } @@ -491,14 +583,18 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { } // Test fetcher back filling missing certs. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_p2p_fetcher_backfill_certs( + from_snapshot: bool, + version: ProtocolVersionId, + pregenesis: bool, +) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -555,10 +651,10 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg)); validator.push_random_blocks(rng, account, 3).await; let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; let got = node_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; assert_eq!(want, got); Ok(()) @@ -571,14 +667,144 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV .unwrap(); } -#[test_casing(2, VERSIONS)] +// Test temporary fetcher fetching blocks if a lot of certs are missing. +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[tokio::test] +async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + // We force certs to be missing on EN by having 1 of the validators permanently offline. + // This way no blocks will be finalized at all, so no one will have certs. 
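The new `test_temporary_fetcher` imports `en::TEMPORARY_FETCHER_THRESHOLD` and deliberately produces `TEMPORARY_FETCHER_THRESHOLD + 1` blocks without certificates (one validator is kept offline, so nothing finalizes). The threshold's definition lives in `en.rs`, outside this patch; the sketch below only states the predicate the test appears to rely on, as an assumption to be checked against `en.rs` — both the constant value and the backlog computation are hypothetical:

```rust
/// Hypothetical illustration: run the temporary (centralized) fetcher only while the
/// certificate backlog is large. Not the actual `en.rs` logic.
const TEMPORARY_FETCHER_THRESHOLD: u64 = 10;

fn should_run_temporary_fetcher(last_block: u64, last_block_with_cert: u64) -> bool {
    last_block.saturating_sub(last_block_with_cert) > TEMPORARY_FETCHER_THRESHOLD
}

fn main() {
    // The test pushes THRESHOLD + 1 uncertified blocks, so the fetcher must kick in...
    assert!(should_run_temporary_fetcher(TEMPORARY_FETCHER_THRESHOLD + 1, 0));
    // ...and `test_temporary_fetcher_termination` (below) checks it exits once certs catch up.
    assert!(!should_run_temporary_fetcher(
        TEMPORARY_FETCHER_THRESHOLD + 1,
        TEMPORARY_FETCHER_THRESHOLD + 1
    ));
}
```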
+ let setup = Setup::new(rng, 2); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Spawn validator."); + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + // API server needs at least 1 L1 batch to start. + validator.seal_batch().await; + let client = validator.connect(ctx).await?; + + // Wait for the consensus to be initialized. + while ctx.wait(client.consensus_global_config()).await??.is_none() { + ctx.sleep(time::Duration::milliseconds(100)).await?; + } + + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + tracing::info!("Run centralized fetcher, so that there is a lot of certs missing."); + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_fetcher(ctx, client.clone())); + validator + .push_random_blocks(rng, account, TEMPORARY_FETCHER_THRESHOLD as usize + 1) + .await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + + tracing::info!( + "Run p2p fetcher. Blocks should be fetched by the temporary fetcher anyway." + ); + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); + validator.push_random_blocks(rng, account, 5).await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + Ok(()) + }) + .await + .unwrap(); +} + +// Test that temporary fetcher terminates once enough blocks have certs. +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_with_pruning(version: ProtocolVersionId) { +async fn test_temporary_fetcher_termination(from_snapshot: bool, version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 1); + let pregenesis = true; + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Spawn validator."); + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + // API server needs at least 1 L1 batch to start. + validator.seal_batch().await; + let client = validator.connect(ctx).await?; + + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + // Run the EN so the consensus is initialized on EN and wait for it to sync. 
+ scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); + validator.push_random_blocks(rng, account, 5).await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + + // Run the temporary fetcher. It should terminate immediately, since EN is synced. + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + node.run_temporary_fetcher(ctx, client).await?; + + Ok(()) + }) + .await + .unwrap(); +} + +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] +#[tokio::test] +async fn test_with_pruning(version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -642,27 +868,28 @@ async fn test_with_pruning(version: ProtocolVersionId) { validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool - .wait_for_batch(ctx, validator.last_sealed_batch()) - .await?; + .wait_for_batch_info(ctx, validator.last_sealed_batch(), POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // The main node is not supposed to be pruned. In particular `ConsensusDal::attestation_status` // does not look for where the last prune happened at, and thus if we prune the block genesis // points at, we might never be able to start the Executor. 
tracing::info!("Wait until the external node has all the batches we want to prune"); node_pool - .wait_for_batch(ctx, to_prune.next()) + .wait_for_batch_info(ctx, to_prune.next(), POLL_INTERVAL) .await - .context("wait_for_batch()")?; + .wrap("wait_for_batch_info()")?; tracing::info!("Prune some blocks and sync more"); node_pool .prune_batches(ctx, to_prune) .await - .context("prune_batches")?; + .wrap("prune_batches")?; validator.push_random_blocks(rng, account, 5).await; node_pool - .wait_for_block_certificates(ctx, validator.last_block()) + .wait_for_blocks(ctx, validator.last_block()) .await - .context("wait_for_block_certificates()")?; + .wrap("wait_for_blocks()")?; Ok(()) }) .await diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 149e6b3ccb0..46b84c34061 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -1,11 +1,15 @@ +use std::sync::Arc; + use anyhow::Context as _; use tokio::runtime::Handle; -use zksync_concurrency::{ctx, error::Wrap as _}; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; use zksync_consensus_roles::attester; use zksync_state::PostgresStorage; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ethabi, fee::Fee, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256}; -use zksync_vm_executor::oneshot::{CallOrExecute, MainOneshotExecutor, OneshotEnvParameters}; +use zksync_vm_executor::oneshot::{ + CallOrExecute, MainOneshotExecutor, MultiVMBaseSystemContracts, OneshotEnvParameters, +}; use zksync_vm_interface::{ executor::OneshotExecutor, ExecutionResult, OneshotTracingParams, TxExecutionArgs, }; @@ -23,16 +27,17 @@ pub(crate) struct VM { impl VM { /// Constructs a new `VM` instance. pub async fn new(pool: ConnectionPool) -> Self { + let base_system_contracts = + scope::wait_blocking(MultiVMBaseSystemContracts::load_eth_call_blocking).await; Self { pool, // L2 chain ID and fee account don't seem to matter for calls, hence the use of default values. - options: OneshotEnvParameters::for_execution( + options: OneshotEnvParameters::new( + Arc::new(base_system_contracts), L2ChainId::default(), AccountTreeId::default(), u32::MAX, - ) - .await - .expect("OneshotExecutorOptions"), + ), executor: MainOneshotExecutor::new(usize::MAX), } } diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index d2d84669978..d87fcf935b0 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -22,7 +22,7 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi, ethabi::{ParamType, Token}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, Address, L1BatchNumber, ProtocolVersionId, H256, U256, }; @@ -233,8 +233,8 @@ impl LocalL1BatchCommitData { let da = detect_da(protocol_version, reference) .context("cannot detect DA source from reference commitment token")?; - // For `PubdataDA::Calldata`, it's required that the pubdata fits into a single blob. - if matches!(da, PubdataDA::Calldata) { + // For `PubdataSendingMode::Calldata`, it's required that the pubdata fits into a single blob. 
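The `vm.rs` hunk above moves loading of the base system contracts out of `OneshotEnvParameters::for_execution` and into the `VM` constructor: the blocking load runs under `scope::wait_blocking` and the result is shared via `Arc` when building `OneshotEnvParameters::new`. A minimal sketch of the same pattern using plain `tokio::task::spawn_blocking`, which is roughly what such a helper wraps (an assumption; the real helper lives in `zksync_concurrency`). `BaseContracts` is a stand-in for `MultiVMBaseSystemContracts`:

```rust
use std::sync::Arc;

/// Stand-in for `MultiVMBaseSystemContracts`, which holds bootloader and
/// default-account bytecodes for every supported VM version.
#[derive(Debug)]
struct BaseContracts;

/// Blocking load (decompressing bytecode from embedded files in the real code).
fn load_contracts_blocking() -> BaseContracts {
    BaseContracts
}

#[tokio::main]
async fn main() {
    // Run the blocking load off the async executor, then share the result via `Arc`
    // so constructing the oneshot executor parameters no longer needs to be async
    // or fallible.
    let contracts = tokio::task::spawn_blocking(load_contracts_blocking)
        .await
        .expect("blocking task panicked");
    let shared = Arc::new(contracts);
    println!("{shared:?}");
}
```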
+ if matches!(da, PubdataSendingMode::Calldata) { let pubdata_len = self .l1_batch .header @@ -268,7 +268,7 @@ impl LocalL1BatchCommitData { pub fn detect_da( protocol_version: ProtocolVersionId, reference: &Token, -) -> Result { +) -> Result { /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata const PUBDATA_SOURCE_CALLDATA: u8 = 0; const PUBDATA_SOURCE_BLOBS: u8 = 1; @@ -279,7 +279,7 @@ pub fn detect_da( } if protocol_version.is_pre_1_4_2() { - return Ok(PubdataDA::Calldata); + return Ok(PubdataSendingMode::Calldata); } let reference = match reference { @@ -301,9 +301,9 @@ pub fn detect_da( ))), }; match last_reference_token.first() { - Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataDA::Calldata), - Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataDA::Blobs), - Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataDA::Custom), + Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataSendingMode::Calldata), + Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataSendingMode::Blobs), + Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataSendingMode::Custom), Some(&byte) => Err(parse_error(format!( "unexpected first byte of the last reference token; expected one of [{PUBDATA_SOURCE_CALLDATA}, {PUBDATA_SOURCE_BLOBS}], \ got {byte}" diff --git a/core/node/consistency_checker/src/tests/mod.rs b/core/node/consistency_checker/src/tests/mod.rs index 296780513e0..b1c78b481a8 100644 --- a/core/node/consistency_checker/src/tests/mod.rs +++ b/core/node/consistency_checker/src/tests/mod.rs @@ -64,7 +64,7 @@ pub(crate) fn build_commit_tx_input_data( let tokens = CommitBatches { last_committed_l1_batch: &batches[0], l1_batches: batches, - pubdata_da: PubdataDA::Calldata, + pubdata_da: PubdataSendingMode::Calldata, mode, } .into_tokens(); @@ -168,7 +168,7 @@ fn build_commit_tx_input_data_is_correct(commitment_mode: L1BatchCommitmentMode) .unwrap(); assert_eq!( commit_data, - CommitBatchInfo::new(commitment_mode, batch, PubdataDA::Calldata).into_token(), + CommitBatchInfo::new(commitment_mode, batch, PubdataSendingMode::Calldata).into_token(), ); } } diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index 60b65067f48..fa2f15920bd 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -37,3 +37,6 @@ blake2b_simd.workspace = true jsonrpsee = { workspace = true, features = ["ws-client"] } parity-scale-codec = { workspace = true, features = ["derive"] } subxt-signer = { workspace = true, features = ["sr25519", "native"] } +reqwest = { workspace = true } +bytes = { workspace = true } +backon.workspace = true diff --git a/core/node/da_clients/src/avail/client.rs b/core/node/da_clients/src/avail/client.rs index 7718691bf18..46d652d5713 100644 --- a/core/node/da_clients/src/avail/client.rs +++ b/core/node/da_clients/src/avail/client.rs @@ -1,34 +1,133 @@ -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, sync::Arc, time::Duration}; +use anyhow::anyhow; use async_trait::async_trait; use jsonrpsee::ws_client::WsClientBuilder; +use serde::{Deserialize, Serialize}; use subxt_signer::ExposeSecret; -use zksync_config::configs::da_client::avail::{AvailConfig, AvailSecrets}; +use zksync_config::configs::da_client::avail::{AvailClientConfig, AvailConfig, AvailSecrets}; use zksync_da_client::{ types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, }; +use zksync_types::{ + ethabi::{self, Token}, + web3::contract::Tokenize, + H256, U256, +}; + +use 
crate::avail::sdk::{GasRelayClient, RawAvailClient}; -use crate::avail::sdk::RawAvailClient; +#[derive(Debug, Clone)] +enum AvailClientMode { + Default(Box), + GasRelay(GasRelayClient), +} /// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. #[derive(Debug, Clone)] pub struct AvailClient { config: AvailConfig, - sdk_client: Arc, + sdk_client: Arc, + api_client: Arc, // bridge API reqwest client +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct BridgeAPIResponse { + blob_root: Option, + bridge_root: Option, + data_root_index: Option, + data_root_proof: Option>, + leaf: Option, + leaf_index: Option, + leaf_proof: Option>, + range_hash: Option, + error: Option, +} + +#[derive(Deserialize, Serialize, Debug)] +#[serde(rename_all = "camelCase")] +struct MerkleProofInput { + // proof of inclusion for the data root + data_root_proof: Vec, + // proof of inclusion of leaf within blob/bridge root + leaf_proof: Vec, + // abi.encodePacked(startBlock, endBlock) of header range commitment on vectorx + range_hash: H256, + // index of the data root in the commitment tree + data_root_index: U256, + // blob root to check proof against, or reconstruct the data root + blob_root: H256, + // bridge root to check proof against, or reconstruct the data root + bridge_root: H256, + // leaf being proven + leaf: H256, + // index of the leaf in the blob/bridge root tree + leaf_index: U256, +} + +impl Tokenize for MerkleProofInput { + fn into_tokens(self) -> Vec { + vec![Token::Tuple(vec![ + Token::Array( + self.data_root_proof + .iter() + .map(|x| Token::FixedBytes(x.as_bytes().to_vec())) + .collect(), + ), + Token::Array( + self.leaf_proof + .iter() + .map(|x| Token::FixedBytes(x.as_bytes().to_vec())) + .collect(), + ), + Token::FixedBytes(self.range_hash.as_bytes().to_vec()), + Token::Uint(self.data_root_index), + Token::FixedBytes(self.blob_root.as_bytes().to_vec()), + Token::FixedBytes(self.bridge_root.as_bytes().to_vec()), + Token::FixedBytes(self.leaf.as_bytes().to_vec()), + Token::Uint(self.leaf_index), + ])] + } } impl AvailClient { pub async fn new(config: AvailConfig, secrets: AvailSecrets) -> anyhow::Result { - let seed_phrase = secrets - .seed_phrase - .ok_or_else(|| anyhow::anyhow!("seed phrase"))?; - let sdk_client = RawAvailClient::new(config.app_id, seed_phrase.0.expose_secret()).await?; - - Ok(Self { - config, - sdk_client: Arc::new(sdk_client), - }) + let api_client = Arc::new(reqwest::Client::new()); + match config.config.clone() { + AvailClientConfig::GasRelay(conf) => { + let gas_relay_api_key = secrets + .gas_relay_api_key + .ok_or_else(|| anyhow::anyhow!("Gas relay API key is missing"))?; + let gas_relay_client = GasRelayClient::new( + &conf.gas_relay_api_url, + gas_relay_api_key.0.expose_secret(), + conf.max_retries, + Arc::clone(&api_client), + ) + .await?; + Ok(Self { + config, + sdk_client: Arc::new(AvailClientMode::GasRelay(gas_relay_client)), + api_client, + }) + } + AvailClientConfig::FullClient(conf) => { + let seed_phrase = secrets + .seed_phrase + .ok_or_else(|| anyhow::anyhow!("Seed phrase is missing"))?; + // these unwraps are safe because we validate in protobuf config + let sdk_client = + RawAvailClient::new(conf.app_id, seed_phrase.0.expose_secret()).await?; + + Ok(Self { + config, + sdk_client: Arc::new(AvailClientMode::Default(Box::new(sdk_client))), + api_client, + }) + } + } } } @@ -39,37 +138,83 @@ impl DataAvailabilityClient for AvailClient { _: u32, // batch_number data: Vec, ) 
-> anyhow::Result { - let client = WsClientBuilder::default() - .build(self.config.api_node_url.as_str()) - .await - .map_err(to_non_retriable_da_error)?; + match self.sdk_client.as_ref() { + AvailClientMode::Default(client) => { + let default_config = match &self.config.config { + AvailClientConfig::FullClient(conf) => conf, + _ => unreachable!(), // validated in protobuf config + }; + let ws_client = WsClientBuilder::default() + .build(default_config.api_node_url.clone().as_str()) + .await + .map_err(to_non_retriable_da_error)?; - let extrinsic = self - .sdk_client - .build_extrinsic(&client, data) - .await - .map_err(to_non_retriable_da_error)?; + let extrinsic = client + .build_extrinsic(&ws_client, data) + .await + .map_err(to_non_retriable_da_error)?; - let block_hash = self - .sdk_client - .submit_extrinsic(&client, extrinsic.as_str()) - .await - .map_err(to_non_retriable_da_error)?; - let tx_id = self - .sdk_client - .get_tx_id(&client, block_hash.as_str(), extrinsic.as_str()) - .await - .map_err(to_non_retriable_da_error)?; - - Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + let block_hash = client + .submit_extrinsic(&ws_client, extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + let tx_id = client + .get_tx_id(&ws_client, block_hash.as_str(), extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + } + AvailClientMode::GasRelay(client) => { + let (block_hash, extrinsic_index) = client + .post_data(data) + .await + .map_err(to_retriable_da_error)?; + Ok(DispatchResponse { + blob_id: format!("{:x}:{}", block_hash, extrinsic_index), + }) + } + } } async fn get_inclusion_data( &self, - _blob_id: &str, + blob_id: &str, ) -> anyhow::Result, DAError> { - // TODO: implement inclusion data retrieval - Ok(Some(InclusionData { data: vec![] })) + let (block_hash, tx_idx) = blob_id.split_once(':').ok_or_else(|| DAError { + error: anyhow!("Invalid blob ID format"), + is_retriable: false, + })?; + let url = format!( + "{}/eth/proof/{}?index={}", + self.config.bridge_api_url, block_hash, tx_idx + ); + + let response = self + .api_client + .get(&url) + .timeout(Duration::from_secs(self.config.timeout as u64)) + .send() + .await + .map_err(to_retriable_da_error)?; + + let bridge_api_data = response + .json::() + .await + .map_err(to_retriable_da_error)?; + + let attestation_data: MerkleProofInput = MerkleProofInput { + data_root_proof: bridge_api_data.data_root_proof.unwrap(), + leaf_proof: bridge_api_data.leaf_proof.unwrap(), + range_hash: bridge_api_data.range_hash.unwrap(), + data_root_index: bridge_api_data.data_root_index.unwrap(), + blob_root: bridge_api_data.blob_root.unwrap(), + bridge_root: bridge_api_data.bridge_root.unwrap(), + leaf: bridge_api_data.leaf.unwrap(), + leaf_index: bridge_api_data.leaf_index.unwrap(), + }; + Ok(Some(InclusionData { + data: ethabi::encode(&attestation_data.into_tokens()), + })) } fn clone_boxed(&self) -> Box { @@ -87,3 +232,10 @@ pub fn to_non_retriable_da_error(error: impl Into) -> DAError { is_retriable: false, } } + +pub fn to_retriable_da_error(error: impl Into) -> DAError { + DAError { + error: error.into(), + is_retriable: true, + } +} diff --git a/core/node/da_clients/src/avail/sdk.rs b/core/node/da_clients/src/avail/sdk.rs index 002422109d0..f693280ba4a 100644 --- a/core/node/da_clients/src/avail/sdk.rs +++ b/core/node/da_clients/src/avail/sdk.rs @@ -1,18 +1,22 @@ //! 
Minimal reimplementation of the Avail SDK client required for the DA client implementation. //! This is considered to be a temporary solution until a mature SDK is available on crates.io -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc, time}; +use backon::{ConstantBuilder, Retryable}; +use bytes::Bytes; use jsonrpsee::{ core::client::{Client, ClientT, Subscription, SubscriptionClientT}, rpc_params, }; use parity_scale_codec::{Compact, Decode, Encode}; use scale_encode::EncodeAsFields; +use serde::{Deserialize, Serialize}; use subxt_signer::{ bip39::Mnemonic, sr25519::{Keypair, Signature}, }; +use zksync_types::H256; use crate::avail::client::to_non_retriable_da_error; @@ -287,7 +291,7 @@ impl RawAvailClient { let status = sub.next().await.transpose()?; if status.is_some() && status.as_ref().unwrap().is_object() { - if let Some(block_hash) = status.unwrap().get("inBlock") { + if let Some(block_hash) = status.unwrap().get("finalized") { break block_hash .as_str() .ok_or_else(|| anyhow::anyhow!("Invalid block hash"))? @@ -369,3 +373,95 @@ fn ss58hash(data: &[u8]) -> Vec { ctx.update(data); ctx.finalize().to_vec() } + +/// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. +#[derive(Debug, Clone)] +pub(crate) struct GasRelayClient { + api_url: String, + api_key: String, + max_retries: usize, + api_client: Arc, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPISubmissionResponse { + submission_id: String, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPIStatusResponse { + submission: GasRelayAPISubmission, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPISubmission { + block_hash: Option, + extrinsic_index: Option, +} + +impl GasRelayClient { + const DEFAULT_INCLUSION_DELAY: time::Duration = time::Duration::from_secs(60); + const RETRY_DELAY: time::Duration = time::Duration::from_secs(5); + pub(crate) async fn new( + api_url: &str, + api_key: &str, + max_retries: usize, + api_client: Arc, + ) -> anyhow::Result { + Ok(Self { + api_url: api_url.to_owned(), + api_key: api_key.to_owned(), + max_retries, + api_client, + }) + } + + pub(crate) async fn post_data(&self, data: Vec) -> anyhow::Result<(H256, u64)> { + let submit_url = format!("{}/user/submit_raw_data?token=ethereum", &self.api_url); + // send the data to the gas relay + let submit_response = self + .api_client + .post(&submit_url) + .body(Bytes::from(data)) + .header("Content-Type", "text/plain") + .header("Authorization", &self.api_key) + .send() + .await?; + + let submit_response = submit_response + .json::() + .await?; + + let status_url = format!( + "{}/user/get_submission_info?submission_id={}", + self.api_url, submit_response.submission_id + ); + + tokio::time::sleep(Self::DEFAULT_INCLUSION_DELAY).await; + let status_response = (|| async { + self.api_client + .get(&status_url) + .header("Authorization", &self.api_key) + .send() + .await + }) + .retry( + &ConstantBuilder::default() + .with_delay(Self::RETRY_DELAY) + .with_max_times(self.max_retries), + ) + .await?; + + let status_response = status_response.json::().await?; + let (block_hash, extrinsic_index) = ( + status_response.submission.block_hash.ok_or_else(|| { + anyhow::anyhow!("Block hash not found in the response from the gas relay") + })?, + status_response.submission.extrinsic_index.ok_or_else(|| { + anyhow::anyhow!("Extrinsic index not found in the response from the gas relay") + })?, + ); + + Ok((block_hash, extrinsic_index)) + } +} diff 
--git a/core/node/db_pruner/src/tests.rs b/core/node/db_pruner/src/tests.rs index bb05e08e411..99fbada423d 100644 --- a/core/node/db_pruner/src/tests.rs +++ b/core/node/db_pruner/src/tests.rs @@ -114,7 +114,6 @@ async fn insert_l2_blocks( l1_tx_count: 0, l2_tx_count: 0, fee_account_address: Address::repeat_byte(1), - pubdata_params: Default::default(), base_fee_per_gas: 0, gas_per_pubdata_limit: 0, batch_fee_input: Default::default(), @@ -123,6 +122,7 @@ async fn insert_l2_blocks( virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/node/eth_sender/src/aggregated_operations.rs b/core/node/eth_sender/src/aggregated_operations.rs index 2dfaf594265..5271d42d3b7 100644 --- a/core/node/eth_sender/src/aggregated_operations.rs +++ b/core/node/eth_sender/src/aggregated_operations.rs @@ -3,13 +3,17 @@ use std::ops; use zksync_l1_contract_interface::i_executor::methods::{ExecuteBatches, ProveBatches}; use zksync_types::{ aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, - pubdata_da::PubdataDA, L1BatchNumber, ProtocolVersionId, + pubdata_da::PubdataSendingMode, L1BatchNumber, ProtocolVersionId, }; #[allow(clippy::large_enum_variant)] #[derive(Debug, Clone)] pub enum AggregatedOperation { - Commit(L1BatchWithMetadata, Vec, PubdataDA), + Commit( + L1BatchWithMetadata, + Vec, + PubdataSendingMode, + ), PublishProofOnchain(ProveBatches), Execute(ExecuteBatches), } diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs index 4d8cd47734c..e4f84948c6e 100644 --- a/core/node/eth_sender/src/aggregator.rs +++ b/core/node/eth_sender/src/aggregator.rs @@ -14,7 +14,7 @@ use zksync_types::{ helpers::unix_timestamp_ms, l1::L1Tx, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, L1BatchNumber, ProtocolVersionId, }; @@ -39,7 +39,7 @@ pub struct Aggregator { /// means no wait is needed: nonces will still provide the correct ordering of /// transactions. 
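Several hunks in this patch rename `PubdataDA` to `PubdataSendingMode` and use the config value directly (no `.into()` conversion in `Aggregator::new`). The stand-in below recaps how the mode is derived and consumed in the surrounding code: the consistency checker decodes it from the first byte of the commitment's last reference token (earlier hunk), and the eth-sender attaches a blob sidecar only in `Blobs` mode (later hunk). The enum here is local to the sketch; the byte constant for `Custom` is not shown in this patch, so that variant is left out:

```rust
/// Local stand-in for `zksync_types::pubdata_da::PubdataSendingMode` (formerly `PubdataDA`).
/// Only the variants whose byte constants are visible in this patch are modelled;
/// `Custom` is handled analogously in the real code.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum PubdataSendingMode {
    Calldata,
    Blobs,
}

fn main() {
    // The consistency checker decodes the mode from the first byte of the last
    // reference token: 0 = calldata, 1 = blobs (constants used by the L1 contracts).
    let detect = |byte: u8| match byte {
        0 => Some(PubdataSendingMode::Calldata),
        1 => Some(PubdataSendingMode::Blobs),
        _ => None,
    };
    assert_eq!(detect(0), Some(PubdataSendingMode::Calldata));
    assert_eq!(detect(1), Some(PubdataSendingMode::Blobs));

    // The eth-sender attaches a blob sidecar to the commit tx only in `Blobs` mode.
    let needs_sidecar = detect(1).unwrap() == PubdataSendingMode::Blobs;
    assert!(needs_sidecar);
}
```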
operate_4844_mode: bool, - pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, commitment_mode: L1BatchCommitmentMode, priority_merkle_tree: MiniMerkleTree, } @@ -52,7 +52,7 @@ impl Aggregator { commitment_mode: L1BatchCommitmentMode, connection: &mut Connection<'_, Core>, ) -> anyhow::Result { - let pubdata_da = config.pubdata_sending_mode.into(); + let pubdata_da = config.pubdata_sending_mode; let priority_tree_start_index = config.priority_tree_start_index.unwrap_or(0); let priority_op_hashes = connection @@ -543,7 +543,7 @@ impl Aggregator { } } - pub fn pubdata_da(&self) -> PubdataDA { + pub fn pubdata_da(&self) -> PubdataSendingMode { self.pubdata_da } diff --git a/core/node/eth_sender/src/eth_fees_oracle.rs b/core/node/eth_sender/src/eth_fees_oracle.rs index 2c87848dcc3..ebd1568edb6 100644 --- a/core/node/eth_sender/src/eth_fees_oracle.rs +++ b/core/node/eth_sender/src/eth_fees_oracle.rs @@ -23,7 +23,7 @@ pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { fn calculate_fees( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, operator_type: OperatorType, ) -> Result; } @@ -32,6 +32,7 @@ pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { pub(crate) struct GasAdjusterFeesOracle { pub gas_adjuster: Arc, pub max_acceptable_priority_fee_in_gwei: u64, + pub time_in_mempool_in_l1_blocks_cap: u32, } impl GasAdjusterFeesOracle { @@ -80,11 +81,16 @@ impl GasAdjusterFeesOracle { fn calculate_fees_no_blob_sidecar( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, ) -> Result { - // cap it at 6h to not allow nearly infinite values when a tx is stuck for a long time - let capped_time_in_mempool = min(time_in_mempool, 1800); - let mut base_fee_per_gas = self.gas_adjuster.get_base_fee(capped_time_in_mempool); + // we cap it to not allow nearly infinite values when a tx is stuck for a long time + let capped_time_in_mempool_in_l1_blocks = min( + time_in_mempool_in_l1_blocks, + self.time_in_mempool_in_l1_blocks_cap, + ); + let mut base_fee_per_gas = self + .gas_adjuster + .get_base_fee(capped_time_in_mempool_in_l1_blocks); self.assert_fee_is_not_zero(base_fee_per_gas, "base"); if let Some(previous_sent_tx) = previous_sent_tx { self.verify_base_fee_not_too_low_on_resend( @@ -162,14 +168,14 @@ impl EthFeesOracle for GasAdjusterFeesOracle { fn calculate_fees( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, operator_type: OperatorType, ) -> Result { let has_blob_sidecar = operator_type == OperatorType::Blob; if has_blob_sidecar { self.calculate_fees_with_blob_sidecar(previous_sent_tx) } else { - self.calculate_fees_no_blob_sidecar(previous_sent_tx, time_in_mempool) + self.calculate_fees_no_blob_sidecar(previous_sent_tx, time_in_mempool_in_l1_blocks) } } } diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 1c618a53795..ccdc93440ac 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -19,7 +19,7 @@ use zksync_types::{ ethabi::{Function, Token}, l2_to_l1_log::UserL2ToL1Log, protocol_version::{L1VerifierConfig, PACKED_SEMVER_MINOR_MASK}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, settlement::SettlementMode, web3::{contract::Error as Web3ContractError, BlockNumber}, Address, L2ChainId, ProtocolVersionId, SLChainId, H256, U256, @@ -144,19 +144,19 @@ impl EthTxAggregator { } pub(super) async fn 
get_multicall_data(&mut self) -> Result { - let calldata = self.generate_calldata_for_multicall(); + let (calldata, evm_emulator_hash_requested) = self.generate_calldata_for_multicall(); let args = CallFunctionArgs::new(&self.functions.aggregate3.name, calldata).for_contract( self.l1_multicall3_address, &self.functions.multicall_contract, ); let aggregate3_result: Token = args.call((*self.eth_client).as_ref()).await?; - self.parse_multicall_data(aggregate3_result) + self.parse_multicall_data(aggregate3_result, evm_emulator_hash_requested) } // Multicall's aggregate function accepts 1 argument - arrays of different contract calls. // The role of the method below is to tokenize input for multicall, which is actually a vector of tokens. // Each token describes a specific contract call. - pub(super) fn generate_calldata_for_multicall(&self) -> Vec { + pub(super) fn generate_calldata_for_multicall(&self) -> (Vec, bool) { const ALLOW_FAILURE: bool = false; // First zksync contract call @@ -215,14 +215,31 @@ impl EthTxAggregator { calldata: get_protocol_version_input, }; - // Convert structs into tokens and return vector with them - vec![ + let mut token_vec = vec![ get_bootloader_hash_call.into_token(), get_default_aa_hash_call.into_token(), get_verifier_params_call.into_token(), get_verifier_call.into_token(), get_protocol_version_call.into_token(), - ] + ]; + + let mut evm_emulator_hash_requested = false; + let get_l2_evm_emulator_hash_input = self + .functions + .get_evm_emulator_bytecode_hash + .as_ref() + .and_then(|f| f.encode_input(&[]).ok()); + if let Some(input) = get_l2_evm_emulator_hash_input { + let call = Multicall3Call { + target: self.state_transition_chain_contract, + allow_failure: ALLOW_FAILURE, + calldata: input, + }; + token_vec.insert(2, call.into_token()); + evm_emulator_hash_requested = true; + } + + (token_vec, evm_emulator_hash_requested) } // The role of the method below is to de-tokenize multicall call's result, which is actually a token. @@ -230,6 +247,7 @@ impl EthTxAggregator { pub(super) fn parse_multicall_data( &self, token: Token, + evm_emulator_hash_requested: bool, ) -> Result { let parse_error = |tokens: &[Token]| { Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( @@ -238,8 +256,9 @@ impl EthTxAggregator { }; if let Token::Array(call_results) = token { - // 5 calls are aggregated in multicall - if call_results.len() != 5 { + let number_of_calls = if evm_emulator_hash_requested { 6 } else { 5 }; + // 5 or 6 calls are aggregated in multicall + if call_results.len() != number_of_calls { return parse_error(&call_results); } let mut call_results_iterator = call_results.into_iter(); @@ -268,12 +287,31 @@ impl EthTxAggregator { ))); } let default_aa = H256::from_slice(&multicall3_default_aa); + + let evm_emulator = if evm_emulator_hash_requested { + let multicall3_evm_emulator = + Multicall3Result::from_token(call_results_iterator.next().unwrap())? + .return_data; + if multicall3_evm_emulator.len() != 32 { + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( + "multicall3 EVM emulator hash data is not of the len of 32: {:?}", + multicall3_evm_emulator + ), + ))); + } + Some(H256::from_slice(&multicall3_evm_emulator)) + } else { + None + }; + let base_system_contracts_hashes = BaseSystemContractsHashes { bootloader, default_aa, + evm_emulator, }; - call_results_iterator.next().unwrap(); + call_results_iterator.next().unwrap(); // FIXME: why is this value requested? 
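The hunk above makes the EVM-emulator bytecode-hash getter an optional sixth sub-call, inserted at index 2 of the multicall calldata, and threads an `evm_emulator_hash_requested` flag into `parse_multicall_data` so it knows whether to expect 5 or 6 results. A small self-contained sketch of the resulting call ordering; the getter names match the ones registered in `ZkSyncFunctions` later in this patch:

```rust
/// Expected ordering of the multicall sub-calls after this patch; the EVM-emulator getter
/// is optional and, when requested, is inserted at index 2 (cf. `token_vec.insert(2, ...)`).
fn multicall_layout(evm_emulator_hash_requested: bool) -> Vec<&'static str> {
    let mut calls = vec![
        "getL2BootloaderBytecodeHash",
        "getL2DefaultAccountBytecodeHash",
        "getVerifierParams",
        "getVerifier",
        "getProtocolVersion",
    ];
    if evm_emulator_hash_requested {
        calls.insert(2, "getL2EvmSimulatorBytecodeHash");
    }
    calls
}

fn main() {
    // `parse_multicall_data` must consume the results in exactly this order,
    // which is why it branches on 5 vs. 6 expected entries.
    assert_eq!(multicall_layout(false).len(), 5);
    assert_eq!(multicall_layout(true).len(), 6);
    assert_eq!(multicall_layout(true)[2], "getL2EvmSimulatorBytecodeHash");
}
```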
let multicall3_verifier_address = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; @@ -446,11 +484,12 @@ impl EthTxAggregator { &self.functions.post_gateway_commit }; - let l1_batch_for_sidecar = if PubdataDA::Blobs == self.aggregator.pubdata_da() { - Some(l1_batches[0].clone()) - } else { - None - }; + let l1_batch_for_sidecar = + if PubdataSendingMode::Blobs == self.aggregator.pubdata_da() { + Some(l1_batches[0].clone()) + } else { + None + }; Self::encode_commit_data(encoding_fn, &commit_data, l1_batch_for_sidecar) } diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index 450443a652d..1aa233114a0 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -48,6 +48,7 @@ impl EthTxManager { let fees_oracle = GasAdjusterFeesOracle { gas_adjuster, max_acceptable_priority_fee_in_gwei: config.max_acceptable_priority_fee_in_gwei, + time_in_mempool_in_l1_blocks_cap: config.time_in_mempool_in_l1_blocks_cap, }; let l1_interface = Box::new(RealL1Interface { ethereum_gateway, @@ -111,7 +112,7 @@ impl EthTxManager { &mut self, storage: &mut Connection<'_, Core>, tx: &EthTx, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, current_block: L1BlockNumber, ) -> Result { let previous_sent_tx = storage @@ -127,7 +128,7 @@ impl EthTxManager { pubdata_price: _, } = self.fees_oracle.calculate_fees( &previous_sent_tx, - time_in_mempool, + time_in_mempool_in_l1_blocks, self.operator_type(tx), )?; @@ -602,13 +603,18 @@ impl EthTxManager { .await? { // New gas price depends on the time this tx spent in mempool. - let time_in_mempool = l1_block_numbers.latest.0 - sent_at_block; + let time_in_mempool_in_l1_blocks = l1_block_numbers.latest.0 - sent_at_block; // We don't want to return early in case resend does not succeed - // the error is logged anyway, but early returns will prevent // sending new operations. 
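The fee-oracle hunks above rename `time_in_mempool` to `time_in_mempool_in_l1_blocks` and replace the hard-coded cap of 1800 blocks (~6 h) with the configurable `time_in_mempool_in_l1_blocks_cap`. The value is computed in `eth_tx_manager` as the latest L1 block minus the block the tx was sent at, and is capped before being fed to the gas adjuster, so a long-stuck transaction cannot escalate the base fee without bound. A minimal sketch of just the capping step:

```rust
fn main() {
    // Configurable cap; the removed code hard-coded 1800 L1 blocks (~6 h).
    let time_in_mempool_in_l1_blocks_cap: u32 = 1_800;

    // Time in mempool is measured as `l1_block_numbers.latest - sent_at_block` and
    // capped before being passed to `GasAdjuster::get_base_fee`.
    for time_in_mempool_in_l1_blocks in [12_u32, 1_800, 50_000] {
        let capped = time_in_mempool_in_l1_blocks.min(time_in_mempool_in_l1_blocks_cap);
        println!("{time_in_mempool_in_l1_blocks} blocks in mempool -> escalate as if {capped}");
    }
}
```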
let _ = self - .send_eth_tx(storage, &tx, time_in_mempool, l1_block_numbers.latest) + .send_eth_tx( + storage, + &tx, + time_in_mempool_in_l1_blocks, + l1_block_numbers.latest, + ) .await?; } Ok(()) diff --git a/core/node/eth_sender/src/publish_criterion.rs b/core/node/eth_sender/src/publish_criterion.rs index 52d861ce0af..30f0820b148 100644 --- a/core/node/eth_sender/src/publish_criterion.rs +++ b/core/node/eth_sender/src/publish_criterion.rs @@ -8,7 +8,7 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, L1BatchNumber, }; @@ -202,7 +202,7 @@ impl L1BatchPublishCriterion for GasCriterion { pub struct DataSizeCriterion { pub op: AggregatedActionType, pub data_limit: usize, - pub pubdata_da: PubdataDA, + pub pubdata_da: PubdataSendingMode, pub commitment_mode: L1BatchCommitmentMode, } diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index 3a62bdc8dbf..797db40919b 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use zksync_config::{ - configs::eth_sender::{ProofSendingMode, PubdataSendingMode, SenderConfig}, + configs::eth_sender::{ProofSendingMode, SenderConfig}, ContractsConfig, EthConfig, GasAdjusterConfig, }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; @@ -12,7 +12,7 @@ use zksync_node_test_utils::{create_l1_batch, l1_batch_metadata_to_commitment_ar use zksync_object_store::MockObjectStore; use zksync_types::{ aggregated_operations::AggregatedActionType, block::L1BatchHeader, - commitment::L1BatchCommitmentMode, eth_sender::EthTx, pubdata_da::PubdataDA, + commitment::L1BatchCommitmentMode, eth_sender::EthTx, pubdata_da::PubdataSendingMode, settlement::SettlementMode, Address, L1BatchNumber, ProtocolVersion, H256, }; @@ -23,6 +23,8 @@ use crate::{ Aggregator, EthTxAggregator, EthTxManager, }; +pub(super) const STATE_TRANSITION_CONTRACT_ADDRESS: Address = Address::repeat_byte(0xa0); + // Alias to conveniently call static methods of `ETHSender`. 
type MockEthTxManager = EthTxManager; @@ -172,7 +174,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -192,7 +194,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); l2_gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -212,7 +214,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); gateway_blobs.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -267,7 +269,7 @@ impl EthSenderTester { // ZKsync contract address Address::random(), contracts_config.l1_multicall3_addr, - Address::random(), + STATE_TRANSITION_CONTRACT_ADDRESS, Default::default(), custom_commit_sender_addr, SettlementMode::SettlesToL1, @@ -494,9 +496,9 @@ impl EthSenderTester { pub async fn save_commit_tx(&mut self, l1_batch_number: L1BatchNumber) -> EthTx { assert_eq!(l1_batch_number, self.next_l1_batch_number_to_commit); let pubdata_mode = if self.pubdata_sending_mode == PubdataSendingMode::Blobs { - PubdataDA::Blobs + PubdataSendingMode::Blobs } else { - PubdataDA::Calldata + PubdataSendingMode::Calldata }; let operation = AggregatedOperation::Commit( l1_batch_with_metadata( diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index db49564093f..aab6d2e43d7 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -1,7 +1,9 @@ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_l1_contract_interface::i_executor::methods::ExecuteBatches; +use zksync_l1_contract_interface::{ + i_executor::methods::ExecuteBatches, multicall3::Multicall3Call, Tokenizable, +}; use zksync_node_test_utils::create_l1_batch; use zksync_types::{ aggregated_operations::AggregatedActionType, @@ -9,16 +11,19 @@ use zksync_types::{ commitment::{ L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, L1BatchWithMetadata, }, + ethabi, ethabi::Token, helpers::unix_timestamp_ms, + web3, web3::contract::Error, - ProtocolVersionId, H256, + Address, ProtocolVersionId, H256, }; use crate::{ abstract_l1_interface::OperatorType, aggregated_operations::AggregatedOperation, - tester::{EthSenderTester, TestL1Batch}, + tester::{EthSenderTester, TestL1Batch, STATE_TRANSITION_CONTRACT_ADDRESS}, + zksync_functions::ZkSyncFunctions, EthSenderError, }; @@ -38,21 +43,59 @@ const COMMITMENT_MODES: [L1BatchCommitmentMode; 2] = [ L1BatchCommitmentMode::Validium, ]; -pub(crate) fn mock_multicall_response() -> Token { - Token::Array(vec![ - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 96])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 32])]), - Token::Tuple(vec![ - Token::Bool(true), - 
Token::Bytes( +pub(crate) fn mock_multicall_response(call: &web3::CallRequest) -> Token { + let functions = ZkSyncFunctions::default(); + let evm_emulator_getter_signature = functions + .get_evm_emulator_bytecode_hash + .as_ref() + .map(ethabi::Function::short_signature); + let bootloader_signature = functions.get_l2_bootloader_bytecode_hash.short_signature(); + let default_aa_signature = functions + .get_l2_default_account_bytecode_hash + .short_signature(); + let evm_emulator_getter_signature = evm_emulator_getter_signature.as_ref().map(|sig| &sig[..]); + + let calldata = &call.data.as_ref().expect("no calldata").0; + assert_eq!(calldata[..4], functions.aggregate3.short_signature()); + let mut tokens = functions + .aggregate3 + .decode_input(&calldata[4..]) + .expect("invalid multicall"); + assert_eq!(tokens.len(), 1); + let Token::Array(tokens) = tokens.pop().unwrap() else { + panic!("Unexpected input: {tokens:?}"); + }; + + let calls = tokens.into_iter().map(Multicall3Call::from_token); + let response = calls.map(|call| { + let call = call.unwrap(); + assert_eq!(call.target, STATE_TRANSITION_CONTRACT_ADDRESS); + let output = match &call.calldata[..4] { + selector if selector == bootloader_signature => { + vec![1u8; 32] + } + selector if selector == default_aa_signature => { + vec![2u8; 32] + } + selector if Some(selector) == evm_emulator_getter_signature => { + vec![3u8; 32] + } + selector if selector == functions.get_verifier_params.short_signature() => { + vec![4u8; 96] + } + selector if selector == functions.get_verifier.short_signature() => { + vec![5u8; 32] + } + selector if selector == functions.get_protocol_version.short_signature() => { H256::from_low_u64_be(ProtocolVersionId::default() as u64) .0 - .to_vec(), - ), - ]), - ]) + .to_vec() + } + _ => panic!("unexpected call: {call:?}"), + }; + Token::Tuple(vec![Token::Bool(true), Token::Bytes(output)]) + }); + Token::Array(response.collect()) } pub(crate) fn l1_batch_with_metadata(header: L1BatchHeader) -> L1BatchWithMetadata { @@ -75,6 +118,7 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { zkporter_is_available: false, bootloader_code_hash: H256::default(), default_aa_code_hash: H256::default(), + evm_emulator_code_hash: None, protocol_version: Some(ProtocolVersionId::default()), }, aux_data_hash: H256::default(), @@ -660,22 +704,71 @@ async fn skipped_l1_batch_in_the_middle( Ok(()) } -#[test_casing(2, COMMITMENT_MODES)] +#[test_casing(2, [false, true])] #[test_log::test(tokio::test)] -async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { +async fn parsing_multicall_data(with_evm_emulator: bool) { let tester = EthSenderTester::new( ConnectionPool::::test_pool().await, vec![100; 100], false, true, - commitment_mode, + L1BatchCommitmentMode::Rollup, ) .await; - assert!(tester + let mut mock_response = vec![ + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 96])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![5u8; 32])]), + Token::Tuple(vec![ + Token::Bool(true), + Token::Bytes( + H256::from_low_u64_be(ProtocolVersionId::latest() as u64) + .0 + .to_vec(), + ), + ]), + ]; + if with_evm_emulator { + mock_response.insert( + 2, + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 32])]), + ); + } + let mock_response = Token::Array(mock_response); + + let parsed = tester .aggregator - 
.parse_multicall_data(mock_multicall_response()) - .is_ok()); + .parse_multicall_data(mock_response, with_evm_emulator) + .unwrap(); + assert_eq!( + parsed.base_system_contracts_hashes.bootloader, + H256::repeat_byte(1) + ); + assert_eq!( + parsed.base_system_contracts_hashes.default_aa, + H256::repeat_byte(2) + ); + let expected_evm_emulator_hash = with_evm_emulator.then(|| H256::repeat_byte(3)); + assert_eq!( + parsed.base_system_contracts_hashes.evm_emulator, + expected_evm_emulator_hash + ); + assert_eq!(parsed.verifier_address, Address::repeat_byte(5)); + assert_eq!(parsed.protocol_version_id, ProtocolVersionId::latest()); +} + +#[test_log::test(tokio::test)] +async fn parsing_multicall_data_errors() { + let tester = EthSenderTester::new( + ConnectionPool::::test_pool().await, + vec![100; 100], + false, + true, + L1BatchCommitmentMode::Rollup, + ) + .await; let original_wrong_form_data = vec![ // should contain 5 tuples @@ -726,7 +819,7 @@ async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { assert_matches!( tester .aggregator - .parse_multicall_data(wrong_data_instance.clone()), + .parse_multicall_data(wrong_data_instance.clone(), true), Err(EthSenderError::Parse(Error::InvalidOutputType(_))) ); } @@ -743,6 +836,17 @@ async fn get_multicall_data(commitment_mode: L1BatchCommitmentMode) { commitment_mode, ) .await; - let multicall_data = tester.aggregator.get_multicall_data().await; - assert!(multicall_data.is_ok()); + + let data = tester.aggregator.get_multicall_data().await.unwrap(); + assert_eq!( + data.base_system_contracts_hashes.bootloader, + H256::repeat_byte(1) + ); + assert_eq!( + data.base_system_contracts_hashes.default_aa, + H256::repeat_byte(2) + ); + assert_eq!(data.base_system_contracts_hashes.evm_emulator, None); + assert_eq!(data.verifier_address, Address::repeat_byte(5)); + assert_eq!(data.protocol_version_id, ProtocolVersionId::latest()); } diff --git a/core/node/eth_sender/src/zksync_functions.rs b/core/node/eth_sender/src/zksync_functions.rs index 05c9805a4fc..f3e4998ef37 100644 --- a/core/node/eth_sender/src/zksync_functions.rs +++ b/core/node/eth_sender/src/zksync_functions.rs @@ -17,6 +17,7 @@ pub(super) struct ZkSyncFunctions { pub(super) get_l2_bootloader_bytecode_hash: Function, pub(super) get_l2_default_account_bytecode_hash: Function, pub(super) get_verifier: Function, + pub(super) get_evm_emulator_bytecode_hash: Option, pub(super) get_verifier_params: Function, pub(super) get_protocol_version: Function, @@ -37,6 +38,14 @@ fn get_function(contract: &Contract, name: &str) -> Function { .unwrap_or_else(|| panic!("{} function entry not found", name)) } +fn get_optional_function(contract: &Contract, name: &str) -> Option { + contract + .functions + .get(name) + .cloned() + .map(|mut functions| functions.pop().unwrap()) +} + impl Default for ZkSyncFunctions { fn default() -> Self { let zksync_contract = hyperchain_contract(); @@ -55,6 +64,8 @@ impl Default for ZkSyncFunctions { get_function(&zksync_contract, "getL2BootloaderBytecodeHash"); let get_l2_default_account_bytecode_hash = get_function(&zksync_contract, "getL2DefaultAccountBytecodeHash"); + let get_evm_emulator_bytecode_hash = + get_optional_function(&zksync_contract, "getL2EvmSimulatorBytecodeHash"); let get_verifier = get_function(&zksync_contract, "getVerifier"); let get_verifier_params = get_function(&zksync_contract, "getVerifierParams"); let get_protocol_version = get_function(&zksync_contract, "getProtocolVersion"); @@ -70,6 +81,7 @@ impl Default for ZkSyncFunctions { 
post_gateway_execute, get_l2_bootloader_bytecode_hash, get_l2_default_account_bytecode_hash, + get_evm_emulator_bytecode_hash, get_verifier, get_verifier_params, get_protocol_version, diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 54376bae82e..ac5fc86c6e9 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -46,7 +46,8 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { pub const RETRY_LIMIT: usize = 5; const TOO_MANY_RESULTS_INFURA: &str = "query returned more than"; const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded"; -const TOO_MANY_RESULTS_RETH: &str = "query exceeds max block range"; +const TOO_MANY_RESULTS_RETH: &str = "length limit exceeded"; +const TOO_BIG_RANGE_RETH: &str = "query exceeds max block range"; const TOO_MANY_RESULTS_CHAINSTACK: &str = "range limit exceeded"; /// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]). @@ -149,6 +150,7 @@ impl EthHttpQueryClient { if err_message.contains(TOO_MANY_RESULTS_INFURA) || err_message.contains(TOO_MANY_RESULTS_ALCHEMY) || err_message.contains(TOO_MANY_RESULTS_RETH) + || err_message.contains(TOO_BIG_RANGE_RETH) || err_message.contains(TOO_MANY_RESULTS_CHAINSTACK) { // get the numeric block ids diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index 6020b132ddb..1dc72dca3c2 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -245,8 +245,11 @@ fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { received_timestamp_ms: 0, }; // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. - let tx = - Transaction::try_from(abi::Transaction::try_from(Transaction::from(tx)).unwrap()).unwrap(); + let tx = Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap(); tx.try_into().unwrap() } @@ -272,10 +275,13 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx received_timestamp_ms: 0, }; // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. 
- Transaction::try_from(abi::Transaction::try_from(Transaction::from(tx)).unwrap()) - .unwrap() - .try_into() - .unwrap() + Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap() + .try_into() + .unwrap() } async fn create_test_watcher( diff --git a/core/node/fee_model/Cargo.toml b/core/node/fee_model/Cargo.toml index 8760b97d9db..a84a7c5c217 100644 --- a/core/node/fee_model/Cargo.toml +++ b/core/node/fee_model/Cargo.toml @@ -16,9 +16,7 @@ zksync_types.workspace = true zksync_dal.workspace = true zksync_config.workspace = true zksync_eth_client.workspace = true -zksync_utils.workspace = true zksync_web3_decl.workspace = true -bigdecimal.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index e43de3e34bf..6fce46f7722 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -6,9 +6,12 @@ use std::{ }; use tokio::sync::watch; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; +use zksync_config::GasAdjusterConfig; use zksync_eth_client::EthFeeInterface; -use zksync_types::{commitment::L1BatchCommitmentMode, L1_GAS_PER_PUBDATA_BYTE, U256}; +use zksync_types::{ + commitment::L1BatchCommitmentMode, pubdata_da::PubdataSendingMode, L1_GAS_PER_PUBDATA_BYTE, + U256, +}; use zksync_web3_decl::client::{DynClient, L1, L2}; use self::metrics::METRICS; @@ -317,14 +320,14 @@ impl TxParamsProvider for GasAdjuster { // smooth out base_fee increases in general. // In other words, in order to pay less fees, we are ready to wait longer. // But the longer we wait, the more we are ready to pay. - fn get_base_fee(&self, time_in_mempool: u32) -> u64 { + fn get_base_fee(&self, time_in_mempool_in_l1_blocks: u32) -> u64 { let a = self.config.pricing_formula_parameter_a; let b = self.config.pricing_formula_parameter_b; // Currently we use an exponential formula. 
// The alternative is a linear one: - // `let scale_factor = a + b * time_in_mempool as f64;` - let scale_factor = a * b.powf(time_in_mempool as f64); + // `let scale_factor = a + b * time_in_mempool_in_l1_blocks as f64;` + let scale_factor = a * b.powf(time_in_mempool_in_l1_blocks as f64); let median = self.base_fee_statistics.median(); METRICS.median_base_fee_per_gas.set(median); let new_fee = median as f64 * scale_factor; diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs index 47023203de0..ab649e2d7c9 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs @@ -1,9 +1,11 @@ use std::{collections::VecDeque, sync::RwLockReadGuard}; use test_casing::test_casing; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; +use zksync_config::GasAdjusterConfig; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; -use zksync_types::{commitment::L1BatchCommitmentMode, settlement::SettlementMode}; +use zksync_types::{ + commitment::L1BatchCommitmentMode, pubdata_da::PubdataSendingMode, settlement::SettlementMode, +}; use zksync_web3_decl::client::L2; use super::{GasAdjuster, GasStatistics, GasStatisticsInner}; diff --git a/core/node/fee_model/src/l1_gas_price/mod.rs b/core/node/fee_model/src/l1_gas_price/mod.rs index 2a5d63089ca..e23bccf27ee 100644 --- a/core/node/fee_model/src/l1_gas_price/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/mod.rs @@ -16,7 +16,7 @@ mod main_node_fetcher; /// This trait, as a bound, should only be used in components that actually sign and send transactions. pub trait TxParamsProvider: fmt::Debug + 'static + Send + Sync { /// Returns the recommended `max_fee_per_gas` value (EIP1559). - fn get_base_fee(&self, time_in_mempool: u32) -> u64; + fn get_base_fee(&self, time_in_mempool_in_l1_blocks: u32) -> u64; /// Returns the recommended `max_priority_fee_per_gas` value (EIP1559). fn get_priority_fee(&self) -> u64; diff --git a/core/node/fee_model/src/lib.rs b/core/node/fee_model/src/lib.rs index fe4f6a27ce2..380a279cccc 100644 --- a/core/node/fee_model/src/lib.rs +++ b/core/node/fee_model/src/lib.rs @@ -3,14 +3,9 @@ use std::{fmt, fmt::Debug, sync::Arc}; use anyhow::Context as _; use async_trait::async_trait; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_types::{ - fee_model::{ - BaseTokenConversionRatio, BatchFeeInput, FeeModelConfig, FeeModelConfigV2, FeeParams, - FeeParamsV1, FeeParamsV2, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput, - }, - U256, +use zksync_types::fee_model::{ + BaseTokenConversionRatio, BatchFeeInput, FeeModelConfig, FeeParams, FeeParamsV1, FeeParamsV2, }; -use zksync_utils::ceil_div_u256; use crate::l1_gas_price::GasAdjuster; @@ -34,13 +29,7 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { l1_pubdata_price_scale_factor: f64, ) -> anyhow::Result { let params = self.get_fee_model_params(); - Ok( - ::default_batch_fee_input_scaled( - params, - l1_gas_price_scale_factor, - l1_pubdata_price_scale_factor, - ), - ) + Ok(params.scale(l1_gas_price_scale_factor, l1_pubdata_price_scale_factor)) } /// Returns the fee model parameters using the denomination of the base token used (WEI for ETH). 
@@ -48,27 +37,6 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { } impl dyn BatchFeeModelInputProvider { - /// Provides the default implementation of `get_batch_fee_input_scaled()` given [`FeeParams`]. - pub fn default_batch_fee_input_scaled( - params: FeeParams, - l1_gas_price_scale_factor: f64, - l1_pubdata_price_scale_factor: f64, - ) -> BatchFeeInput { - match params { - FeeParams::V1(params) => BatchFeeInput::L1Pegged(compute_batch_fee_model_input_v1( - params, - l1_gas_price_scale_factor, - )), - FeeParams::V2(params) => BatchFeeInput::PubdataIndependent( - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2( - params, - l1_gas_price_scale_factor, - l1_pubdata_price_scale_factor, - )), - ), - } - } - /// Returns the batch fee input as-is, i.e. without any scaling for the L1 gas and pubdata prices. pub async fn get_batch_fee_input(&self) -> anyhow::Result { self.get_batch_fee_input_scaled(1.0, 1.0).await @@ -168,122 +136,6 @@ impl BatchFeeModelInputProvider for ApiFeeInputProvider { } } -/// Calculates the batch fee input based on the main node parameters. -/// This function uses the `V1` fee model, i.e. where the pubdata price does not include the proving costs. -fn compute_batch_fee_model_input_v1( - params: FeeParamsV1, - l1_gas_price_scale_factor: f64, -) -> L1PeggedBatchFeeModelInput { - let l1_gas_price = (params.l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; - - L1PeggedBatchFeeModelInput { - l1_gas_price, - fair_l2_gas_price: params.config.minimal_l2_gas_price, - } -} - -/// Calculates the batch fee input based on the main node parameters. -/// This function uses the `V2` fee model, i.e. where the pubdata price does not include the proving costs. -fn compute_batch_fee_model_input_v2( - params: FeeParamsV2, - l1_gas_price_scale_factor: f64, - l1_pubdata_price_scale_factor: f64, -) -> PubdataIndependentBatchFeeModelInput { - let config = params.config(); - let l1_gas_price = params.l1_gas_price(); - let l1_pubdata_price = params.l1_pubdata_price(); - - let FeeModelConfigV2 { - minimal_l2_gas_price, - compute_overhead_part, - pubdata_overhead_part, - batch_overhead_l1_gas, - max_gas_per_batch, - max_pubdata_per_batch, - } = config; - - // Firstly, we scale the gas price and pubdata price in case it is needed. - let l1_gas_price = (l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; - let l1_pubdata_price = (l1_pubdata_price as f64 * l1_pubdata_price_scale_factor) as u64; - - // While the final results of the calculations are not expected to have any overflows, the intermediate computations - // might, so we use U256 for them. - let l1_batch_overhead_wei = U256::from(l1_gas_price) * U256::from(batch_overhead_l1_gas); - - let fair_l2_gas_price = { - // Firstly, we calculate which part of the overall overhead each unit of L2 gas should cover. - let l1_batch_overhead_per_gas = - ceil_div_u256(l1_batch_overhead_wei, U256::from(max_gas_per_batch)); - - // Then, we multiply by the `compute_overhead_part` to get the overhead for the computation for each gas. - // Also, this means that if we almost never close batches because of compute, the `compute_overhead_part` should be zero and so - // it is possible that the computation costs include for no overhead. - let gas_overhead_wei = - (l1_batch_overhead_per_gas.as_u64() as f64 * compute_overhead_part) as u64; - - // We sum up the minimal L2 gas price (i.e. the raw prover/compute cost of a single L2 gas) and the overhead for batch being closed. 
- minimal_l2_gas_price + gas_overhead_wei - }; - - let fair_pubdata_price = { - // Firstly, we calculate which part of the overall overhead each pubdata byte should cover. - let l1_batch_overhead_per_pubdata = - ceil_div_u256(l1_batch_overhead_wei, U256::from(max_pubdata_per_batch)); - - // Then, we multiply by the `pubdata_overhead_part` to get the overhead for each pubdata byte. - // Also, this means that if we almost never close batches because of pubdata, the `pubdata_overhead_part` should be zero and so - // it is possible that the pubdata costs include no overhead. - let pubdata_overhead_wei = - (l1_batch_overhead_per_pubdata.as_u64() as f64 * pubdata_overhead_part) as u64; - - // We sum up the raw L1 pubdata price (i.e. the expected price of publishing a single pubdata byte) and the overhead for batch being closed. - l1_pubdata_price + pubdata_overhead_wei - }; - - PubdataIndependentBatchFeeModelInput { - l1_gas_price, - fair_l2_gas_price, - fair_pubdata_price, - } -} - -/// Bootloader places limitations on fair_l2_gas_price and fair_pubdata_price. -/// (MAX_ALLOWED_FAIR_L2_GAS_PRICE and MAX_ALLOWED_FAIR_PUBDATA_PRICE in bootloader code respectively) -/// Server needs to clip this prices in order to allow chain continues operation at a loss. The alternative -/// would be to stop accepting the transactions until the conditions improve. -/// TODO (PE-153): to be removed when bootloader limitation is removed -fn clip_batch_fee_model_input_v2( - fee_model: PubdataIndependentBatchFeeModelInput, -) -> PubdataIndependentBatchFeeModelInput { - /// MAX_ALLOWED_FAIR_L2_GAS_PRICE - const MAXIMUM_L2_GAS_PRICE: u64 = 10_000_000_000_000; - /// MAX_ALLOWED_FAIR_PUBDATA_PRICE - const MAXIMUM_PUBDATA_PRICE: u64 = 1_000_000_000_000_000; - PubdataIndependentBatchFeeModelInput { - l1_gas_price: fee_model.l1_gas_price, - fair_l2_gas_price: if fee_model.fair_l2_gas_price < MAXIMUM_L2_GAS_PRICE { - fee_model.fair_l2_gas_price - } else { - tracing::warn!( - "Fair l2 gas price {} exceeds maximum. Limitting to {}", - fee_model.fair_l2_gas_price, - MAXIMUM_L2_GAS_PRICE - ); - MAXIMUM_L2_GAS_PRICE - }, - fair_pubdata_price: if fee_model.fair_pubdata_price < MAXIMUM_PUBDATA_PRICE { - fee_model.fair_pubdata_price - } else { - tracing::warn!( - "Fair pubdata price {} exceeds maximum. Limitting to {}", - fee_model.fair_pubdata_price, - MAXIMUM_PUBDATA_PRICE - ); - MAXIMUM_PUBDATA_PRICE - }, - } -} - /// Mock [`BatchFeeModelInputProvider`] implementation that returns a constant value. /// Intended to be used in tests only. #[derive(Debug)] @@ -307,308 +159,17 @@ mod tests { use std::num::NonZeroU64; use l1_gas_price::GasAdjusterClient; - use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; + use zksync_config::GasAdjusterConfig; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; - use zksync_types::{commitment::L1BatchCommitmentMode, fee_model::BaseTokenConversionRatio}; + use zksync_types::{ + commitment::L1BatchCommitmentMode, + fee_model::{BaseTokenConversionRatio, FeeModelConfigV2}, + pubdata_da::PubdataSendingMode, + U256, + }; use super::*; - // To test that overflow never happens, we'll use giant L1 gas price, i.e. - // almost realistic very large value of 100k gwei. Since it is so large, we'll also - // use it for the L1 pubdata price. - const GWEI: u64 = 1_000_000_000; - const GIANT_L1_GAS_PRICE: u64 = 100_000 * GWEI; - - // As a small L2 gas price we'll use the value of 1 wei. 
- const SMALL_L1_GAS_PRICE: u64 = 1; - - #[test] - fn test_compute_batch_fee_model_input_v2_giant_numbers() { - let config = FeeModelConfigV2 { - minimal_l2_gas_price: GIANT_L1_GAS_PRICE, - // We generally don't expect those values to be larger than 1. Still, in theory the operator - // may need to set higher values in extreme cases. - compute_overhead_part: 5.0, - pubdata_overhead_part: 5.0, - // The batch overhead would likely never grow beyond that - batch_overhead_l1_gas: 1_000_000, - // Let's imagine that for some reason the limit is relatively small - max_gas_per_batch: 50_000_000, - // The pubdata will likely never go below that - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GIANT_L1_GAS_PRICE, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - // We'll use scale factor of 3.0 - let input = compute_batch_fee_model_input_v2(params, 3.0, 3.0); - - assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE * 3); - assert_eq!(input.fair_l2_gas_price, 130_000_000_000_000); - assert_eq!(input.fair_pubdata_price, 15_300_000_000_000_000); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_small_numbers() { - // Here we assume that the operator wants to make the lives of users as cheap as possible. - let config = FeeModelConfigV2 { - minimal_l2_gas_price: SMALL_L1_GAS_PRICE, - compute_overhead_part: 0.0, - pubdata_overhead_part: 0.0, - batch_overhead_l1_gas: 0, - max_gas_per_batch: 50_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - SMALL_L1_GAS_PRICE, - SMALL_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - - assert_eq!(input.l1_gas_price, SMALL_L1_GAS_PRICE); - assert_eq!(input.fair_l2_gas_price, SMALL_L1_GAS_PRICE); - assert_eq!(input.fair_pubdata_price, SMALL_L1_GAS_PRICE); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_only_pubdata_overhead() { - // Here we use sensible config, but when only pubdata is used to close the batch - let config = FeeModelConfigV2 { - minimal_l2_gas_price: 100_000_000_000, - compute_overhead_part: 0.0, - pubdata_overhead_part: 1.0, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GIANT_L1_GAS_PRICE, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE); - // The fair L2 gas price is identical to the minimal one. - assert_eq!(input.fair_l2_gas_price, 100_000_000_000); - // The fair pubdata price is the minimal one plus the overhead. 
- assert_eq!(input.fair_pubdata_price, 800_000_000_000_000); - } - - #[test] - fn test_compute_baxtch_fee_model_input_v2_only_compute_overhead() { - // Here we use sensible config, but when only compute is used to close the batch - let config = FeeModelConfigV2 { - minimal_l2_gas_price: 100_000_000_000, - compute_overhead_part: 1.0, - pubdata_overhead_part: 0.0, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GIANT_L1_GAS_PRICE, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0); - assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE); - // The fair L2 gas price is identical to the minimal one, plus the overhead - assert_eq!(input.fair_l2_gas_price, 240_000_000_000); - // The fair pubdata price is equal to the original one. - assert_eq!(input.fair_pubdata_price, GIANT_L1_GAS_PRICE); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_param_tweaking() { - // In this test we generally checking that each param behaves as expected - let base_config = FeeModelConfigV2 { - minimal_l2_gas_price: 100_000_000_000, - compute_overhead_part: 0.5, - pubdata_overhead_part: 0.5, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let base_params = FeeParamsV2::new( - base_config, - 1_000_000_000, - 1_000_000_000, - BaseTokenConversionRatio::default(), - ); - - let base_input = compute_batch_fee_model_input_v2(base_params, 1.0, 1.0); - - let base_input_larger_l1_gas_price = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - base_config, - 2_000_000_000, // double the L1 gas price - 1_000_000_000, - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - let base_input_scaled_l1_gas_price = - compute_batch_fee_model_input_v2(base_params, 2.0, 1.0); - assert_eq!( - base_input_larger_l1_gas_price, base_input_scaled_l1_gas_price, - "Scaling has the correct effect for the L1 gas price" - ); - assert!( - base_input.fair_l2_gas_price < base_input_larger_l1_gas_price.fair_l2_gas_price, - "L1 gas price increase raises L2 gas price" - ); - assert!( - base_input.fair_pubdata_price < base_input_larger_l1_gas_price.fair_pubdata_price, - "L1 gas price increase raises pubdata price" - ); - - let base_input_larger_pubdata_price = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - base_config, - 1_000_000_000, - 2_000_000_000, // double the L1 pubdata price - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - let base_input_scaled_pubdata_price = - compute_batch_fee_model_input_v2(base_params, 1.0, 2.0); - assert_eq!( - base_input_larger_pubdata_price, base_input_scaled_pubdata_price, - "Scaling has the correct effect for the pubdata price" - ); - assert_eq!( - base_input.fair_l2_gas_price, base_input_larger_pubdata_price.fair_l2_gas_price, - "L1 pubdata increase has no effect on L2 gas price" - ); - assert!( - base_input.fair_pubdata_price < base_input_larger_pubdata_price.fair_pubdata_price, - "Pubdata price increase raises pubdata price" - ); - - let base_input_larger_max_gas = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - FeeModelConfigV2 { - max_gas_per_batch: base_config.max_gas_per_batch * 2, - ..base_config - }, - base_params.l1_gas_price(), - base_params.l1_pubdata_price(), - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - assert!( - base_input.fair_l2_gas_price > base_input_larger_max_gas.fair_l2_gas_price, - 
"Max gas increase lowers L2 gas price" - ); - assert_eq!( - base_input.fair_pubdata_price, base_input_larger_max_gas.fair_pubdata_price, - "Max gas increase has no effect on pubdata price" - ); - - let base_input_larger_max_pubdata = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - FeeModelConfigV2 { - max_pubdata_per_batch: base_config.max_pubdata_per_batch * 2, - ..base_config - }, - base_params.l1_gas_price(), - base_params.l1_pubdata_price(), - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - assert_eq!( - base_input.fair_l2_gas_price, base_input_larger_max_pubdata.fair_l2_gas_price, - "Max pubdata increase has no effect on L2 gas price" - ); - assert!( - base_input.fair_pubdata_price > base_input_larger_max_pubdata.fair_pubdata_price, - "Max pubdata increase lowers pubdata price" - ); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_l1_gas() { - // In this test we check the gas price limit works as expected - let config = FeeModelConfigV2 { - minimal_l2_gas_price: 100 * GWEI, - compute_overhead_part: 0.5, - pubdata_overhead_part: 0.5, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let l1_gas_price = 1_000_000_000 * GWEI; - let params = FeeParamsV2::new( - config, - l1_gas_price, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - assert_eq!(input.l1_gas_price, l1_gas_price); - // The fair L2 gas price is identical to the maximum - assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); - assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_conversion_rate() { - // In this test we check the gas price limit works as expected - let config = FeeModelConfigV2 { - minimal_l2_gas_price: GWEI, - compute_overhead_part: 0.5, - pubdata_overhead_part: 0.5, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GWEI, - 2 * GWEI, - BaseTokenConversionRatio { - numerator: NonZeroU64::new(3_000_000).unwrap(), - denominator: NonZeroU64::new(1).unwrap(), - }, - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - assert_eq!(input.l1_gas_price, 3_000_000 * GWEI); - // The fair L2 gas price is identical to the maximum - assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); - assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); - } - #[derive(Debug, Clone)] struct DummyTokenRatioProvider { ratio: BaseTokenConversionRatio, diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 708297b08aa..e549ed5eba1 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -115,6 +115,7 @@ impl GenesisParams { default_aa: config .default_aa_hash .ok_or(GenesisError::MalformedConfig("default_aa_hash"))?, + evm_emulator: config.evm_emulator_hash, }; if base_system_contracts_hashes != base_system_contracts.hashes() { return Err(GenesisError::BaseSystemContractsHashes(Box::new( @@ -135,15 +136,18 @@ impl GenesisParams { } pub fn load_genesis_params(config: GenesisConfig) -> Result { - let base_system_contracts = BaseSystemContracts::load_from_disk(); - let system_contracts = get_system_smart_contracts(); + let mut base_system_contracts = BaseSystemContracts::load_from_disk(); + if 
config.evm_emulator_hash.is_some() { + base_system_contracts = base_system_contracts.with_latest_evm_emulator(); + } + let system_contracts = get_system_smart_contracts(config.evm_emulator_hash.is_some()); Self::from_genesis_config(config, base_system_contracts, system_contracts) } pub fn mock() -> Self { Self { base_system_contracts: BaseSystemContracts::load_from_disk(), - system_contracts: get_system_smart_contracts(), + system_contracts: get_system_smart_contracts(false), config: mock_genesis_config(), } } @@ -183,6 +187,7 @@ pub fn mock_genesis_config() -> GenesisConfig { genesis_commitment: Some(H256::default()), bootloader_hash: Some(base_system_contracts_hashes.bootloader), default_aa_hash: Some(base_system_contracts_hashes.default_aa), + evm_emulator_hash: base_system_contracts_hashes.evm_emulator, l1_chain_id: L1ChainId(9), sl_chain_id: None, l2_chain_id: L2ChainId::default(), @@ -246,6 +251,7 @@ pub async fn insert_genesis_batch( .config .default_aa_hash .ok_or(GenesisError::MalformedConfig("default_aa_hash"))?, + evm_emulator: genesis_params.config.evm_emulator_hash, }; let commitment_input = CommitmentInput::for_genesis_batch( genesis_root_hash, @@ -404,6 +410,7 @@ pub async fn create_genesis_l1_batch( base_system_contracts.hashes(), protocol_version.minor, ); + let batch_fee_input = BatchFeeInput::pubdata_independent(0, 0, 0); let genesis_l2_block_header = L2BlockHeader { number: L2BlockNumber(0), @@ -413,14 +420,14 @@ pub async fn create_genesis_l1_batch( l2_tx_count: 0, fee_account_address: Default::default(), base_fee_per_gas: 0, - pubdata_params: Default::default(), gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(protocol_version.minor.into()), - batch_fee_input: BatchFeeInput::l1_pegged(0, 0), + batch_fee_input, base_system_contracts_hashes: base_system_contracts.hashes(), protocol_version: Some(protocol_version.minor), virtual_blocks: 0, gas_limit: 0, logs_bloom: Bloom::zero(), + pubdata_params: Default::default(), }; let mut transaction = storage.start_transaction().await?; @@ -431,7 +438,11 @@ pub async fn create_genesis_l1_batch( .await?; transaction .blocks_dal() - .insert_l1_batch( + .insert_l1_batch(genesis_l1_batch_header.to_unsealed_header(batch_fee_input)) + .await?; + transaction + .blocks_dal() + .mark_l1_batch_as_sealed( &genesis_l1_batch_header, &[], BlockGasCount::default(), diff --git a/core/node/genesis/src/utils.rs b/core/node/genesis/src/utils.rs index b3dc34dd8da..62be43a0fe7 100644 --- a/core/node/genesis/src/utils.rs +++ b/core/node/genesis/src/utils.rs @@ -130,7 +130,8 @@ pub(super) async fn insert_base_system_contracts_to_factory_deps( contracts: &BaseSystemContracts, ) -> Result<(), GenesisError> { let factory_deps = [&contracts.bootloader, &contracts.default_aa] - .iter() + .into_iter() + .chain(contracts.evm_emulator.as_ref()) .map(|c| (c.hash, be_words_to_bytes(&c.code))) .collect(); diff --git a/core/node/metadata_calculator/src/api_server/metrics.rs b/core/node/metadata_calculator/src/api_server/metrics.rs index d185861d07c..92f948e0970 100644 --- a/core/node/metadata_calculator/src/api_server/metrics.rs +++ b/core/node/metadata_calculator/src/api_server/metrics.rs @@ -9,6 +9,8 @@ use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics pub(super) enum MerkleTreeApiMethod { Info, GetProofs, + GetNodes, + GetStaleKeys, } /// Metrics for Merkle tree API. 
diff --git a/core/node/metadata_calculator/src/api_server/mod.rs b/core/node/metadata_calculator/src/api_server/mod.rs index 6f46e8aeea8..4612d859a3d 100644 --- a/core/node/metadata_calculator/src/api_server/mod.rs +++ b/core/node/metadata_calculator/src/api_server/mod.rs @@ -1,6 +1,6 @@ //! Primitive Merkle tree API used internally to fetch proofs. -use std::{fmt, future::Future, net::SocketAddr, pin::Pin}; +use std::{collections::HashMap, fmt, future::Future, net::SocketAddr, pin::Pin}; use anyhow::Context as _; use async_trait::async_trait; @@ -10,12 +10,16 @@ use axum::{ response::{IntoResponse, Response}, routing, Json, Router, }; -use serde::{Deserialize, Serialize}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use tokio::sync::watch; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_health_check::{CheckHealth, Health, HealthStatus}; -use zksync_merkle_tree::NoVersionError; -use zksync_types::{L1BatchNumber, H256, U256}; +use zksync_merkle_tree::{ + unstable::{NodeKey, RawNode}, + NoVersionError, ValueHash, +}; +use zksync_types::{web3, L1BatchNumber, H256, U256}; +use zksync_utils::u256_to_h256; use self::metrics::{MerkleTreeApiMethod, API_METRICS}; use crate::{AsyncTreeReader, LazyAsyncTreeReader, MerkleTreeInfo}; @@ -77,6 +81,117 @@ impl TreeEntryWithProof { } } +#[derive(Debug, PartialEq, Eq, Hash)] +struct HexNodeKey(NodeKey); + +impl Serialize for HexNodeKey { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_str(&self.0.to_string()) + } +} + +impl<'de> Deserialize<'de> for HexNodeKey { + fn deserialize>(deserializer: D) -> Result { + struct HexNodeKeyVisitor; + + impl de::Visitor<'_> for HexNodeKeyVisitor { + type Value = HexNodeKey; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("hex-encoded versioned key like `123:c0ffee`") + } + + fn visit_str(self, v: &str) -> Result { + v.parse().map(HexNodeKey).map_err(de::Error::custom) + } + } + + deserializer.deserialize_str(HexNodeKeyVisitor) + } +} + +#[derive(Debug, Serialize)] +struct ApiLeafNode { + full_key: H256, + value_hash: H256, + leaf_index: u64, +} + +#[derive(Debug, Serialize)] +struct ApiChildRef { + hash: ValueHash, + version: u64, + is_leaf: bool, +} + +#[derive(Debug, Serialize)] +#[serde(transparent)] +struct ApiInternalNode(HashMap); + +#[derive(Debug, Serialize)] +struct ApiRawNode { + raw: web3::Bytes, + #[serde(skip_serializing_if = "Option::is_none")] + leaf: Option, + #[serde(skip_serializing_if = "Option::is_none")] + internal: Option, +} + +impl From for ApiRawNode { + fn from(node: RawNode) -> Self { + Self { + raw: web3::Bytes(node.raw), + leaf: node.leaf.map(|leaf| ApiLeafNode { + full_key: u256_to_h256(leaf.full_key), + value_hash: leaf.value_hash, + leaf_index: leaf.leaf_index, + }), + internal: node.internal.map(|internal| { + ApiInternalNode( + internal + .children() + .map(|(nibble, child_ref)| { + let nibble = if nibble < 10 { + b'0' + nibble + } else { + b'a' + nibble - 10 + }; + ( + char::from(nibble), + ApiChildRef { + hash: child_ref.hash, + version: child_ref.version, + is_leaf: child_ref.is_leaf, + }, + ) + }) + .collect(), + ) + }), + } + } +} + +#[derive(Debug, Deserialize)] +struct TreeNodesRequest { + keys: Vec, +} + +#[derive(Debug, Serialize)] +struct TreeNodesResponse { + nodes: HashMap, +} + +#[derive(Debug, Deserialize)] +struct StaleKeysRequest { + l1_batch_number: L1BatchNumber, +} + +#[derive(Debug, Serialize)] +struct StaleKeysResponse { + stale_keys: Vec, +} + /// 
Server-side tree API error. #[derive(Debug)] enum TreeApiServerError { @@ -343,6 +458,35 @@ impl AsyncTreeReader { Ok(Json(response)) } + async fn get_nodes_handler( + State(this): State, + Json(request): Json, + ) -> Json { + let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetNodes].start(); + let keys: Vec<_> = request.keys.iter().map(|key| key.0).collect(); + let nodes = this.clone().raw_nodes(keys).await; + let nodes = request + .keys + .into_iter() + .zip(nodes) + .filter_map(|(key, node)| Some((key, node?.into()))) + .collect(); + let response = TreeNodesResponse { nodes }; + latency.observe(); + Json(response) + } + + async fn get_stale_keys_handler( + State(this): State, + Json(request): Json, + ) -> Json { + let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetStaleKeys].start(); + let stale_keys = this.clone().raw_stale_keys(request.l1_batch_number).await; + let stale_keys = stale_keys.into_iter().map(HexNodeKey).collect(); + latency.observe(); + Json(StaleKeysResponse { stale_keys }) + } + async fn create_api_server( self, bind_address: &SocketAddr, @@ -353,6 +497,11 @@ impl AsyncTreeReader { let app = Router::new() .route("/", routing::get(Self::info_handler)) .route("/proofs", routing::post(Self::get_proofs_handler)) + .route("/debug/nodes", routing::post(Self::get_nodes_handler)) + .route( + "/debug/stale-keys", + routing::post(Self::get_stale_keys_handler), + ) .with_state(self); let listener = tokio::net::TcpListener::bind(bind_address) @@ -369,8 +518,8 @@ impl AsyncTreeReader { } tracing::info!("Stop signal received, Merkle tree API server is shutting down"); }) - .await - .context("Merkle tree API server failed")?; + .await + .context("Merkle tree API server failed")?; tracing::info!("Merkle tree API server shut down"); Ok(()) diff --git a/core/node/metadata_calculator/src/api_server/tests.rs b/core/node/metadata_calculator/src/api_server/tests.rs index 42a3152e6b5..d5e8f328294 100644 --- a/core/node/metadata_calculator/src/api_server/tests.rs +++ b/core/node/metadata_calculator/src/api_server/tests.rs @@ -72,11 +72,69 @@ async fn merkle_tree_api() { assert_eq!(err.version_count, 6); assert_eq!(err.missing_version, 10); + let raw_nodes_response = api_client + .inner + .post(format!("http://{local_addr}/debug/nodes")) + .json(&serde_json::json!({ "keys": ["0:", "0:0"] })) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + let raw_nodes_response: serde_json::Value = raw_nodes_response.json().await.unwrap(); + assert_raw_nodes_response(&raw_nodes_response); + + let raw_stale_keys_response = api_client + .inner + .post(format!("http://{local_addr}/debug/stale-keys")) + .json(&serde_json::json!({ "l1_batch_number": 1 })) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + let raw_stale_keys_response: serde_json::Value = raw_stale_keys_response.json().await.unwrap(); + assert_raw_stale_keys_response(&raw_stale_keys_response); + // Stop the calculator and the tree API server. 
stop_sender.send_replace(true); api_server_task.await.unwrap().unwrap(); } +fn assert_raw_nodes_response(response: &serde_json::Value) { + let response = response.as_object().expect("not an object"); + let response = response["nodes"].as_object().expect("not an object"); + let root = response["0:"].as_object().expect("not an object"); + assert!( + root.len() == 2 && root.contains_key("internal") && root.contains_key("raw"), + "{root:#?}" + ); + let root = root["internal"].as_object().expect("not an object"); + for key in root.keys() { + assert_eq!(key.len(), 1, "{key}"); + let key = key.as_bytes()[0]; + assert_matches!(key, b'0'..=b'9' | b'a'..=b'f'); + } + + if let Some(value) = response.get("0:0") { + let node = value.as_object().expect("not an object"); + assert!( + node.len() == 2 && node.contains_key("internal") && node.contains_key("raw"), + "{node:#?}" + ); + } +} + +fn assert_raw_stale_keys_response(response: &serde_json::Value) { + let response = response.as_object().expect("not an object"); + let stale_keys = response["stale_keys"].as_array().expect("not an array"); + assert!(!stale_keys.is_empty()); // At least the root is always obsoleted + for stale_key in stale_keys { + let stale_key = stale_key.as_str().expect("not a string"); + stale_key.parse::().unwrap(); + } +} + #[tokio::test] async fn api_client_connection_error() { // Use an address that will definitely fail on a timeout. diff --git a/core/node/metadata_calculator/src/helpers.rs b/core/node/metadata_calculator/src/helpers.rs index b6989afb179..3f370afaf77 100644 --- a/core/node/metadata_calculator/src/helpers.rs +++ b/core/node/metadata_calculator/src/helpers.rs @@ -22,6 +22,7 @@ use zksync_health_check::{CheckHealth, Health, HealthStatus, ReactiveHealthCheck use zksync_merkle_tree::{ domain::{TreeMetadata, ZkSyncTree, ZkSyncTreeReader}, recovery::{MerkleTreeRecovery, PersistenceThreadHandle}, + unstable::{NodeKey, RawNode}, Database, Key, MerkleTreeColumnFamily, NoVersionError, RocksDBWrapper, TreeEntry, TreeEntryWithProof, TreeInstruction, }; @@ -35,7 +36,7 @@ use zksync_types::{ use super::{ metrics::{LoadChangesStage, TreeUpdateStage, METRICS}, pruning::PruningHandles, - MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, + MerkleTreeReaderConfig, MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, }; /// General information about the Merkle tree. @@ -176,6 +177,40 @@ fn create_db_sync(config: &MetadataCalculatorConfig) -> anyhow::Result anyhow::Result { + tokio::task::spawn_blocking(move || { + let MerkleTreeReaderConfig { + db_path, + max_open_files, + multi_get_chunk_size, + block_cache_capacity, + include_indices_and_filters_in_block_cache, + } = config; + + tracing::info!( + "Initializing Merkle tree database at `{db_path}` (max open files: {max_open_files:?}) with {multi_get_chunk_size} multi-get chunk size, \ + {block_cache_capacity}B block cache (indices & filters included: {include_indices_and_filters_in_block_cache:?})" + ); + let mut db = RocksDB::with_options( + db_path.as_ref(), + RocksDBOptions { + block_cache_capacity: Some(block_cache_capacity), + include_indices_and_filters_in_block_cache, + max_open_files, + ..RocksDBOptions::default() + } + )?; + if cfg!(test) { + db = db.with_sync_writes(); + } + Ok(RocksDBWrapper::from(db)) + }) + .await + .context("panicked creating Merkle tree RocksDB")? +} + /// Wrapper around the "main" tree implementation used by [`MetadataCalculator`]. /// /// Async methods provided by this wrapper are not cancel-safe! 
This is probably not an issue; @@ -307,6 +342,13 @@ pub struct AsyncTreeReader { } impl AsyncTreeReader { + pub(super) fn new(db: RocksDBWrapper, mode: MerkleTreeMode) -> anyhow::Result { + Ok(Self { + inner: ZkSyncTreeReader::new(db)?, + mode, + }) + } + fn downgrade(&self) -> WeakAsyncTreeReader { WeakAsyncTreeReader { db: self.inner.db().clone().into_inner().downgrade(), @@ -366,6 +408,18 @@ impl AsyncTreeReader { .await .unwrap() } + + pub(crate) async fn raw_nodes(self, keys: Vec) -> Vec> { + tokio::task::spawn_blocking(move || self.inner.raw_nodes(&keys)) + .await + .unwrap() + } + + pub(crate) async fn raw_stale_keys(self, l1_batch_number: L1BatchNumber) -> Vec { + tokio::task::spawn_blocking(move || self.inner.raw_stale_keys(l1_batch_number)) + .await + .unwrap() + } } /// Version of async tree reader that holds a weak reference to RocksDB. Used in [`MerkleTreeHealthCheck`]. diff --git a/core/node/metadata_calculator/src/lib.rs b/core/node/metadata_calculator/src/lib.rs index 451090694b2..5c64330a0e7 100644 --- a/core/node/metadata_calculator/src/lib.rs +++ b/core/node/metadata_calculator/src/lib.rs @@ -27,6 +27,7 @@ pub use self::{ helpers::{AsyncTreeReader, LazyAsyncTreeReader, MerkleTreeInfo}, pruning::MerkleTreePruningTask, }; +use crate::helpers::create_readonly_db; pub mod api_server; mod helpers; @@ -264,3 +265,55 @@ impl MetadataCalculator { .await } } + +/// Configuration of [`TreeReaderTask`]. +#[derive(Debug, Clone)] +pub struct MerkleTreeReaderConfig { + /// Filesystem path to the RocksDB instance that stores the tree. + pub db_path: String, + /// Maximum number of files concurrently opened by RocksDB. Useful to fit into OS limits; can be used + /// as a rudimentary way to control RAM usage of the tree. + pub max_open_files: Option, + /// Chunk size for multi-get operations. Can speed up loading data for the Merkle tree on some environments, + /// but the effects vary wildly depending on the setup (e.g., the filesystem used). + pub multi_get_chunk_size: usize, + /// Capacity of RocksDB block cache in bytes. Reasonable values range from ~100 MiB to several GB. + pub block_cache_capacity: usize, + /// If specified, RocksDB indices and Bloom filters will be managed by the block cache, rather than + /// being loaded entirely into RAM on the RocksDB initialization. The block cache capacity should be increased + /// correspondingly; otherwise, RocksDB performance can significantly degrade. + pub include_indices_and_filters_in_block_cache: bool, +} + +/// Alternative to [`MetadataCalculator`] that provides readonly access to the Merkle tree. +#[derive(Debug)] +pub struct TreeReaderTask { + config: MerkleTreeReaderConfig, + tree_reader: watch::Sender>, +} + +impl TreeReaderTask { + /// Creates a new task with the provided configuration. + pub fn new(config: MerkleTreeReaderConfig) -> Self { + Self { + config, + tree_reader: watch::channel(None).0, + } + } + + /// Returns a reference to the tree reader. + pub fn tree_reader(&self) -> LazyAsyncTreeReader { + LazyAsyncTreeReader(self.tree_reader.subscribe()) + } + + /// Runs this task. The task exits on error, or when the tree reader is successfully initialized. + pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let db = tokio::select! 
{ + db_result = create_readonly_db(self.config) => db_result?, + _ = stop_receiver.changed() => return Ok(()), + }; + let reader = AsyncTreeReader::new(db, MerkleTreeMode::Lightweight)?; + self.tree_reader.send_replace(Some(reader)); + Ok(()) + } +} diff --git a/core/node/metadata_calculator/src/updater.rs b/core/node/metadata_calculator/src/updater.rs index e2acf62dea8..17fd5d900ea 100644 --- a/core/node/metadata_calculator/src/updater.rs +++ b/core/node/metadata_calculator/src/updater.rs @@ -152,10 +152,6 @@ impl TreeUpdater { // right away without having to implement dedicated code. if let Some(object_key) = &object_key { - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(l1_batch_number) - .await?; // Save the proof generation details to Postgres storage .proof_generation_dal() diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 7491277c128..ae9f7498929 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -45,7 +45,6 @@ zksync_node_sync.workspace = true zksync_node_api_server.workspace = true zksync_node_consensus.workspace = true zksync_contract_verification_server.workspace = true -zksync_tee_verifier_input_producer.workspace = true zksync_queued_job_processor.workspace = true zksync_reorg_detector.workspace = true zksync_vm_runner.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs b/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs deleted file mode 100644 index 14ab568c2f3..00000000000 --- a/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::sync::Arc; - -use zksync_config::configs::ExternalPriceApiClientConfig; -use zksync_external_price_api::coingecko_api::CoinGeckoPriceAPIClient; - -use crate::{ - implementations::resources::price_api_client::PriceAPIClientResource, - wiring_layer::{WiringError, WiringLayer}, - IntoContext, -}; - -/// Wiring layer for `CoingeckoApiClient` -/// -/// Responsible for inserting a resource with a client to get base token prices from CoinGecko to be -/// used by the `BaseTokenRatioPersister`. -#[derive(Debug)] -pub struct CoingeckoClientLayer { - config: ExternalPriceApiClientConfig, -} - -impl CoingeckoClientLayer { - /// Identifier of used client type. - /// Can be used to choose the layer for the client based on configuration variables. 
- pub const CLIENT_NAME: &'static str = "coingecko"; -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub price_api_client: PriceAPIClientResource, -} - -impl CoingeckoClientLayer { - pub fn new(config: ExternalPriceApiClientConfig) -> Self { - Self { config } - } -} - -#[async_trait::async_trait] -impl WiringLayer for CoingeckoClientLayer { - type Input = (); - type Output = Output; - - fn layer_name(&self) -> &'static str { - "coingecko_api_client" - } - - async fn wire(self, _input: Self::Input) -> Result { - let cg_client = Arc::new(CoinGeckoPriceAPIClient::new(self.config)); - - Ok(Output { - price_api_client: cg_client.into(), - }) - } -} diff --git a/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs b/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs deleted file mode 100644 index 67785dc26ed..00000000000 --- a/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::sync::Arc; - -use zksync_config::configs::ExternalPriceApiClientConfig; -use zksync_external_price_api::forced_price_client::ForcedPriceClient; - -use crate::{ - implementations::resources::price_api_client::PriceAPIClientResource, - wiring_layer::{WiringError, WiringLayer}, - IntoContext, -}; - -/// Wiring layer for `ForcedPriceClient` -/// -/// Inserts a resource with a forced configured price to be used by the `BaseTokenRatioPersister`. -#[derive(Debug)] -pub struct ForcedPriceClientLayer { - config: ExternalPriceApiClientConfig, -} - -impl ForcedPriceClientLayer { - pub fn new(config: ExternalPriceApiClientConfig) -> Self { - Self { config } - } - - /// Identifier of used client type. - /// Can be used to choose the layer for the client based on configuration variables. 
- pub const CLIENT_NAME: &'static str = "forced"; -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub price_api_client: PriceAPIClientResource, -} - -#[async_trait::async_trait] -impl WiringLayer for ForcedPriceClientLayer { - type Input = (); - type Output = Output; - - fn layer_name(&self) -> &'static str { - "forced_price_client" - } - - async fn wire(self, _input: Self::Input) -> Result { - let forced_client = Arc::new(ForcedPriceClient::new(self.config)); - - Ok(Output { - price_api_client: forced_client.into(), - }) - } -} diff --git a/core/node/node_framework/src/implementations/layers/base_token/mod.rs b/core/node/node_framework/src/implementations/layers/base_token/mod.rs index 5b58527a3d8..7a63b573d78 100644 --- a/core/node/node_framework/src/implementations/layers/base_token/mod.rs +++ b/core/node/node_framework/src/implementations/layers/base_token/mod.rs @@ -1,5 +1,92 @@ +use std::{str::FromStr, sync::Arc}; + +use zksync_config::configs::ExternalPriceApiClientConfig; +use zksync_external_price_api::{ + cmc_api::CmcPriceApiClient, coingecko_api::CoinGeckoPriceAPIClient, + forced_price_client::ForcedPriceClient, NoOpPriceAPIClient, +}; + +use crate::{ + implementations::resources::price_api_client::PriceAPIClientResource, IntoContext, WiringError, + WiringLayer, +}; + pub mod base_token_ratio_persister; pub mod base_token_ratio_provider; -pub mod coingecko_client; -pub mod forced_price_client; -pub mod no_op_external_price_api_client; + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Default)] +enum ExternalPriceApiKind { + #[default] + NoOp, + Forced, + CoinGecko, + CoinMarketCap, +} + +#[derive(Debug, thiserror::Error)] +#[error("Unknown external price API client source: \"{0}\"")] +pub struct UnknownExternalPriceApiClientSourceError(String); + +impl FromStr for ExternalPriceApiKind { + type Err = UnknownExternalPriceApiClientSourceError; + + fn from_str(s: &str) -> Result { + Ok(match &s.to_lowercase()[..] 
{ + "no-op" | "noop" => Self::NoOp, + "forced" => Self::Forced, + "coingecko" => Self::CoinGecko, + "coinmarketcap" => Self::CoinMarketCap, + _ => return Err(UnknownExternalPriceApiClientSourceError(s.to_owned())), + }) + } +} + +impl ExternalPriceApiKind { + fn instantiate(&self, config: ExternalPriceApiClientConfig) -> PriceAPIClientResource { + PriceAPIClientResource(match self { + Self::NoOp => Arc::new(NoOpPriceAPIClient {}), + Self::Forced => Arc::new(ForcedPriceClient::new(config)), + Self::CoinGecko => Arc::new(CoinGeckoPriceAPIClient::new(config)), + Self::CoinMarketCap => Arc::new(CmcPriceApiClient::new(config)), + }) + } +} + +#[derive(Debug)] +pub struct ExternalPriceApiLayer { + kind: ExternalPriceApiKind, + config: ExternalPriceApiClientConfig, +} + +impl TryFrom for ExternalPriceApiLayer { + type Error = UnknownExternalPriceApiClientSourceError; + + fn try_from(config: ExternalPriceApiClientConfig) -> Result { + Ok(Self { + kind: config.source.parse()?, + config, + }) + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub price_api_client: PriceAPIClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for ExternalPriceApiLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "external_price_api" + } + + async fn wire(self, _input: Self::Input) -> Result { + Ok(Output { + price_api_client: self.kind.instantiate(self.config), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs b/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs deleted file mode 100644 index 2bf5eda798f..00000000000 --- a/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::sync::Arc; - -use zksync_external_price_api::NoOpPriceAPIClient; - -use crate::{ - implementations::resources::price_api_client::PriceAPIClientResource, - wiring_layer::{WiringError, WiringLayer}, - IntoContext, -}; - -/// Wiring layer for `NoOpExternalPriceApiClient` -/// -/// Inserts a resource with a no-op client to get base token prices to be used by the `BaseTokenRatioPersister`. -#[derive(Debug)] -pub struct NoOpExternalPriceApiClientLayer; - -impl NoOpExternalPriceApiClientLayer { - /// Identifier of used client type. - /// Can be used to choose the layer for the client based on configuration variables. 
- pub const CLIENT_NAME: &'static str = "no-op"; -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub price_api_client: PriceAPIClientResource, -} - -#[async_trait::async_trait] -impl WiringLayer for NoOpExternalPriceApiClientLayer { - type Input = (); - type Output = Output; - - fn layer_name(&self) -> &'static str { - "no_op_external_price_api_client" - } - - async fn wire(self, _input: Self::Input) -> Result { - let no_op_client = Arc::new(NoOpPriceAPIClient {}); - - Ok(Output { - price_api_client: no_op_client.into(), - }) - } -} diff --git a/core/node/node_framework/src/implementations/layers/gas_adjuster.rs b/core/node/node_framework/src/implementations/layers/gas_adjuster.rs index 229700289a7..241c4d829be 100644 --- a/core/node/node_framework/src/implementations/layers/gas_adjuster.rs +++ b/core/node/node_framework/src/implementations/layers/gas_adjuster.rs @@ -1,8 +1,9 @@ use std::sync::Arc; use anyhow::Context; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig, GenesisConfig}; +use zksync_config::{GasAdjusterConfig, GenesisConfig}; use zksync_node_fee_model::l1_gas_price::GasAdjuster; +use zksync_types::pubdata_da::PubdataSendingMode; use crate::{ implementations::resources::{ diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index 35c4bc3fc20..28f81bb4543 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use zksync_config::configs::chain::StateKeeperConfig; +use zksync_config::configs::chain::{FeeModelVersion, StateKeeperConfig}; use zksync_node_fee_model::{ApiFeeInputProvider, MainNodeFeeInputProvider}; -use zksync_types::fee_model::FeeModelConfig; +use zksync_types::fee_model::{FeeModelConfig, FeeModelConfigV1, FeeModelConfigV2}; use crate::{ implementations::resources::{ @@ -20,7 +20,7 @@ use crate::{ /// Adds several resources that depend on L1 gas price. 
#[derive(Debug)] pub struct L1GasLayer { - state_keeper_config: StateKeeperConfig, + fee_model_config: FeeModelConfig, } #[derive(Debug, FromContext)] @@ -42,9 +42,25 @@ pub struct Output { } impl L1GasLayer { - pub fn new(state_keeper_config: StateKeeperConfig) -> Self { + pub fn new(state_keeper_config: &StateKeeperConfig) -> Self { Self { - state_keeper_config, + fee_model_config: Self::map_config(state_keeper_config), + } + } + + fn map_config(state_keeper_config: &StateKeeperConfig) -> FeeModelConfig { + match state_keeper_config.fee_model_version { + FeeModelVersion::V1 => FeeModelConfig::V1(FeeModelConfigV1 { + minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, + }), + FeeModelVersion::V2 => FeeModelConfig::V2(FeeModelConfigV2 { + minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, + compute_overhead_part: state_keeper_config.compute_overhead_part, + pubdata_overhead_part: state_keeper_config.pubdata_overhead_part, + batch_overhead_l1_gas: state_keeper_config.batch_overhead_l1_gas, + max_gas_per_batch: state_keeper_config.max_gas_per_batch, + max_pubdata_per_batch: state_keeper_config.max_pubdata_per_batch, + }), } } } @@ -64,7 +80,7 @@ impl WiringLayer for L1GasLayer { let main_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( input.gas_adjuster.0.clone(), ratio_provider.0, - FeeModelConfig::from_state_keeper_config(&self.state_keeper_config), + self.fee_model_config, )); let replica_pool = input.replica_pool.get().await?; diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 827ec69d942..4092ee6dcd5 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -7,7 +7,8 @@ use std::{ use anyhow::Context as _; use zksync_config::configs::{api::MerkleTreeApiConfig, database::MerkleTreeMode}; use zksync_metadata_calculator::{ - LazyAsyncTreeReader, MerkleTreePruningTask, MetadataCalculator, MetadataCalculatorConfig, + LazyAsyncTreeReader, MerkleTreePruningTask, MerkleTreeReaderConfig, MetadataCalculator, + MetadataCalculatorConfig, TreeReaderTask, }; use zksync_storage::RocksDB; @@ -19,7 +20,7 @@ use crate::{ web3_api::TreeApiClientResource, }, service::{ShutdownHook, StopReceiver}, - task::{Task, TaskId}, + task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, FromContext, IntoContext, }; @@ -205,3 +206,65 @@ impl Task for MerkleTreePruningTask { (*self).run(stop_receiver.0).await } } + +/// Mutually exclusive with [`MetadataCalculatorLayer`]. 
+#[derive(Debug)] +pub struct TreeApiServerLayer { + config: MerkleTreeReaderConfig, + api_config: MerkleTreeApiConfig, +} + +impl TreeApiServerLayer { + pub fn new(config: MerkleTreeReaderConfig, api_config: MerkleTreeApiConfig) -> Self { + Self { config, api_config } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct TreeApiServerOutput { + tree_api_client: TreeApiClientResource, + #[context(task)] + tree_reader_task: TreeReaderTask, + #[context(task)] + tree_api_task: TreeApiTask, +} + +#[async_trait::async_trait] +impl WiringLayer for TreeApiServerLayer { + type Input = (); + type Output = TreeApiServerOutput; + + fn layer_name(&self) -> &'static str { + "tree_api_server" + } + + async fn wire(self, (): Self::Input) -> Result { + let tree_reader_task = TreeReaderTask::new(self.config); + let bind_addr = (Ipv4Addr::UNSPECIFIED, self.api_config.port).into(); + let tree_api_task = TreeApiTask { + bind_addr, + tree_reader: tree_reader_task.tree_reader(), + }; + Ok(TreeApiServerOutput { + tree_api_client: TreeApiClientResource(Arc::new(tree_reader_task.tree_reader())), + tree_api_task, + tree_reader_task, + }) + } +} + +#[async_trait::async_trait] +impl Task for TreeReaderTask { + fn kind(&self) -> TaskKind { + TaskKind::OneshotTask + } + + fn id(&self) -> TaskId { + "merkle_tree_reader_task".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 75828da1902..11a62c9333b 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -33,7 +33,6 @@ pub mod reorg_detector; pub mod sigint; pub mod state_keeper; pub mod sync_state_updater; -pub mod tee_verifier_input_producer; pub mod tree_data_fetcher; pub mod validate_chain_ids; pub mod vm_runner; diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index b53ff73c1a0..3e1269caa4e 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; use crate::{ implementations::resources::{ @@ -21,6 +21,7 @@ use crate::{ pub struct ProofDataHandlerLayer { proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[derive(Debug, FromContext)] @@ -41,10 +42,12 @@ impl ProofDataHandlerLayer { pub fn new( proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Self { Self { proof_data_handler_config, commitment_mode, + l2_chain_id, } } } @@ -67,6 +70,7 @@ impl WiringLayer for ProofDataHandlerLayer { blob_store, main_pool, commitment_mode: self.commitment_mode, + l2_chain_id: self.l2_chain_id, }; Ok(Output { task }) @@ -79,6 +83,7 @@ pub struct ProofDataHandlerTask { blob_store: Arc, main_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[async_trait::async_trait] @@ -93,6 +98,7 
@@ impl Task for ProofDataHandlerTask { self.blob_store, self.main_pool, self.commitment_mode, + self.l2_chain_id, stop_receiver.0, ) .await diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs index 31b76550767..2c23f5aa9a1 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs @@ -8,6 +8,7 @@ use zksync_types::L2ChainId; use crate::{ implementations::resources::{ action_queue::ActionQueueSenderResource, + healthcheck::AppHealthCheckResource, main_node_client::MainNodeClientResource, pools::{MasterPool, PoolResource}, state_keeper::{ConditionalSealerResource, StateKeeperIOResource}, @@ -26,6 +27,7 @@ pub struct ExternalIOLayer { #[derive(Debug, FromContext)] #[context(crate = crate)] pub struct Input { + pub app_health: AppHealthCheckResource, pub pool: PoolResource, pub main_node_client: MainNodeClientResource, } @@ -57,6 +59,10 @@ impl WiringLayer for ExternalIOLayer { async fn wire(self, input: Self::Input) -> Result { // Create `SyncState` resource. let sync_state = SyncState::default(); + let app_health = &input.app_health.0; + app_health + .insert_custom_component(Arc::new(sync_state.clone())) + .map_err(WiringError::internal)?; // Create `ActionQueueSender` resource. let (action_queue_sender, action_queue) = ActionQueue::new(); diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs index 191b4a699b9..77992f34c7f 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs @@ -1,13 +1,10 @@ use anyhow::Context as _; -use zksync_config::{ - configs::{ - chain::{MempoolConfig, StateKeeperConfig}, - wallets, - }, - ContractsConfig, GenesisConfig, +use zksync_config::configs::{ + chain::{MempoolConfig, StateKeeperConfig}, + wallets, }; use zksync_state_keeper::{MempoolFetcher, MempoolGuard, MempoolIO, SequencerSealer}; -use zksync_types::L2ChainId; +use zksync_types::{commitment::L1BatchCommitmentMode, Address, L2ChainId}; use crate::{ implementations::resources::{ @@ -41,9 +38,9 @@ pub struct MempoolIOLayer { zksync_network_id: L2ChainId, state_keeper_config: StateKeeperConfig, mempool_config: MempoolConfig, - contracts_config: ContractsConfig, - genesis_config: GenesisConfig, wallets: wallets::StateKeeper, + l2_da_validator_addr: Option
<Address>
, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, } #[derive(Debug, FromContext)] @@ -67,17 +64,17 @@ impl MempoolIOLayer { zksync_network_id: L2ChainId, state_keeper_config: StateKeeperConfig, mempool_config: MempoolConfig, - contracts_config: ContractsConfig, - genesis_config: GenesisConfig, wallets: wallets::StateKeeper, + l2_da_validator_addr: Option
, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, ) -> Self { Self { zksync_network_id, state_keeper_config, mempool_config, - contracts_config, - genesis_config, wallets, + l2_da_validator_addr, + l1_batch_commit_data_generator_mode, } } @@ -136,10 +133,10 @@ impl WiringLayer for MempoolIOLayer { mempool_db_pool, &self.state_keeper_config, self.wallets.fee_account.address(), - self.contracts_config.l2_da_validator_addr, - self.genesis_config.l1_batch_commit_data_generator_mode, self.mempool_config.delay_interval(), self.zksync_network_id, + self.l2_da_validator_addr, + self.l1_batch_commit_data_generator_mode, )?; // Create sealer. diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs index 7a0de619364..1a07591c1cd 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs @@ -1,12 +1,10 @@ use anyhow::Context as _; -use zksync_dal::{Core, CoreDal}; -use zksync_db_connection::connection_pool::ConnectionPool; use zksync_node_framework_derive::FromContext; use zksync_state_keeper::{ io::seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, L2BlockSealerTask, OutputHandler, StateKeeperPersistence, TreeWritesPersistence, }; -use zksync_types::{Address, ProtocolVersionId}; +use zksync_types::Address; use crate::{ implementations::resources::{ @@ -89,38 +87,6 @@ impl OutputHandlerLayer { self.protective_reads_persistence_enabled = protective_reads_persistence_enabled; self } - - async fn validate_l2_legacy_shared_bridge_addr( - &self, - pool: &ConnectionPool, - ) -> Result<(), WiringError> { - let mut connection = pool.connection().await.context("Get DB connection")?; - - if let Some(l2_block) = connection - .blocks_dal() - .get_earliest_l2_block_number() - .await - .context("failed to load earliest l2 block number")? - { - let header = connection - .blocks_dal() - .get_l2_block_header(l2_block) - .await - .context("failed to load L2 block header")? 
- .context("missing L2 block header")?; - let protocol_version = header - .protocol_version - .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); - - if protocol_version.is_pre_gateway() && self.l2_legacy_shared_bridge_addr.is_none() { - return Err(WiringError::Configuration( - "Missing `l2_legacy_shared_bridge_addr` for chain that was initialized before gateway upgrade".to_string() - )); - } - } - - Ok(()) - } } #[async_trait::async_trait] @@ -140,14 +106,13 @@ impl WiringLayer for OutputHandlerLayer { .get_custom(L2BlockSealProcess::subtasks_len()) .await .context("Get master pool")?; - self.validate_l2_legacy_shared_bridge_addr(&persistence_pool) - .await?; let (mut persistence, l2_block_sealer) = StateKeeperPersistence::new( persistence_pool.clone(), self.l2_legacy_shared_bridge_addr, self.l2_block_seal_queue_capacity, - ); + ) + .await?; if self.pre_insert_txs { persistence = persistence.with_tx_insertion(); } diff --git a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs index 1f86b43f7a5..dd2652dfddb 100644 --- a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs +++ b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs @@ -1,9 +1,12 @@ +use std::sync::Arc; + use zksync_dal::{ConnectionPool, Core}; use zksync_node_sync::SyncState; use zksync_web3_decl::client::{DynClient, L2}; use crate::{ implementations::resources::{ + healthcheck::AppHealthCheckResource, main_node_client::MainNodeClientResource, pools::{MasterPool, PoolResource}, sync_state::SyncStateResource, @@ -24,6 +27,7 @@ pub struct SyncStateUpdaterLayer; pub struct Input { /// Fetched to check whether the `SyncState` was already provided by another layer. pub sync_state: Option, + pub app_health: AppHealthCheckResource, pub master_pool: PoolResource, pub main_node_client: MainNodeClientResource, } @@ -62,6 +66,10 @@ impl WiringLayer for SyncStateUpdaterLayer { let MainNodeClientResource(main_node_client) = input.main_node_client; let sync_state = SyncState::default(); + let app_health = &input.app_health.0; + app_health + .insert_custom_component(Arc::new(sync_state.clone())) + .map_err(WiringError::internal)?; Ok(Output { sync_state: Some(sync_state.clone().into()), diff --git a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs deleted file mode 100644 index 68789082a22..00000000000 --- a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier_input_producer::TeeVerifierInputProducer; -use zksync_types::L2ChainId; - -use crate::{ - implementations::resources::{ - object_store::ObjectStoreResource, - pools::{MasterPool, PoolResource}, - }, - service::StopReceiver, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, - FromContext, IntoContext, -}; - -/// Wiring layer for [`TeeVerifierInputProducer`]. 
-#[derive(Debug)] -pub struct TeeVerifierInputProducerLayer { - l2_chain_id: L2ChainId, -} - -impl TeeVerifierInputProducerLayer { - pub fn new(l2_chain_id: L2ChainId) -> Self { - Self { l2_chain_id } - } -} - -#[derive(Debug, FromContext)] -#[context(crate = crate)] -pub struct Input { - pub master_pool: PoolResource, - pub object_store: ObjectStoreResource, -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - #[context(task)] - pub task: TeeVerifierInputProducer, -} - -#[async_trait::async_trait] -impl WiringLayer for TeeVerifierInputProducerLayer { - type Input = Input; - type Output = Output; - - fn layer_name(&self) -> &'static str { - "tee_verifier_input_producer_layer" - } - - async fn wire(self, input: Self::Input) -> Result { - let pool = input.master_pool.get().await?; - let ObjectStoreResource(object_store) = input.object_store; - let task = TeeVerifierInputProducer::new(pool, object_store, self.l2_chain_id).await?; - - Ok(Output { task }) - } -} - -#[async_trait::async_trait] -impl Task for TeeVerifierInputProducer { - fn id(&self) -> TaskId { - "tee_verifier_input_producer".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0, None).await - } -} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs new file mode 100644 index 00000000000..4ba8098c839 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs @@ -0,0 +1,48 @@ +use std::time::Duration; + +use zksync_node_api_server::web3::state::BridgeAddressesHandle; +use zksync_web3_decl::{ + client::{DynClient, L2}, + namespaces::ZksNamespaceClient, +}; + +use crate::{StopReceiver, Task, TaskId}; + +#[derive(Debug)] +pub struct BridgeAddressesUpdaterTask { + pub bridge_address_updater: BridgeAddressesHandle, + pub main_node_client: Box>, + pub update_interval: Option, +} + +#[async_trait::async_trait] +impl Task for BridgeAddressesUpdaterTask { + fn id(&self) -> TaskId { + "bridge_addresses_updater_task".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + const DEFAULT_INTERVAL: Duration = Duration::from_secs(30); + + let update_interval = self.update_interval.unwrap_or(DEFAULT_INTERVAL); + while !*stop_receiver.0.borrow_and_update() { + match self.main_node_client.get_bridge_contracts().await { + Ok(bridge_addresses) => { + self.bridge_address_updater.update(bridge_addresses).await; + } + Err(err) => { + tracing::error!("Failed to query `get_bridge_contracts`, error: {err:?}"); + } + } + + if tokio::time::timeout(update_interval, stop_receiver.0.changed()) + .await + .is_ok() + { + break; + } + } + + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs similarity index 81% rename from core/node/node_framework/src/implementations/layers/web3_api/server.rs rename to core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs index 0a39ae747c7..390d321647c 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs @@ -3,15 +3,24 @@ use std::{num::NonZeroU32, time::Duration}; use tokio::{sync::oneshot, task::JoinHandle}; use 
zksync_circuit_breaker::replication_lag::ReplicationLagChecker; use zksync_config::configs::api::MaxResponseSize; -use zksync_node_api_server::web3::{state::InternalApiConfig, ApiBuilder, ApiServer, Namespace}; +use zksync_node_api_server::web3::{ + state::{BridgeAddressesHandle, InternalApiConfig, SealedL2BlockNumber}, + ApiBuilder, ApiServer, Namespace, +}; use crate::{ - implementations::resources::{ - circuit_breakers::CircuitBreakersResource, - healthcheck::AppHealthCheckResource, - pools::{PoolResource, ReplicaPool}, - sync_state::SyncStateResource, - web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, + implementations::{ + layers::web3_api::server::{ + bridge_addresses::BridgeAddressesUpdaterTask, sealed_l2_block::SealedL2BlockUpdaterTask, + }, + resources::{ + circuit_breakers::CircuitBreakersResource, + healthcheck::AppHealthCheckResource, + main_node_client::MainNodeClientResource, + pools::{PoolResource, ReplicaPool}, + sync_state::SyncStateResource, + web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, + }, }, service::StopReceiver, task::{Task, TaskId}, @@ -19,6 +28,9 @@ use crate::{ FromContext, IntoContext, }; +mod bridge_addresses; +mod sealed_l2_block; + /// Set of optional variables that can be altered to modify the behavior of API builder. #[derive(Debug, Default)] pub struct Web3ServerOptionalConfig { @@ -33,6 +45,8 @@ pub struct Web3ServerOptionalConfig { pub replication_lag_limit: Option, // Used by the external node. pub pruning_info_refresh_interval: Option, + // Used by the external node. + pub bridge_addresses_refresh_interval: Option, pub polling_interval: Option, } @@ -61,6 +75,10 @@ impl Web3ServerOptionalConfig { if let Some(polling_interval) = self.polling_interval { api_builder = api_builder.with_polling_interval(polling_interval); } + if let Some(pruning_info_refresh_interval) = self.pruning_info_refresh_interval { + api_builder = + api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval); + } api_builder = api_builder.with_extended_tracing(self.with_extended_tracing); api_builder } @@ -109,6 +127,7 @@ pub struct Input { pub circuit_breakers: CircuitBreakersResource, #[context(default)] pub app_health: AppHealthCheckResource, + pub main_node_client: Option, } #[derive(Debug, IntoContext)] @@ -118,6 +137,10 @@ pub struct Output { pub web3_api_task: Web3ApiTask, #[context(task)] pub garbage_collector_task: ApiTaskGarbageCollector, + #[context(task)] + pub sealed_l2_block_updater_task: SealedL2BlockUpdaterTask, + #[context(task)] + pub bridge_addresses_updater_task: Option, } impl Web3ServerLayer { @@ -163,20 +186,39 @@ impl WiringLayer for Web3ServerLayer { async fn wire(self, input: Self::Input) -> Result { // Get required resources. 
let replica_resource_pool = input.replica_pool; - let updaters_pool = replica_resource_pool.get_custom(2).await?; + let updaters_pool = replica_resource_pool.get_custom(1).await?; let replica_pool = replica_resource_pool.get().await?; let TxSenderResource(tx_sender) = input.tx_sender; let MempoolCacheResource(mempool_cache) = input.mempool_cache; let sync_state = input.sync_state.map(|state| state.0); let tree_api_client = input.tree_api_client.map(|client| client.0); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = + BridgeAddressesHandle::new(self.internal_api_config.bridge_addresses.clone()); + + let sealed_l2_block_updater_task = SealedL2BlockUpdaterTask { + number_updater: sealed_l2_block_handle.clone(), + pool: updaters_pool, + }; + // Bridge addresses updater task must be started for ENs and only for ENs. + let bridge_addresses_updater_task = + input + .main_node_client + .map(|main_node_client| BridgeAddressesUpdaterTask { + bridge_address_updater: bridge_addresses_handle.clone(), + main_node_client: main_node_client.0, + update_interval: self.optional_config.bridge_addresses_refresh_interval, + }); + // Build server. let mut api_builder = ApiBuilder::jsonrpsee_backend(self.internal_api_config, replica_pool.clone()) - .with_updaters_pool(updaters_pool) .with_tx_sender(tx_sender) .with_mempool_cache(mempool_cache) - .with_extended_tracing(self.optional_config.with_extended_tracing); + .with_extended_tracing(self.optional_config.with_extended_tracing) + .with_sealed_l2_block_handle(sealed_l2_block_handle) + .with_bridge_addresses_handle(bridge_addresses_handle); if let Some(client) = tree_api_client { api_builder = api_builder.with_tree_api(client); } @@ -191,14 +233,9 @@ impl WiringLayer for Web3ServerLayer { if let Some(sync_state) = sync_state { api_builder = api_builder.with_sync_state(sync_state); } - if let Some(pruning_info_refresh_interval) = - self.optional_config.pruning_info_refresh_interval - { - api_builder = - api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval); - } let replication_lag_limit = self.optional_config.replication_lag_limit; api_builder = self.optional_config.apply(api_builder); + let server = api_builder.build()?; // Insert healthcheck. 
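For illustration (not part of the diff): with the options introduced above, an external node could populate the layer config roughly as below. Only the `Web3ServerOptionalConfig` field names are taken from this patch; the values and the `Duration` import are placeholders.

```rust
use std::time::Duration;

// Hypothetical EN-side settings; every value here is illustrative.
let optional_config = Web3ServerOptionalConfig {
    // Consumed by `BridgeAddressesUpdaterTask`; `None` falls back to its 30-second default.
    bridge_addresses_refresh_interval: Some(Duration::from_secs(30)),
    // Now applied in `Web3ServerOptionalConfig::apply` rather than in the wiring code.
    pruning_info_refresh_interval: Some(Duration::from_secs(10)),
    polling_interval: Some(Duration::from_millis(200)),
    ..Web3ServerOptionalConfig::default()
};
```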
@@ -230,6 +267,8 @@ impl WiringLayer for Web3ServerLayer { Ok(Output { web3_api_task, garbage_collector_task, + sealed_l2_block_updater_task, + bridge_addresses_updater_task, }) } } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs new file mode 100644 index 00000000000..02552e212cd --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs @@ -0,0 +1,50 @@ +use std::time::Duration; + +use zksync_dal::{Core, CoreDal}; +use zksync_db_connection::connection_pool::ConnectionPool; +use zksync_node_api_server::web3::state::SealedL2BlockNumber; + +use crate::{StopReceiver, Task, TaskId}; + +#[derive(Debug)] +pub struct SealedL2BlockUpdaterTask { + pub number_updater: SealedL2BlockNumber, + pub pool: ConnectionPool, +} + +#[async_trait::async_trait] +impl Task for SealedL2BlockUpdaterTask { + fn id(&self) -> TaskId { + "api_sealed_l2_block_updater_task".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // Chosen to be significantly smaller than the interval between L2 blocks, but larger than + // the latency of getting the latest sealed L2 block number from Postgres. If the API server + // processes enough requests, information about the latest sealed L2 block will be updated + // by reporting block difference metrics, so the actual update lag would be much smaller than this value. + const UPDATE_INTERVAL: Duration = Duration::from_millis(25); + + while !*stop_receiver.0.borrow_and_update() { + let mut connection = self.pool.connection_tagged("api").await.unwrap(); + let Some(last_sealed_l2_block) = + connection.blocks_dal().get_sealed_l2_block_number().await? + else { + tokio::time::sleep(UPDATE_INTERVAL).await; + continue; + }; + drop(connection); + + self.number_updater.update(last_sealed_l2_block); + + if tokio::time::timeout(UPDATE_INTERVAL, stop_receiver.0.changed()) + .await + .is_ok() + { + break; + } + } + + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index a09938055fa..ba1a69e23bb 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -32,6 +32,7 @@ pub struct PostgresStorageCachesConfig { pub factory_deps_cache_size: u64, pub initial_writes_cache_size: u64, pub latest_values_cache_size: u64, + pub latest_values_max_block_lag: u32, } /// Wiring layer for the `TxSender`. 
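For illustration (not part of the diff): the new `latest_values_max_block_lag` knob sits alongside the existing cache capacities, so callers constructing the TxSender layer would now fill the config along these lines. Field names come from the struct above; the numbers are placeholders.

```rust
// Placeholder values; capacities mirror the existing cache-size fields.
let storage_caches_config = PostgresStorageCachesConfig {
    factory_deps_cache_size: 128 << 20,
    initial_writes_cache_size: 32 << 20,
    // A zero capacity disables the background values-cache update task (see the next hunk).
    latest_values_cache_size: 128 << 20,
    // New field: presumably bounds how many blocks the values cache may lag behind
    // before `configure_storage_values_cache` treats it as stale.
    latest_values_max_block_lag: 20,
};
```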
@@ -133,10 +134,13 @@ impl WiringLayer for TxSenderLayer { PostgresStorageCaches::new(factory_deps_capacity, initial_writes_capacity); let postgres_storage_caches_task = if values_capacity > 0 { - Some( - storage_caches - .configure_storage_values_cache(values_capacity, replica_pool.clone()), - ) + let update_task = storage_caches.configure_storage_values_cache( + values_capacity, + self.postgres_storage_caches_config + .latest_values_max_block_lag, + replica_pool.clone(), + ); + Some(update_task) } else { None }; diff --git a/core/node/node_framework/src/service/error.rs b/core/node/node_framework/src/service/error.rs index 890cc6b7d4b..66a1c13e873 100644 --- a/core/node/node_framework/src/service/error.rs +++ b/core/node/node_framework/src/service/error.rs @@ -1,20 +1,41 @@ +use std::fmt; + use crate::{task::TaskId, wiring_layer::WiringError}; /// An error that can occur during the task lifecycle. #[derive(Debug, thiserror::Error)] pub enum TaskError { - #[error("Task {0} failed: {1}")] + #[error("Task {0} failed: {1:#}")] TaskFailed(TaskId, anyhow::Error), #[error("Task {0} panicked: {1}")] TaskPanicked(TaskId, String), #[error("Shutdown for task {0} timed out")] TaskShutdownTimedOut(TaskId), - #[error("Shutdown hook {0} failed: {1}")] + #[error("Shutdown hook {0} failed: {1:#}")] ShutdownHookFailed(TaskId, anyhow::Error), #[error("Shutdown hook {0} timed out")] ShutdownHookTimedOut(TaskId), } +/// Wrapper of a list of errors with a reasonable formatting. +pub struct TaskErrors(pub Vec); + +impl From> for TaskErrors { + fn from(errs: Vec) -> Self { + Self(errs) + } +} + +impl fmt::Debug for TaskErrors { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0 + .iter() + .map(|err| format!("{err:#}")) + .collect::>() + .fmt(f) + } +} + /// An error that can occur during the service lifecycle. 
#[derive(Debug, thiserror::Error)] pub enum ZkStackServiceError { @@ -25,5 +46,5 @@ pub enum ZkStackServiceError { #[error("One or more wiring layers failed to initialize: {0:?}")] Wiring(Vec<(String, WiringError)>), #[error("One or more tasks failed: {0:?}")] - Task(Vec), + Task(TaskErrors), } diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs index b6d42009354..00e50f7dc3b 100644 --- a/core/node/node_framework/src/service/mod.rs +++ b/core/node/node_framework/src/service/mod.rs @@ -171,7 +171,7 @@ impl ZkStackService { if self.errors.is_empty() { Ok(()) } else { - Err(ZkStackServiceError::Task(self.errors)) + Err(ZkStackServiceError::Task(self.errors.into())) } } diff --git a/core/node/node_sync/Cargo.toml b/core/node/node_sync/Cargo.toml index 27b07fec621..b10cdca8a82 100644 --- a/core/node/node_sync/Cargo.toml +++ b/core/node/node_sync/Cargo.toml @@ -44,3 +44,4 @@ zksync_node_test_utils.workspace = true assert_matches.workspace = true once_cell.workspace = true test-casing.workspace = true +backon.workspace = true diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 6075ff048bf..1be7e00543f 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -15,6 +15,7 @@ use zksync_state_keeper::{ updates::UpdatesManager, }; use zksync_types::{ + block::UnsealedL1BatchHeader, protocol_upgrade::ProtocolUpgradeTx, protocol_version::{ProtocolSemanticVersion, VersionPatch}, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, @@ -103,6 +104,63 @@ impl ExternalIO { } }) } + + async fn ensure_protocol_version_is_saved( + &self, + protocol_version: ProtocolVersionId, + ) -> anyhow::Result<()> { + let base_system_contract_hashes = self + .pool + .connection_tagged("sync_layer") + .await? + .protocol_versions_dal() + .get_base_system_contract_hashes_by_version_id(protocol_version as u16) + .await?; + if base_system_contract_hashes.is_some() { + return Ok(()); + } + tracing::info!("Fetching protocol version {protocol_version:?} from the main node"); + + let protocol_version = self + .main_node_client + .fetch_protocol_version(protocol_version) + .await + .context("failed to fetch protocol version from the main node")? + .context("protocol version is missing on the main node")?; + let minor = protocol_version + .minor_version() + .context("Missing minor protocol version")?; + let bootloader_code_hash = protocol_version + .bootloader_code_hash() + .context("Missing bootloader code hash")?; + let default_account_code_hash = protocol_version + .default_account_code_hash() + .context("Missing default account code hash")?; + let evm_emulator_code_hash = protocol_version.evm_emulator_code_hash(); + let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); + self.pool + .connection_tagged("sync_layer") + .await? 
+ .protocol_versions_dal() + .save_protocol_version( + ProtocolSemanticVersion { + minor: minor + .try_into() + .context("cannot convert protocol version")?, + patch: VersionPatch(0), + }, + protocol_version.timestamp, + Default::default(), // verification keys are unused for EN + BaseSystemContractsHashes { + bootloader: bootloader_code_hash, + default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, + }, + l2_system_upgrade_tx_hash, + ) + .await?; + Ok(()) + } } impl IoSealCriteria for ExternalIO { @@ -154,6 +212,14 @@ impl StateKeeperIO for ExternalIO { ) })?; let Some(mut pending_l2_block_header) = pending_l2_block_header else { + tracing::info!( + l1_batch_number = %cursor.l1_batch, + "No pending L2 blocks found; pruning unsealed batch if exists as we need at least one L2 block to initialize" + ); + storage + .blocks_dal() + .delete_unsealed_l1_batch(cursor.l1_batch - 1) + .await?; return Ok((cursor, None)); }; @@ -185,7 +251,7 @@ impl StateKeeperIO for ExternalIO { pending_l2_block_header.set_protocol_version(protocol_version); } - let (system_env, l1_batch_env) = self + let (system_env, l1_batch_env, pubdata_params) = self .l1_batch_params_provider .load_l1_batch_params( &mut storage, @@ -200,7 +266,15 @@ impl StateKeeperIO for ExternalIO { cursor.l1_batch ) })?; - let data = load_pending_batch(&mut storage, system_env, l1_batch_env) + storage + .blocks_dal() + .ensure_unsealed_l1_batch_exists( + l1_batch_env + .clone() + .into_unsealed_header(Some(system_env.version)), + ) + .await?; + let data = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .with_context(|| { format!( @@ -236,6 +310,21 @@ impl StateKeeperIO for ExternalIO { "L2 block number mismatch: expected {}, got {first_l2_block_number}", cursor.next_l2_block ); + + self.ensure_protocol_version_is_saved(params.protocol_version) + .await?; + self.pool + .connection_tagged("sync_layer") + .await? + .blocks_dal() + .insert_l1_batch(UnsealedL1BatchHeader { + number: cursor.l1_batch, + timestamp: params.first_l2_block.timestamp, + protocol_version: Some(params.protocol_version), + fee_address: params.operator_address, + fee_input: params.fee_input, + }) + .await?; return Ok(Some(params)); } other => { @@ -321,63 +410,36 @@ impl StateKeeperIO for ExternalIO { .connection_tagged("sync_layer") .await? .protocol_versions_dal() - .load_base_system_contracts_by_version_id(protocol_version as u16) - .await - .context("failed loading base system contracts")?; - - if let Some(contracts) = base_system_contracts { - return Ok(contracts); - } - tracing::info!("Fetching protocol version {protocol_version:?} from the main node"); - - let protocol_version = self - .main_node_client - .fetch_protocol_version(protocol_version) - .await - .context("failed to fetch protocol version from the main node")? - .context("protocol version is missing on the main node")?; - let minor = protocol_version - .minor_version() - .context("Missing minor protocol version")?; - let bootloader_code_hash = protocol_version - .bootloader_code_hash() - .context("Missing bootloader code hash")?; - let default_account_code_hash = protocol_version - .default_account_code_hash() - .context("Missing default account code hash")?; - let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); - self.pool - .connection_tagged("sync_layer") + .get_base_system_contract_hashes_by_version_id(protocol_version as u16) .await? 
- .protocol_versions_dal() - .save_protocol_version( - ProtocolSemanticVersion { - minor: minor - .try_into() - .context("cannot convert protocol version")?, - patch: VersionPatch(0), - }, - protocol_version.timestamp, - Default::default(), // verification keys are unused for EN - BaseSystemContractsHashes { - bootloader: bootloader_code_hash, - default_aa: default_account_code_hash, - }, - l2_system_upgrade_tx_hash, - ) - .await?; + .with_context(|| { + format!("Cannot load base system contracts' hashes for {protocol_version:?}. They should already be present") + })?; let bootloader = self - .get_base_system_contract(bootloader_code_hash, cursor.next_l2_block) + .get_base_system_contract(base_system_contracts.bootloader, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch bootloader code for {protocol_version:?}"))?; let default_aa = self - .get_base_system_contract(default_account_code_hash, cursor.next_l2_block) + .get_base_system_contract(base_system_contracts.default_aa, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch default AA code for {protocol_version:?}"))?; + let evm_emulator = if let Some(hash) = base_system_contracts.evm_emulator { + Some( + self.get_base_system_contract(hash, cursor.next_l2_block) + .await + .with_context(|| { + format!("cannot fetch EVM emulator code for {protocol_version:?}") + })?, + ) + } else { + None + }; + Ok(BaseSystemContracts { bootloader, default_aa, + evm_emulator, }) } @@ -414,3 +476,98 @@ impl StateKeeperIO for ExternalIO { Ok(hash) } } + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use zksync_dal::{ConnectionPool, CoreDal}; + use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; + use zksync_state_keeper::{io::L1BatchParams, L2BlockParams, StateKeeperIO}; + use zksync_types::{ + api, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, + H256, + }; + + use crate::{sync_action::SyncAction, testonly::MockMainNodeClient, ActionQueue, ExternalIO}; + + #[tokio::test] + async fn insert_batch_with_protocol_version() { + // Whenever ExternalIO inserts an unsealed batch into DB it should populate it with protocol + // version and make sure that it is present in the DB (i.e. fetch it from main node if not). 
+ let pool = ConnectionPool::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + insert_genesis_batch(&mut conn, &GenesisParams::mock()) + .await + .unwrap(); + let (actions_sender, action_queue) = ActionQueue::new(); + let mut client = MockMainNodeClient::default(); + let next_protocol_version = api::ProtocolVersion { + minor_version: Some(ProtocolVersionId::next() as u16), + timestamp: 1, + bootloader_code_hash: Some(H256::repeat_byte(1)), + default_account_code_hash: Some(H256::repeat_byte(1)), + evm_emulator_code_hash: Some(H256::repeat_byte(1)), + ..api::ProtocolVersion::default() + }; + client.insert_protocol_version(next_protocol_version.clone()); + let mut external_io = ExternalIO::new( + pool.clone(), + action_queue, + Box::new(client), + L2ChainId::default(), + ) + .unwrap(); + + let (cursor, _) = external_io.initialize().await.unwrap(); + let params = L1BatchParams { + protocol_version: ProtocolVersionId::next(), + validation_computational_gas_limit: u32::MAX, + operator_address: Default::default(), + fee_input: BatchFeeInput::pubdata_independent(2, 3, 4), + first_l2_block: L2BlockParams { + timestamp: 1, + virtual_blocks: 1, + }, + pubdata_params: Default::default(), + }; + actions_sender + .push_action_unchecked(SyncAction::OpenBatch { + params: params.clone(), + number: L1BatchNumber(1), + first_l2_block_number: L2BlockNumber(1), + }) + .await + .unwrap(); + let fetched_params = external_io + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .unwrap(); + assert_eq!(fetched_params, params); + + // Verify that the next protocol version is in DB + let fetched_protocol_version = conn + .protocol_versions_dal() + .get_protocol_version_with_latest_patch(ProtocolVersionId::next()) + .await + .unwrap() + .unwrap(); + assert_eq!( + fetched_protocol_version.version.minor as u16, + next_protocol_version.minor_version.unwrap() + ); + + // Verify that the unsealed batch has protocol version + let unsealed_batch = conn + .blocks_dal() + .get_unsealed_l1_batch() + .await + .unwrap() + .unwrap(); + assert_eq!( + unsealed_batch.protocol_version, + Some(fetched_protocol_version.version.minor) + ); + } +} diff --git a/core/node/node_sync/src/fetcher.rs b/core/node/node_sync/src/fetcher.rs index 08e3d426243..9c76d1d93ca 100644 --- a/core/node/node_sync/src/fetcher.rs +++ b/core/node/node_sync/src/fetcher.rs @@ -1,3 +1,4 @@ +use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state_keeper::io::{common::IoCursor, L1BatchParams, L2BlockParams}; @@ -78,6 +79,14 @@ impl TryFrom for FetchedBlock { )); } + let pubdata_params = if block.protocol_version.is_pre_gateway() { + block.pubdata_params.unwrap_or_default() + } else { + block + .pubdata_params + .context("Missing `pubdata_params` for post-gateway payload")? 
+ }; + Ok(Self { number: block.number, l1_batch_number: block.l1_batch_number, @@ -94,7 +103,7 @@ impl TryFrom for FetchedBlock { .into_iter() .map(FetchedTransaction::new) .collect(), - pubdata_params: block.pubdata_params.unwrap_or_default(), + pubdata_params, }) } } diff --git a/core/node/node_sync/src/genesis.rs b/core/node/node_sync/src/genesis.rs index ccc26b417e9..c5d4869175d 100644 --- a/core/node/node_sync/src/genesis.rs +++ b/core/node/node_sync/src/genesis.rs @@ -38,6 +38,7 @@ async fn create_genesis_params( let base_system_contracts_hashes = BaseSystemContractsHashes { bootloader: config.bootloader_hash.context("Genesis is not finished")?, default_aa: config.default_aa_hash.context("Genesis is not finished")?, + evm_emulator: config.evm_emulator_hash, }; if zksync_chain_id != config.l2_chain_id { @@ -47,10 +48,11 @@ async fn create_genesis_params( // Load the list of addresses that are known to contain system contracts at any point in time. // Not every of these addresses is guaranteed to be present in the genesis state, but we'll iterate through // them and try to fetch the contract bytecode for each of them. - let system_contract_addresses: Vec<_> = get_system_smart_contracts() - .into_iter() - .map(|contract| *contract.account_id.address()) - .collect(); + let system_contract_addresses: Vec<_> = + get_system_smart_contracts(config.evm_emulator_hash.is_some()) + .into_iter() + .map(|contract| *contract.account_id.address()) + .collect(); // These have to be *initial* base contract hashes of main node // (those that were used during genesis), not necessarily the current ones. @@ -103,6 +105,18 @@ async fn fetch_base_system_contracts( .fetch_system_contract_by_hash(contract_hashes.default_aa) .await? .context("default AA bytecode is missing on main node")?; + let evm_emulator = if let Some(hash) = contract_hashes.evm_emulator { + let bytes = client + .fetch_system_contract_by_hash(hash) + .await? + .context("EVM emulator bytecode is missing on main node")?; + Some(SystemContractCode { + code: zksync_utils::bytes_to_be_words(bytes), + hash, + }) + } else { + None + }; Ok(BaseSystemContracts { bootloader: SystemContractCode { code: zksync_utils::bytes_to_be_words(bootloader_bytecode), @@ -112,5 +126,6 @@ async fn fetch_base_system_contracts( code: zksync_utils::bytes_to_be_words(default_aa_bytecode), hash: contract_hashes.default_aa, }, + evm_emulator, }) } diff --git a/core/node/node_sync/src/sync_action.rs b/core/node/node_sync/src/sync_action.rs index 4505dbb93ab..897abfafb2a 100644 --- a/core/node/node_sync/src/sync_action.rs +++ b/core/node/node_sync/src/sync_action.rs @@ -33,6 +33,18 @@ impl ActionQueueSender { Ok(()) } + /// Pushes a single action into the queue without checking validity of the sequence. + /// + /// Useful to simulate situations where only a part of the sequence was executed on the node. + #[cfg(test)] + pub async fn push_action_unchecked(&self, action: SyncAction) -> anyhow::Result<()> { + self.0 + .send(action) + .await + .map_err(|_| anyhow::anyhow!("node action processor stopped"))?; + Ok(()) + } + /// Checks whether the action sequence is valid. /// Returned error is meant to be used as a panic message, since an invalid sequence represents an unrecoverable /// error. This function itself does not panic for the ease of testing. 
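A usage sketch (not part of the diff): `push_action_unchecked` exists so tests can leave the action queue in a deliberately partial state, which the `external_io_empty_unsealed_batch` test added later in this patch relies on. `open_l1_batch` is the existing test helper that produces a `SyncAction::OpenBatch`; everything else below is illustrative.

```rust
let (actions_sender, _action_queue) = ActionQueue::new();
let open_batch_two = open_l1_batch(2, 2, 3);
// Bypass sequence validation so batch #2 lands in the queue with no blocks or
// transactions, simulating a restart in the middle of an action sequence.
actions_sender
    .push_action_unchecked(open_batch_two)
    .await
    .unwrap();
```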
diff --git a/core/node/node_sync/src/sync_state.rs b/core/node/node_sync/src/sync_state.rs index e061ff7da01..f8a2fe00ec0 100644 --- a/core/node/node_sync/src/sync_state.rs +++ b/core/node/node_sync/src/sync_state.rs @@ -173,6 +173,7 @@ impl CheckHealth for SyncState { Health::from(&*self.0.borrow()) } } + impl SyncStateInner { fn is_synced(&self) -> (bool, Option) { if let (Some(main_node_block), Some(local_block)) = (self.main_node_block, self.local_block) diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index 8582bbe9374..172a00e8c14 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -2,6 +2,7 @@ use std::{iter, sync::Arc, time::Duration}; +use backon::{ConstantBuilder, Retryable}; use test_casing::test_casing; use tokio::{sync::watch, task::JoinHandle}; use zksync_contracts::BaseSystemContractsHashes; @@ -18,7 +19,7 @@ use zksync_state_keeper::{ }; use zksync_types::{ api, - block::L2BlockHasher, + block::{L2BlockHasher, UnsealedL1BatchHeader}, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, @@ -62,12 +63,12 @@ impl MockMainNodeClient { l2_fair_gas_price: 3, fair_pubdata_price: Some(24), base_system_contracts_hashes: BaseSystemContractsHashes::default(), - pubdata_params: Default::default(), operator_address: Address::repeat_byte(2), transactions: Some(vec![]), virtual_blocks: Some(0), hash: Some(snapshot.l2_block_hash), protocol_version: ProtocolVersionId::latest(), + pubdata_params: Default::default(), }; Self { @@ -107,7 +108,9 @@ impl StateKeeperHandles { let sync_state = SyncState::default(); let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Some(Address::repeat_byte(1)), 5); + StateKeeperPersistence::new(pool.clone(), Some(Address::repeat_byte(1)), 5) + .await + .unwrap(); let tree_writes_persistence = TreeWritesPersistence::new(pool.clone()); let output_handler = OutputHandler::new(Box::new(persistence.with_tx_insertion())) .with_handler(Box::new(tree_writes_persistence)) @@ -306,6 +309,7 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo timestamp: snapshot.l2_block_timestamp + 1, bootloader_code_hash: Some(H256::repeat_byte(1)), default_account_code_hash: Some(H256::repeat_byte(1)), + evm_emulator_code_hash: Some(H256::repeat_byte(1)), ..api::ProtocolVersion::default() }; client.insert_protocol_version(next_protocol_version.clone()); @@ -347,6 +351,13 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo next_protocol_version.default_account_code_hash.unwrap() ); + assert_eq!( + persisted_protocol_version + .base_system_contracts_hashes + .evm_emulator, + next_protocol_version.evm_emulator_code_hash + ); + let l2_block = storage .blocks_dal() .get_l2_block_header(snapshot.l2_block_number + 1) @@ -646,3 +657,101 @@ async fn external_io_with_multiple_l1_batches() { assert_eq!(fictive_l2_block.timestamp, 2); assert_eq!(fictive_l2_block.l2_tx_count, 0); } + +async fn wait_for_batch_to_be_open( + pool: &ConnectionPool, + number: L1BatchNumber, +) -> anyhow::Result { + (|| async { + let mut storage = pool.connection().await.unwrap(); + let unsealed_batch = storage.blocks_dal().get_unsealed_l1_batch().await?; + + if let Some(unsealed_batch) = unsealed_batch { + if unsealed_batch.number == number { + Ok(unsealed_batch) + } else { + Err(anyhow::anyhow!("L1 batch #{number} is not open 
yet")) + } + } else { + Err(anyhow::anyhow!("No unsealed L1 batch found yet")) + } + }) + .retry( + &ConstantBuilder::default() + .with_delay(Duration::from_millis(200)) + .with_max_times(20), + ) + .await +} + +#[tokio::test] +async fn external_io_empty_unsealed_batch() { + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + ensure_genesis(&mut storage).await; + drop(storage); + + let open_batch_one = open_l1_batch(1, 1, 1); + let tx = create_l2_transaction(10, 100); + let tx_hash = tx.hash(); + let tx = FetchedTransaction::new(tx.into()); + let open_batch_two = open_l1_batch(2, 2, 3); + let fictive_l2_block = SyncAction::L2Block { + params: L2BlockParams { + timestamp: 2, + virtual_blocks: 0, + }, + number: L2BlockNumber(2), + }; + let actions1 = vec![open_batch_one, tx.into(), SyncAction::SealL2Block]; + let actions2 = vec![fictive_l2_block, SyncAction::SealBatch]; + + let (actions_sender, action_queue) = ActionQueue::new(); + let client = MockMainNodeClient::default(); + let state_keeper = + StateKeeperHandles::new(pool.clone(), client, action_queue, &[&[tx_hash]]).await; + actions_sender.push_actions(actions1).await.unwrap(); + actions_sender.push_actions(actions2).await.unwrap(); + // Unchecked insert of batch #2 to simulate restart in the middle of processing an action sequence + // In other words batch #2 is inserted completely empty with no blocks/txs present in it + actions_sender + .push_action_unchecked(open_batch_two.clone()) + .await + .unwrap(); + // Wait until the L2 block is sealed. + state_keeper.wait_for_local_block(L2BlockNumber(2)).await; + + // Wait until L1 batch #2 is opened and persisted. + let unsealed_batch = wait_for_batch_to_be_open(&pool, L1BatchNumber(2)) + .await + .unwrap(); + assert_eq!(unsealed_batch.number, L1BatchNumber(2)); + assert_eq!(unsealed_batch.timestamp, 2); + + // Prepare the rest of batch #2 + let tx = create_l2_transaction(20, 200); + let tx_hash = tx.hash(); + let tx = FetchedTransaction::new(tx.into()); + let fictive_l2_block = SyncAction::L2Block { + params: L2BlockParams { + timestamp: 4, + virtual_blocks: 0, + }, + number: L2BlockNumber(4), + }; + let actions1 = vec![open_batch_two, tx.into(), SyncAction::SealL2Block]; + let actions2 = vec![fictive_l2_block, SyncAction::SealBatch]; + + // Restart state keeper + let (actions_sender, action_queue) = ActionQueue::new(); + let client = MockMainNodeClient::default(); + let state_keeper = + StateKeeperHandles::new(pool.clone(), client, action_queue, &[&[tx_hash]]).await; + actions_sender.push_actions(actions1).await.unwrap(); + actions_sender.push_actions(actions2).await.unwrap(); + + let hash_task = tokio::spawn(mock_l1_batch_hash_computation(pool.clone(), 1)); + // Wait until the block #4 is sealed. 
+ state_keeper.wait_for_local_block(L2BlockNumber(4)).await; + hash_task.await.unwrap(); +} diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 82063b23fdb..e2ddc972a2f 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -17,9 +17,12 @@ zksync_dal.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true +zksync_vm_executor.workspace = true +zksync_utils.workspace = true anyhow.workspace = true axum.workspace = true tokio.workspace = true +tower-http = { workspace = true, features = ["compression-zstd", "decompression-zstd"] } tracing.workspace = true [dev-dependencies] diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs index 15ef393294a..7d0e33ea0a3 100644 --- a/core/node/proof_data_handler/src/errors.rs +++ b/core/node/proof_data_handler/src/errors.rs @@ -6,6 +6,7 @@ use zksync_dal::DalError; use zksync_object_store::ObjectStoreError; pub(crate) enum RequestProcessorError { + GeneralError(String), ObjectStore(ObjectStoreError), Dal(DalError), } @@ -19,24 +20,26 @@ impl From for RequestProcessorError { impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { let (status_code, message) = match self { - RequestProcessorError::ObjectStore(err) => { + Self::GeneralError(err) => { + tracing::error!("Error: {:?}", err); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "An internal error occurred".to_owned(), + ) + } + Self::ObjectStore(err) => { tracing::error!("GCS error: {:?}", err); ( StatusCode::BAD_GATEWAY, "Failed fetching/saving from GCS".to_owned(), ) } - RequestProcessorError::Dal(err) => { + Self::Dal(err) => { tracing::error!("Sqlx error: {:?}", err); - match err.inner() { - zksync_dal::SqlxError::RowNotFound => { - (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) - } - _ => ( - StatusCode::BAD_GATEWAY, - "Failed fetching/saving from db".to_owned(), - ), - } + ( + StatusCode::BAD_GATEWAY, + "Failed fetching/saving from db".to_owned(), + ) } }; (status_code, message).into_response() diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 618a786ea65..e014fca15d7 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -1,7 +1,7 @@ use std::{net::SocketAddr, sync::Arc}; use anyhow::Context as _; -use axum::{extract::Path, routing::post, Json, Router}; +use axum::{extract::Path, http::StatusCode, response::IntoResponse, routing::post, Json, Router}; use request_processor::RequestProcessor; use tee_request_processor::TeeRequestProcessor; use tokio::sync::watch; @@ -12,7 +12,7 @@ use zksync_prover_interface::api::{ ProofGenerationDataRequest, RegisterTeeAttestationRequest, SubmitProofRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest, }; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; #[cfg(test)] mod tests; @@ -27,11 +27,18 @@ pub async fn run_server( blob_store: Arc, connection_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); - tracing::debug!("Starting proof data handler server on {bind_address}"); - let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode); + 
tracing::info!("Starting proof data handler server on {bind_address}"); + let app = create_proof_processing_router( + blob_store, + connection_pool, + config, + commitment_mode, + l2_chain_id, + ); let listener = tokio::net::TcpListener::bind(bind_address) .await @@ -54,6 +61,7 @@ fn create_proof_processing_router( connection_pool: ConnectionPool, config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Router { let get_proof_gen_processor = RequestProcessor::new( blob_store.clone(), @@ -86,9 +94,9 @@ fn create_proof_processing_router( ), ); - if config.tee_support { + if config.tee_config.tee_support { let get_tee_proof_gen_processor = - TeeRequestProcessor::new(blob_store, connection_pool, config.clone()); + TeeRequestProcessor::new(blob_store, connection_pool, config.clone(), l2_chain_id); let submit_tee_proof_processor = get_tee_proof_gen_processor.clone(); let register_tee_attestation_processor = get_tee_proof_gen_processor.clone(); @@ -96,9 +104,15 @@ fn create_proof_processing_router( "/tee/proof_inputs", post( move |payload: Json| async move { - get_tee_proof_gen_processor + let result = get_tee_proof_gen_processor .get_proof_generation_data(payload) - .await + .await; + + match result { + Ok(Some(data)) => (StatusCode::OK, data).into_response(), + Ok(None) => { StatusCode::NO_CONTENT.into_response()}, + Err(e) => e.into_response(), + } }, ), ) @@ -125,4 +139,6 @@ fn create_proof_processing_router( } router + .layer(tower_http::compression::CompressionLayer::new()) + .layer(tower_http::decompression::RequestDecompressionLayer::new().zstd(true)) } diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index eea3925bdd7..89304724a7c 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -244,11 +244,10 @@ impl RequestProcessor { || bootloader_heap_initial_content != bootloader_heap_initial_content_from_prover { - let server_values = format!("events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}"); - let prover_values = format!("events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = {bootloader_heap_initial_content_from_prover}"); panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values + "Auxilary output doesn't match\n\ + server values: events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}\n\ + prover values: events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = {bootloader_heap_initial_content_from_prover}", ); } @@ -261,8 +260,9 @@ impl RequestProcessor { .system_logs .iter() .find_map(|log| { - (log.0.key == H256::from_low_u64_be(STATE_DIFF_HASH_KEY_PRE_GATEWAY)) - .then_some(log.0.value) + (log.0.key + == H256::from_low_u64_be(STATE_DIFF_HASH_KEY_PRE_GATEWAY as u64)) + .then_some(log.0.value) }) .expect("Failed to get state_diff_hash from system logs") } else { diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 4ae1a5026f1..8e06d0c26bc 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -4,11 +4,17 @@ use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use 
zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_object_store::{ObjectStore, ObjectStoreError}; -use zksync_prover_interface::api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, - SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, +use zksync_prover_interface::{ + api::{ + RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, + SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, + }, + inputs::{ + TeeVerifierInput, V1TeeVerifierInput, VMRunWitnessInputData, WitnessInputMerklePaths, + }, }; -use zksync_types::{tee_types::TeeType, L1BatchNumber}; +use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; +use zksync_vm_executor::storage::L1BatchParamsProvider; use crate::errors::RequestProcessorError; @@ -17,6 +23,7 @@ pub(crate) struct TeeRequestProcessor { blob_store: Arc, pool: ConnectionPool, config: ProofDataHandlerConfig, + l2_chain_id: L2ChainId, } impl TeeRequestProcessor { @@ -24,45 +31,52 @@ impl TeeRequestProcessor { blob_store: Arc, pool: ConnectionPool, config: ProofDataHandlerConfig, + l2_chain_id: L2ChainId, ) -> Self { Self { blob_store, pool, config, + l2_chain_id, } } pub(crate) async fn get_proof_generation_data( &self, request: Json, - ) -> Result, RequestProcessorError> { + ) -> Result>, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); - let mut min_batch_number: Option = None; + let mut min_batch_number = self.config.tee_config.first_tee_processed_batch; let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; let result = loop { - let l1_batch_number = match self + let Some(l1_batch_number) = self .lock_batch_for_proving(request.tee_type, min_batch_number) .await? 
- { - Some(number) => number, - None => break Ok(Json(TeeProofGenerationDataResponse(None))), + else { + // No job available + return Ok(None); }; - match self.blob_store.get(l1_batch_number).await { - Ok(input) => break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))), - Err(ObjectStoreError::KeyNotFound(_)) => { + match self + .tee_verifier_input_for_existing_batch(l1_batch_number) + .await + { + Ok(input) => { + break Ok(Some(Json(TeeProofGenerationDataResponse(Box::new(input))))); + } + Err(RequestProcessorError::ObjectStore(ObjectStoreError::KeyNotFound(_))) => { missing_range = match missing_range { Some((start, _)) => Some((start, l1_batch_number)), None => Some((l1_batch_number, l1_batch_number)), }; self.unlock_batch(l1_batch_number, request.tee_type).await?; - min_batch_number = Some(min_batch_number.unwrap_or(l1_batch_number) + 1); + min_batch_number = l1_batch_number + 1; } Err(err) => { self.unlock_batch(l1_batch_number, request.tee_type).await?; - break Err(RequestProcessorError::ObjectStore(err)); + break Err(err); } } }; @@ -78,14 +92,74 @@ impl TeeRequestProcessor { result } + #[tracing::instrument(skip(self))] + async fn tee_verifier_input_for_existing_batch( + &self, + l1_batch_number: L1BatchNumber, + ) -> Result { + let vm_run_data: VMRunWitnessInputData = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let merkle_paths: WitnessInputMerklePaths = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let mut connection = self + .pool + .connection_tagged("tee_request_processor") + .await + .map_err(RequestProcessorError::Dal)?; + + let l2_blocks_execution_data = connection + .transactions_dal() + .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) + .await + .map_err(RequestProcessorError::Dal)?; + + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) + .await + .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))?; + + // In the state keeper, this value is used to reject execution. + // All batches have already been executed by State Keeper. + // This means we don't want to reject any execution, therefore we're using MAX as an allow all. + let validation_computational_gas_limit = u32::MAX; + + let (system_env, l1_batch_env, pubdata_params) = l1_batch_params_provider + .load_l1_batch_env( + &mut connection, + l1_batch_number, + validation_computational_gas_limit, + self.l2_chain_id, + ) + .await + .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))? + .ok_or(RequestProcessorError::GeneralError( + "system_env, l1_batch_env missing".into(), + ))?; + + Ok(TeeVerifierInput::new(V1TeeVerifierInput { + vm_run_data, + merkle_paths, + l2_blocks_execution_data, + l1_batch_env, + system_env, + pubdata_params, + })) + } + async fn lock_batch_for_proving( &self, tee_type: TeeType, - min_batch_number: Option, + min_batch_number: L1BatchNumber, ) -> Result, RequestProcessorError> { - let result = self - .pool - .connection() + self.pool + .connection_tagged("tee_request_processor") .await? 
.tee_proof_generation_dal() .lock_batch_for_proving( @@ -93,8 +167,8 @@ impl TeeRequestProcessor { self.config.proof_generation_timeout(), min_batch_number, ) - .await?; - Ok(result) + .await + .map_err(RequestProcessorError::Dal) } async fn unlock_batch( @@ -103,7 +177,7 @@ impl TeeRequestProcessor { tee_type: TeeType, ) -> Result<(), RequestProcessorError> { self.pool - .connection() + .connection_tagged("tee_request_processor") .await? .tee_proof_generation_dal() .unlock_batch(l1_batch_number, tee_type) @@ -117,7 +191,7 @@ impl TeeRequestProcessor { Json(proof): Json, ) -> Result, RequestProcessorError> { let l1_batch_number = L1BatchNumber(l1_batch_number); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); tracing::info!( @@ -143,7 +217,7 @@ impl TeeRequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received attestation: {:?}", payload); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); dal.save_attestation(&payload.pubkey, &payload.attestation) diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 4319fce6216..63ea087a81c 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -1,5 +1,3 @@ -use std::time::Instant; - use axum::{ body::Body, http::{self, Method, Request, StatusCode}, @@ -8,128 +6,67 @@ use axum::{ }; use serde_json::json; use tower::ServiceExt; -use zksync_basic_types::U256; -use zksync_config::configs::ProofDataHandlerConfig; -use zksync_contracts::{BaseSystemContracts, SystemContractCode}; +use zksync_basic_types::L2ChainId; +use zksync_config::configs::{ProofDataHandlerConfig, TeeConfig}; use zksync_dal::{ConnectionPool, CoreDal}; -use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_object_store::MockObjectStore; -use zksync_prover_interface::{ - api::SubmitTeeProofRequest, - inputs::{TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths}, -}; -use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber, H256}; +use zksync_prover_interface::api::SubmitTeeProofRequest; +use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber}; use crate::create_proof_processing_router; -// Test the /tee/proof_inputs endpoint by: -// 1. Mocking an object store with a single batch blob containing TEE verifier input -// 2. Populating the SQL db with relevant information about the status of the TEE verifier input and -// TEE proof generation -// 3. 
Sending a request to the /tee/proof_inputs endpoint and asserting that the response -// matches the file from the object store #[tokio::test] async fn request_tee_proof_inputs() { - // prepare a sample mocked TEE verifier input - - let batch_number = L1BatchNumber::from(1); - let tvi = V1TeeVerifierInput::new( - WitnessInputMerklePaths::new(0), - vec![], - L1BatchEnv { - previous_batch_hash: Some(H256([1; 32])), - number: batch_number, - timestamp: 0, - fee_input: Default::default(), - fee_account: Default::default(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 0, - timestamp: 0, - prev_block_hash: H256([1; 32]), - max_virtual_blocks_to_create: 0, - }, - }, - SystemEnv { - zk_porter_available: false, - version: Default::default(), - base_system_smart_contracts: BaseSystemContracts { - bootloader: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - default_aa: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - }, - bootloader_gas_limit: 0, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: 0, - chain_id: Default::default(), - pubdata_params: Default::default(), - }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], - ); - let tvi = TeeVerifierInput::V1(tvi); - - // populate mocked object store with a single batch blob - - let blob_store = MockObjectStore::arc(); - let object_path = blob_store.put(batch_number, &tvi).await.unwrap(); - - // get connection to the SQL db and mock the status of the TEE proof generation - let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, &object_path).await; - - // test the /tee/proof_inputs endpoint; it should return the batch from the object store let app = create_proof_processing_router( - blob_store, - db_conn_pool, + MockObjectStore::arc(), + db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(0), + }, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); - let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "sgx" })).unwrap()); - let response = app - .oneshot( - Request::builder() - .method(Method::POST) - .uri("/tee/proof_inputs") - .header(http::header::CONTENT_TYPE, "application/json") - .body(req_body) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - let deserialized: TeeVerifierInput = serde_json::from_value(json).unwrap(); - - assert_eq!(tvi, deserialized); + let test_cases = vec![ + (json!({ "tee_type": "sgx" }), StatusCode::NO_CONTENT), + ( + json!({ "tee_type": "Sgx" }), + StatusCode::UNPROCESSABLE_ENTITY, + ), + ]; + + for (body, expected_status) in test_cases { + let req_body = Body::from(serde_json::to_vec(&body).unwrap()); + let response = app + .clone() + .oneshot( + Request::builder() + .method(Method::POST) + .uri("/tee/proof_inputs") + .header(http::header::CONTENT_TYPE, "application/json") + .body(req_body) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), expected_status); + } } // Test /tee/submit_proofs endpoint using a mocked TEE proof and verify response and db state #[tokio::test] async fn submit_tee_proof() { - let blob_store = 
MockObjectStore::arc(); - let db_conn_pool = ConnectionPool::test_pool().await; - let object_path = "mocked_object_path"; let batch_number = L1BatchNumber::from(1); + let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, object_path).await; - - // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof + mock_tee_batch_status(db_conn_pool.clone(), batch_number).await; let tee_proof_request_str = r#"{ "signature": "0001020304", @@ -141,14 +78,18 @@ async fn submit_tee_proof() { serde_json::from_str::(tee_proof_request_str).unwrap(); let uri = format!("/tee/submit_proofs/{}", batch_number.0); let app = create_proof_processing_router( - blob_store, + MockObjectStore::arc(), db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(0), + }, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); // this should fail because we haven't saved the attestation for the pubkey yet @@ -207,32 +148,15 @@ async fn submit_tee_proof() { async fn mock_tee_batch_status( db_conn_pool: ConnectionPool, batch_number: L1BatchNumber, - object_path: &str, ) { let mut proof_db_conn = db_conn_pool.connection().await.unwrap(); let mut proof_dal = proof_db_conn.tee_proof_generation_dal(); - let mut input_db_conn = db_conn_pool.connection().await.unwrap(); - let mut input_producer_dal = input_db_conn.tee_verifier_input_producer_dal(); // there should not be any batches awaiting proof in the db yet let oldest_batch_number = proof_dal.get_oldest_unpicked_batch().await.unwrap(); assert!(oldest_batch_number.is_none()); - // mock SQL table with relevant information about the status of the TEE verifier input - - input_producer_dal - .create_tee_verifier_input_producer_job(batch_number) - .await - .expect("Failed to create tee_verifier_input_producer_job"); - - // pretend that the TEE verifier input blob file was fetched successfully - - input_producer_dal - .mark_job_as_successful(batch_number, Instant::now(), object_path) - .await - .expect("Failed to mark tee_verifier_input_producer_job job as successful"); - // mock SQL table with relevant information about the status of TEE proof generation proof_dal diff --git a/core/node/shared_metrics/src/lib.rs b/core/node/shared_metrics/src/lib.rs index e0a7fa74ef4..2c41ec9293a 100644 --- a/core/node/shared_metrics/src/lib.rs +++ b/core/node/shared_metrics/src/lib.rs @@ -29,7 +29,6 @@ pub enum InitStage { EthTxAggregator, EthTxManager, Tree, - TeeVerifierInputProducer, Consensus, DADispatcher, } @@ -45,7 +44,6 @@ impl fmt::Display for InitStage { Self::EthTxAggregator => formatter.write_str("eth_tx_aggregator"), Self::EthTxManager => formatter.write_str("eth_tx_manager"), Self::Tree => formatter.write_str("tree"), - Self::TeeVerifierInputProducer => formatter.write_str("tee_verifier_input_producer"), Self::Consensus => formatter.write_str("consensus"), Self::DADispatcher => formatter.write_str("da_dispatcher"), } diff --git a/core/node/state_keeper/src/executor/mod.rs b/core/node/state_keeper/src/executor/mod.rs index 2fa5c3b9c12..903dae2f1ca 100644 --- a/core/node/state_keeper/src/executor/mod.rs +++ b/core/node/state_keeper/src/executor/mod.rs @@ -40,7 +40,7 @@ impl TxExecutionResult { _ => Self::Success { tx_metrics: Box::new(ExecutionMetricsForCriteria::new(Some(tx), &res.tx_result)), gas_remaining: res.tx_result.statistics.gas_remaining, - 
tx_result: res.tx_result, + tx_result: res.tx_result.clone(), compressed_bytecodes: res.compressed_bytecodes, call_tracer_result: res.call_traces, }, diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index a7fac40236c..cc7945dfa86 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -26,7 +26,7 @@ use zksync_state::{OwnedStorage, ReadStorageFactory, RocksdbStorageOptions}; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ block::L2BlockHasher, - commitment::{L1BatchCommitmentMode, PubdataParams}, + commitment::PubdataParams, ethabi::Token, get_code_key, get_known_code_key, protocol_version::ProtocolSemanticVersion, @@ -126,10 +126,9 @@ impl Tester { &mut self, storage_type: StorageType, ) -> Box> { - let (l1_batch_env, system_env) = self.default_batch_params(); + let (l1_batch_env, system_env, pubdata_params) = self.default_batch_params(); match storage_type { StorageType::AsyncRocksdbCache => { - let (l1_batch_env, system_env) = self.default_batch_params(); let (state_keeper_storage, task) = AsyncRocksdbCache::new( self.pool(), self.state_keeper_db_path(), @@ -144,6 +143,7 @@ impl Tester { Arc::new(state_keeper_storage), l1_batch_env, system_env, + pubdata_params, ) .await } @@ -155,12 +155,18 @@ impl Tester { )), l1_batch_env, system_env, + pubdata_params, ) .await } StorageType::Postgres => { - self.create_batch_executor_inner(Arc::new(self.pool()), l1_batch_env, system_env) - .await + self.create_batch_executor_inner( + Arc::new(self.pool()), + l1_batch_env, + system_env, + pubdata_params, + ) + .await } } } @@ -170,6 +176,7 @@ impl Tester { storage_factory: Arc, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box> { let (_stop_sender, stop_receiver) = watch::channel(false); let storage = storage_factory @@ -180,11 +187,11 @@ impl Tester { if self.config.trace_calls { let mut executor = MainBatchExecutorFactory::::new(false); executor.set_fast_vm_mode(self.config.fast_vm_mode); - executor.init_batch(storage, l1_batch_env, system_env) + executor.init_batch(storage, l1_batch_env, system_env, pubdata_params) } else { let mut executor = MainBatchExecutorFactory::<()>::new(false); executor.set_fast_vm_mode(self.config.fast_vm_mode); - executor.init_batch(storage, l1_batch_env, system_env) + executor.init_batch(storage, l1_batch_env, system_env, pubdata_params) } } @@ -234,7 +241,7 @@ impl Tester { snapshot: &SnapshotRecoveryStatus, ) -> Box> { let current_timestamp = snapshot.l2_block_timestamp + 1; - let (mut l1_batch_env, system_env) = + let (mut l1_batch_env, system_env, pubdata_params) = self.batch_params(snapshot.l1_batch_number + 1, current_timestamp); l1_batch_env.previous_batch_hash = Some(snapshot.l1_batch_root_hash); l1_batch_env.first_l2_block = L2BlockEnv { @@ -244,11 +251,11 @@ impl Tester { max_virtual_blocks_to_create: 1, }; - self.create_batch_executor_inner(storage_factory, l1_batch_env, system_env) + self.create_batch_executor_inner(storage_factory, l1_batch_env, system_env, pubdata_params) .await } - pub(super) fn default_batch_params(&self) -> (L1BatchEnv, SystemEnv) { + pub(super) fn default_batch_params(&self) -> (L1BatchEnv, SystemEnv, PubdataParams) { // Not really important for the batch executor - it operates over a single batch. 
self.batch_params(L1BatchNumber(1), 100) } @@ -258,20 +265,16 @@ impl Tester { &self, l1_batch_number: L1BatchNumber, timestamp: u64, - ) -> (L1BatchEnv, SystemEnv) { + ) -> (L1BatchEnv, SystemEnv, PubdataParams) { let mut system_params = default_system_env(); if let Some(vm_gas_limit) = self.config.vm_gas_limit { system_params.bootloader_gas_limit = vm_gas_limit; } system_params.default_validation_computational_gas_limit = self.config.validation_computational_gas_limit; - system_params.pubdata_params = PubdataParams { - l2_da_validator_address: get_da_contract_address(), - pubdata_type: L1BatchCommitmentMode::Rollup, - }; let mut batch_params = default_l1_batch_env(l1_batch_number.0, timestamp, self.fee_account); batch_params.previous_batch_hash = Some(H256::zero()); // Not important in this context. - (batch_params, system_params) + (batch_params, system_params, PubdataParams::default()) } /// Performs the genesis in the storage. @@ -285,7 +288,7 @@ impl Tester { patch: 0.into(), }, &BASE_SYSTEM_CONTRACTS, - &get_system_smart_contracts(), + &get_system_smart_contracts(false), Default::default(), ) .await diff --git a/core/node/state_keeper/src/io/common/mod.rs b/core/node/state_keeper/src/io/common/mod.rs index 6bd881414a2..867ffa7fb37 100644 --- a/core/node/state_keeper/src/io/common/mod.rs +++ b/core/node/state_keeper/src/io/common/mod.rs @@ -3,7 +3,7 @@ use std::time::Duration; use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_types::{L1BatchNumber, L2BlockNumber, H256}; +use zksync_types::{commitment::PubdataParams, L1BatchNumber, L2BlockNumber, H256}; use super::PendingBatchData; @@ -85,6 +85,7 @@ pub async fn load_pending_batch( storage: &mut Connection<'_, Core>, system_env: SystemEnv, l1_batch_env: L1BatchEnv, + pubdata_params: PubdataParams, ) -> anyhow::Result { let pending_l2_blocks = storage .transactions_dal() @@ -104,6 +105,7 @@ pub async fn load_pending_batch( Ok(PendingBatchData { l1_batch_env, system_env, + pubdata_params, pending_l2_blocks, }) } diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index b2a24acb495..ec9f906b1cd 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -318,7 +318,7 @@ async fn loading_pending_batch_with_genesis() { .await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let (system_env, l1_batch_env) = provider + let (system_env, l1_batch_env, pubdata_params) = provider .load_l1_batch_env( &mut storage, L1BatchNumber(1), @@ -331,7 +331,7 @@ async fn loading_pending_batch_with_genesis() { assert_eq!(l1_batch_env.first_l2_block.number, 1); - let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .unwrap(); @@ -396,7 +396,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { .await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let (system_env, l1_batch_env) = provider + let (system_env, l1_batch_env, pubdata_params) = provider .load_l1_batch_env( &mut storage, snapshot_recovery.l1_batch_number + 1, @@ -406,7 +406,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { .await .unwrap() .expect("no L1 batch"); - let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + let pending_batch = load_pending_batch(&mut storage, 
system_env, l1_batch_env, pubdata_params)
         .await
         .unwrap();
diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs
index 58e0d56be2d..dfddd36aba7 100644
--- a/core/node/state_keeper/src/io/mempool.rs
+++ b/core/node/state_keeper/src/io/mempool.rs
@@ -14,6 +14,7 @@ use zksync_mempool::L2TxFilter;
 use zksync_multivm::{interface::Halt, utils::derive_base_fee_and_gas_per_pubdata};
 use zksync_node_fee_model::BatchFeeModelInputProvider;
 use zksync_types::{
+    block::UnsealedL1BatchHeader,
     commitment::{L1BatchCommitmentMode, PubdataParams},
     protocol_upgrade::ProtocolUpgradeTx,
     utils::display_timestamp,
@@ -51,14 +52,14 @@ pub struct MempoolIO {
     filter: L2TxFilter,
     l1_batch_params_provider: L1BatchParamsProvider,
     fee_account: Address,
-    l2_da_validator_address: Option<Address>,
-    pubdata_type: L1BatchCommitmentMode,
     validation_computational_gas_limit: u32,
     max_allowed_tx_gas_limit: U256,
     delay_interval: Duration,
     // Used to keep track of gas prices to set accepted price per pubdata byte in blocks.
     batch_fee_input_provider: Arc<dyn BatchFeeModelInputProvider>,
     chain_id: L2ChainId,
+    l2_da_validator_address: Option<Address>
, + pubdata_type: L1BatchCommitmentMode, } impl IoSealCriteria for MempoolIO { @@ -101,7 +102,7 @@ impl StateKeeperIO for MempoolIO { L2BlockSealProcess::clear_pending_l2_block(&mut storage, cursor.next_l2_block - 1).await?; - let Some((system_env, l1_batch_env)) = self + let Some((system_env, l1_batch_env, pubdata_params)) = self .l1_batch_params_provider .load_l1_batch_env( &mut storage, @@ -113,38 +114,39 @@ impl StateKeeperIO for MempoolIO { else { return Ok((cursor, None)); }; - let pending_batch_data = load_pending_batch(&mut storage, system_env, l1_batch_env) - .await - .with_context(|| { - format!( - "failed loading data for re-execution for pending L1 batch #{}", - cursor.l1_batch - ) - })?; + let pending_batch_data = + load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) + .await + .with_context(|| { + format!( + "failed loading data for re-execution for pending L1 batch #{}", + cursor.l1_batch + ) + })?; - let PendingBatchData { - l1_batch_env, - system_env, - pending_l2_blocks, - } = pending_batch_data; // Initialize the filter for the transactions that come after the pending batch. // We use values from the pending block to match the filter with one used before the restart. - let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(l1_batch_env.fee_input, system_env.version.into()); + let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + pending_batch_data.l1_batch_env.fee_input, + pending_batch_data.system_env.version.into(), + ); self.filter = L2TxFilter { - fee_input: l1_batch_env.fee_input, + fee_input: pending_batch_data.l1_batch_env.fee_input, fee_per_gas: base_fee, gas_per_pubdata: gas_per_pubdata as u32, }; - Ok(( - cursor, - Some(PendingBatchData { - l1_batch_env, - system_env, - pending_l2_blocks, - }), - )) + storage + .blocks_dal() + .ensure_unsealed_l1_batch_exists( + pending_batch_data + .l1_batch_env + .clone() + .into_unsealed_header(Some(pending_batch_data.system_env.version)), + ) + .await?; + + Ok((cursor, Some(pending_batch_data))) } async fn wait_for_new_batch_params( @@ -152,6 +154,32 @@ impl StateKeeperIO for MempoolIO { cursor: &IoCursor, max_wait: Duration, ) -> anyhow::Result> { + // Check if there is an existing unsealed batch + if let Some(unsealed_storage_batch) = self + .pool + .connection_tagged("state_keeper") + .await? + .blocks_dal() + .get_unsealed_l1_batch() + .await? + { + let protocol_version = unsealed_storage_batch + .protocol_version + .context("unsealed batch is missing protocol version")?; + return Ok(Some(L1BatchParams { + protocol_version, + validation_computational_gas_limit: self.validation_computational_gas_limit, + operator_address: unsealed_storage_batch.fee_address, + fee_input: unsealed_storage_batch.fee_input, + first_l2_block: L2BlockParams { + timestamp: unsealed_storage_batch.timestamp, + // This value is effectively ignored by the protocol. + virtual_blocks: 1, + }, + pubdata_params: self.pubdata_params(protocol_version)?, + })); + } + let deadline = Instant::now() + max_wait; // Block until at least one transaction in the mempool can match the filter (or timeout happens). 
@@ -195,17 +223,18 @@ impl StateKeeperIO for MempoolIO {
                 continue;
             }

-            let pubdata_params = match (
-                protocol_version.is_pre_gateway(),
-                self.l2_da_validator_address,
-            ) {
-                (true, _) => PubdataParams::default(),
-                (false, Some(l2_da_validator_address)) => PubdataParams {
-                    l2_da_validator_address,
-                    pubdata_type: self.pubdata_type,
-                },
-                (false, None) => anyhow::bail!("L2 DA validator address not found"),
-            };
+            self.pool
+                .connection_tagged("state_keeper")
+                .await?
+                .blocks_dal()
+                .insert_l1_batch(UnsealedL1BatchHeader {
+                    number: cursor.l1_batch,
+                    timestamp,
+                    protocol_version: Some(protocol_version),
+                    fee_address: self.fee_account,
+                    fee_input: self.filter.fee_input,
+                })
+                .await?;

             return Ok(Some(L1BatchParams {
                 protocol_version,
@@ -217,7 +246,7 @@ impl StateKeeperIO for MempoolIO {
                     // This value is effectively ignored by the protocol.
                     virtual_blocks: 1,
                 },
-                pubdata_params,
+                pubdata_params: self.pubdata_params(protocol_version)?,
             }));
         }
         Ok(None)
@@ -432,10 +461,10 @@ impl MempoolIO {
         pool: ConnectionPool<Core>,
         config: &StateKeeperConfig,
         fee_account: Address,
-        l2_da_validator_address: Option<Address>,
-        pubdata_type: L1BatchCommitmentMode,
         delay_interval: Duration,
         chain_id: L2ChainId,
+        l2_da_validator_address: Option<Address>
, + pubdata_type: L1BatchCommitmentMode, ) -> anyhow::Result { Ok(Self { mempool, @@ -446,15 +475,31 @@ impl MempoolIO { // ^ Will be initialized properly on the first newly opened batch l1_batch_params_provider: L1BatchParamsProvider::uninitialized(), fee_account, - l2_da_validator_address, - pubdata_type, validation_computational_gas_limit: config.validation_computational_gas_limit, max_allowed_tx_gas_limit: config.max_allowed_l2_tx_gas_limit.into(), delay_interval, batch_fee_input_provider, chain_id, + l2_da_validator_address, + pubdata_type, }) } + + fn pubdata_params(&self, protocol_version: ProtocolVersionId) -> anyhow::Result { + let pubdata_params = match ( + protocol_version.is_pre_gateway(), + self.l2_da_validator_address, + ) { + (true, _) => PubdataParams::default(), + (false, Some(l2_da_validator_address)) => PubdataParams { + l2_da_validator_address, + pubdata_type: self.pubdata_type, + }, + (false, None) => anyhow::bail!("L2 DA validator address not found"), + }; + + Ok(pubdata_params) + } } /// Getters required for testing the MempoolIO. diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index a7fd49637f2..e2461e72d7b 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -39,11 +39,12 @@ pub struct PendingBatchData { /// (e.g. timestamp) are the same, so transaction would have the same result after re-execution. pub(crate) l1_batch_env: L1BatchEnv, pub(crate) system_env: SystemEnv, + pub(crate) pubdata_params: PubdataParams, /// List of L2 blocks and corresponding transactions that were executed within batch. pub(crate) pending_l2_blocks: Vec, } -#[derive(Debug, Copy, Clone, Default)] +#[derive(Debug, Copy, Clone, Default, PartialEq)] pub struct L2BlockParams { /// The timestamp of the L2 block. pub timestamp: u64, @@ -59,7 +60,7 @@ pub struct L2BlockParams { } /// Parameters for a new L1 batch returned by [`StateKeeperIO::wait_for_new_batch_params()`]. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct L1BatchParams { /// Protocol version for the new L1 batch. pub protocol_version: ProtocolVersionId, @@ -71,7 +72,7 @@ pub struct L1BatchParams { pub fee_input: BatchFeeInput, /// Parameters of the first L2 block in the batch. pub first_l2_block: L2BlockParams, - /// Params related to how the pubdata should be processed by the bootloader in the batch + /// Params related to how the pubdata should be processed by the bootloader in the batch. 
    pub pubdata_params: PubdataParams,
 }
@@ -82,8 +83,8 @@ impl L1BatchParams {
         contracts: BaseSystemContracts,
         cursor: &IoCursor,
         previous_batch_hash: H256,
-    ) -> (SystemEnv, L1BatchEnv) {
-        l1_batch_params(
+    ) -> (SystemEnv, L1BatchEnv, PubdataParams) {
+        let (system_env, l1_batch_env) = l1_batch_params(
             cursor.l1_batch,
             self.operator_address,
             self.first_l2_block.timestamp,
@@ -96,8 +97,9 @@ impl L1BatchParams {
             self.protocol_version,
             self.first_l2_block.virtual_blocks,
             chain_id,
-            self.pubdata_params,
-        )
+        );
+
+        (system_env, l1_batch_env, self.pubdata_params)
     }
 }
diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs
index dc854c9e58f..06f1972a02a 100644
--- a/core/node/state_keeper/src/io/persistence.rs
+++ b/core/node/state_keeper/src/io/persistence.rs
@@ -7,7 +7,7 @@ use async_trait::async_trait;
 use tokio::sync::{mpsc, oneshot};
 use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_shared_metrics::{BlockStage, APP_METRICS};
-use zksync_types::{writes::TreeWrite, Address};
+use zksync_types::{writes::TreeWrite, Address, ProtocolVersionId};
 use zksync_utils::u256_to_h256;

 use crate::{
@@ -41,13 +41,45 @@ pub struct StateKeeperPersistence {
 impl StateKeeperPersistence {
     const SHUTDOWN_MSG: &'static str = "L2 block sealer unexpectedly shut down";

+    async fn validate_l2_legacy_shared_bridge_addr(
+        pool: &ConnectionPool<Core>,
+        l2_legacy_shared_bridge_addr: Option<Address>,
+    ) -> anyhow::Result<()> {
+        let mut connection = pool.connection_tagged("state_keeper").await?;
+
+        if let Some(l2_block) = connection
+            .blocks_dal()
+            .get_earliest_l2_block_number()
+            .await
+            .context("failed to load earliest l2 block number")?
+        {
+            let header = connection
+                .blocks_dal()
+                .get_l2_block_header(l2_block)
+                .await
+                .context("failed to load L2 block header")?
+                .context("missing L2 block header")?;
+            let protocol_version = header
+                .protocol_version
+                .unwrap_or_else(ProtocolVersionId::last_potentially_undefined);
+
+            if protocol_version.is_pre_gateway() && l2_legacy_shared_bridge_addr.is_none() {
+                anyhow::bail!("Missing `l2_legacy_shared_bridge_addr` for chain that was initialized before gateway upgrade");
+            }
+        }
+
+        Ok(())
+    }
+
     /// Creates a sealer that will use the provided Postgres connection and will have the specified
     /// `command_capacity` for unprocessed sealing commands.
-    pub fn new(
+    pub async fn new(
         pool: ConnectionPool<Core>,
         l2_legacy_shared_bridge_addr: Option<Address>
, mut command_capacity: usize, - ) -> (Self, L2BlockSealerTask) { + ) -> anyhow::Result<(Self, L2BlockSealerTask)> { + Self::validate_l2_legacy_shared_bridge_addr(&pool, l2_legacy_shared_bridge_addr).await?; + let is_sync = command_capacity == 0; command_capacity = command_capacity.max(1); @@ -67,7 +99,7 @@ impl StateKeeperPersistence { latest_completion_receiver: None, is_sync, }; - (this, sealer) + Ok((this, sealer)) } pub fn with_tx_insertion(mut self) -> Self { @@ -347,7 +379,7 @@ impl StateKeeperOutputHandler for TreeWritesPersistence { #[cfg(test)] mod tests { - use std::collections::HashSet; + use std::collections::{HashMap, HashSet}; use assert_matches::assert_matches; use futures::FutureExt; @@ -396,11 +428,13 @@ mod tests { pool.clone(), Some(Address::default()), l2_block_sealer_capacity, - ); + ) + .await + .unwrap(); let mut output_handler = OutputHandler::new(Box::new(persistence)) .with_handler(Box::new(TreeWritesPersistence::new(pool.clone()))); tokio::spawn(l2_block_sealer.run()); - execute_mock_batch(&mut output_handler).await; + execute_mock_batch(&mut output_handler, &pool).await; // Check that L2 block #1 and L1 batch #1 are persisted. let mut storage = pool.connection().await.unwrap(); @@ -449,9 +483,20 @@ mod tests { assert_eq!(actual_index, expected_index); } - async fn execute_mock_batch(output_handler: &mut OutputHandler) -> H256 { + async fn execute_mock_batch( + output_handler: &mut OutputHandler, + pool: &ConnectionPool, + ) -> H256 { let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); - let mut updates = UpdatesManager::new(&l1_batch_env, &default_system_env()); + let mut updates = + UpdatesManager::new(&l1_batch_env, &default_system_env(), Default::default()); + pool.connection() + .await + .unwrap() + .blocks_dal() + .insert_l1_batch(l1_batch_env.into_unsealed_header(None)) + .await + .unwrap(); let tx = create_transaction(10, 100); let tx_hash = tx.hash(); @@ -465,6 +510,7 @@ mod tests { tx, tx_result, vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], @@ -530,12 +576,14 @@ mod tests { drop(storage); let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Some(Address::default()), 1); + StateKeeperPersistence::new(pool.clone(), Some(Address::default()), 1) + .await + .unwrap(); persistence = persistence.with_tx_insertion().without_protective_reads(); let mut output_handler = OutputHandler::new(Box::new(persistence)); tokio::spawn(l2_block_sealer.run()); - let tx_hash = execute_mock_batch(&mut output_handler).await; + let tx_hash = execute_mock_batch(&mut output_handler, &pool).await; // Check that the transaction is persisted. let mut storage = pool.connection().await.unwrap(); @@ -569,7 +617,9 @@ mod tests { async fn l2_block_sealer_handle_blocking() { let pool = ConnectionPool::constrained_test_pool(1).await; let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Some(Address::default()), 1); + StateKeeperPersistence::new(pool, Some(Address::default()), 1) + .await + .unwrap(); // The first command should be successfully submitted immediately. 
let mut updates_manager = create_updates_manager(); @@ -620,7 +670,9 @@ mod tests { async fn l2_block_sealer_handle_parallel_processing() { let pool = ConnectionPool::constrained_test_pool(1).await; let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Some(Address::default()), 5); + StateKeeperPersistence::new(pool, Some(Address::default()), 5) + .await + .unwrap(); // 5 L2 block sealing commands can be submitted without blocking. let mut updates_manager = create_updates_manager(); diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 9a57da5e18d..4fc58bce5c9 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -333,12 +333,11 @@ impl L2BlockSealSubtask for InsertTokensSubtask { connection: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { let is_fictive = command.is_l2_block_fictive(); + let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::ExtractAddedTokens, is_fictive); let token_deployer_address = command .l2_legacy_shared_bridge_addr .unwrap_or(L2_NATIVE_TOKEN_VAULT_ADDRESS); - let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::ExtractAddedTokens, is_fictive); let added_tokens = extract_added_tokens(token_deployer_address, &command.l2_block.events); - progress.observe(added_tokens.len()); let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::InsertTokens, is_fictive); @@ -550,7 +549,6 @@ mod tests { virtual_blocks: Default::default(), protocol_version: ProtocolVersionId::latest(), }, - pubdata_params: PubdataParams::default(), first_tx_index: 0, fee_account_address: Default::default(), fee_input: Default::default(), @@ -559,6 +557,7 @@ mod tests { protocol_version: Some(ProtocolVersionId::latest()), l2_legacy_shared_bridge_addr: Default::default(), pre_insert_txs: false, + pubdata_params: PubdataParams::default(), }; // Run. 
@@ -614,7 +613,6 @@ mod tests { l2_tx_count: 1, fee_account_address: l2_block_seal_command.fee_account_address, base_fee_per_gas: l2_block_seal_command.base_fee_per_gas, - pubdata_params: l2_block_seal_command.pubdata_params, batch_fee_input: l2_block_seal_command.fee_input, base_system_contracts_hashes: l2_block_seal_command.base_system_contracts_hashes, protocol_version: l2_block_seal_command.protocol_version, @@ -622,6 +620,7 @@ mod tests { virtual_blocks: l2_block_seal_command.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(VmVersion::latest()), logs_bloom: Default::default(), + pubdata_params: l2_block_seal_command.pubdata_params, }; connection .protocol_versions_dal() diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index dce0ae090de..7f05bda7a6f 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -132,6 +132,7 @@ impl UpdatesManager { protocol_version: Some(self.protocol_version()), system_logs: finished_batch.final_execution_state.system_logs.clone(), pubdata_input: finished_batch.pubdata_input.clone(), + fee_address: self.fee_account_address, }; let final_bootloader_memory = finished_batch @@ -141,7 +142,7 @@ impl UpdatesManager { transaction .blocks_dal() - .insert_l1_batch( + .mark_l1_batch_as_sealed( &l1_batch, &final_bootloader_memory, self.pending_l1_gas_count(), @@ -382,7 +383,6 @@ impl L2BlockSealCommand { l1_tx_count: l1_tx_count as u16, l2_tx_count: l2_tx_count as u16, fee_account_address: self.fee_account_address, - pubdata_params: self.pubdata_params, base_fee_per_gas: self.base_fee_per_gas, batch_fee_input: self.fee_input, base_system_contracts_hashes: self.base_system_contracts_hashes, @@ -391,6 +391,7 @@ impl L2BlockSealCommand { virtual_blocks: self.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(definite_vm_version), logs_bloom, + pubdata_params: self.pubdata_params, }; let mut connection = strategy.connection().await?; diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 4cf4a7b5a70..ece5b67767f 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{collections::HashMap, time::Duration}; use test_casing::test_casing; use zksync_contracts::BaseSystemContractsHashes; @@ -249,6 +249,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); @@ -267,6 +268,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); @@ -282,11 +284,11 @@ async fn processing_storage_logs_when_sealing_l2_block() { fair_pubdata_price: 100, }), base_fee_per_gas: 10, - pubdata_params: Default::default(), base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_legacy_shared_bridge_addr: Default::default(), + l2_legacy_shared_bridge_addr: Some(Address::default()), pre_insert_txs: false, + pubdata_params: Default::default(), }; connection_pool .connection() @@ -357,6 +359,7 @@ async fn processing_events_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); } @@ -371,12 +374,12 @@ async fn processing_events_when_sealing_l2_block() { fair_l2_gas_price: 100, 
fair_pubdata_price: 100, }), - pubdata_params: Default::default(), base_fee_per_gas: 10, base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_legacy_shared_bridge_addr: Default::default(), + l2_legacy_shared_bridge_addr: Some(Address::default()), pre_insert_txs: false, + pubdata_params: Default::default(), }; pool.connection() .await @@ -446,26 +449,29 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom .await .unwrap() .expect("no batch params generated"); - let (system_env, l1_batch_env) = l1_batch_params.into_env( + let (system_env, l1_batch_env, pubdata_params) = l1_batch_params.into_env( L2ChainId::default(), BASE_SYSTEM_CONTRACTS.clone(), &cursor, previous_batch_hash, ); - let mut updates = UpdatesManager::new(&l1_batch_env, &system_env); + let mut updates = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); let tx_hash = tx.hash(); updates.extend_from_executed_transaction( tx.into(), create_execution_result([]), vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], ); let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(connection_pool.clone(), Some(Address::default()), 0); + StateKeeperPersistence::new(connection_pool.clone(), Some(Address::default()), 0) + .await + .unwrap(); tokio::spawn(l2_block_sealer.run()); persistence.handle_l2_block(&updates).await.unwrap(); @@ -554,3 +560,87 @@ async fn different_timestamp_for_l2_blocks_in_same_batch(commitment_mode: L1Batc .expect("no new L2 block params"); assert!(l2_block_params.timestamp > current_timestamp); } + +#[test_casing(2, COMMITMENT_MODES)] +#[tokio::test] +async fn continue_unsealed_batch_on_restart(commitment_mode: L1BatchCommitmentMode) { + let connection_pool = ConnectionPool::::test_pool().await; + let tester = Tester::new(commitment_mode); + tester.genesis(&connection_pool).await; + let mut storage = connection_pool.connection().await.unwrap(); + + let (mut mempool, mut mempool_guard) = + tester.create_test_mempool_io(connection_pool.clone()).await; + let (cursor, _) = mempool.initialize().await.unwrap(); + + // Insert a transaction into the mempool in order to open a new batch. 
+ let tx_filter = l2_tx_filter( + &tester.create_batch_fee_input_provider().await, + ProtocolVersionId::latest().into(), + ) + .await + .unwrap(); + let tx = tester.insert_tx( + &mut mempool_guard, + tx_filter.fee_per_gas, + tx_filter.gas_per_pubdata, + ); + storage + .transactions_dal() + .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .await + .unwrap(); + + let old_l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + + // Restart + drop((mempool, mempool_guard, cursor)); + let (mut mempool, _) = tester.create_test_mempool_io(connection_pool.clone()).await; + let (cursor, _) = mempool.initialize().await.unwrap(); + + let new_l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + + assert_eq!(old_l1_batch_params, new_l1_batch_params); +} + +#[test_casing(2, COMMITMENT_MODES)] +#[tokio::test] +async fn insert_unsealed_batch_on_init(commitment_mode: L1BatchCommitmentMode) { + let connection_pool = ConnectionPool::::test_pool().await; + let mut tester = Tester::new(commitment_mode); + tester.genesis(&connection_pool).await; + let fee_input = BatchFeeInput::pubdata_independent(55, 555, 5555); + let tx_result = tester + .insert_l2_block(&connection_pool, 1, 5, fee_input) + .await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; + // Pre-insert L2 block without its unsealed L1 batch counterpart + tester.set_timestamp(2); + tester + .insert_l2_block(&connection_pool, 2, 5, fee_input) + .await; + + let (mut mempool, _) = tester.create_test_mempool_io(connection_pool.clone()).await; + // Initialization is supposed to recognize that the current L1 batch is not present in the DB and + // insert it itself. 
+ let (cursor, _) = mempool.initialize().await.unwrap(); + + // Make sure we are able to fetch the newly inserted batch's params + let l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + assert_eq!(l1_batch_params.fee_input, fee_input); + assert_eq!(l1_batch_params.first_l2_block.timestamp, 2); +} diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 6b7fc260b66..daedbebc75e 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -4,7 +4,7 @@ use std::{slice, sync::Arc, time::Duration}; use zksync_base_token_adjuster::NoOpRatioProvider; use zksync_config::{ - configs::{chain::StateKeeperConfig, eth_sender::PubdataSendingMode, wallets::Wallets}, + configs::{chain::StateKeeperConfig, wallets::Wallets}, GasAdjusterConfig, }; use zksync_contracts::BaseSystemContracts; @@ -25,9 +25,10 @@ use zksync_node_test_utils::{ use zksync_types::{ block::L2BlockHeader, commitment::L1BatchCommitmentMode, - fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV1}, + fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV2}, l2::L2Tx, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, + pubdata_da::PubdataSendingMode, system_contracts::get_system_smart_contracts, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, H256, }; @@ -97,8 +98,13 @@ impl Tester { MainNodeFeeInputProvider::new( gas_adjuster, Arc::new(NoOpRatioProvider::default()), - FeeModelConfig::V1(FeeModelConfigV1 { + FeeModelConfig::V2(FeeModelConfigV2 { minimal_l2_gas_price: self.minimal_l2_gas_price(), + compute_overhead_part: 1.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 10, + max_gas_per_batch: 500_000_000_000, + max_pubdata_per_batch: 100_000_000_000, }), ) } @@ -116,8 +122,13 @@ impl Tester { let batch_fee_input_provider = MainNodeFeeInputProvider::new( gas_adjuster, Arc::new(NoOpRatioProvider::default()), - FeeModelConfig::V1(FeeModelConfigV1 { + FeeModelConfig::V2(FeeModelConfigV2 { minimal_l2_gas_price: self.minimal_l2_gas_price(), + compute_overhead_part: 1.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 10, + max_gas_per_batch: 500_000_000_000, + max_pubdata_per_batch: 100_000_000_000, }), ); @@ -134,10 +145,10 @@ impl Tester { pool, &config, wallets.state_keeper.unwrap().fee_account.address(), - Some(Default::default()), - Default::default(), Duration::from_secs(1), L2ChainId::from(270), + Some(Default::default()), + Default::default(), ) .unwrap(); @@ -158,7 +169,7 @@ impl Tester { patch: 0.into(), }, &self.base_system_contracts, - &get_system_smart_contracts(), + &get_system_smart_contracts(false), L1VerifierConfig::default(), ) .await diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index b40b8f304ff..49a05cc93e3 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -17,8 +17,9 @@ use zksync_multivm::{ use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state::{OwnedStorage, ReadStorageFactory}; use zksync_types::{ - block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, - protocol_version::ProtocolVersionId, utils::display_timestamp, L1BatchNumber, Transaction, + block::L2BlockExecutionData, commitment::PubdataParams, l2::TransactionType, + protocol_upgrade::ProtocolUpgradeTx, protocol_version::ProtocolVersionId, + utils::display_timestamp, 
L1BatchNumber, Transaction, }; use crate::{ @@ -116,6 +117,7 @@ impl ZkSyncStateKeeper { let PendingBatchData { mut l1_batch_env, mut system_env, + mut pubdata_params, pending_l2_blocks, } = match pending_batch_params { Some(params) => { @@ -132,7 +134,7 @@ impl ZkSyncStateKeeper { } None => { tracing::info!("There is no open pending batch, starting a new empty batch"); - let (system_env, l1_batch_env) = self + let (system_env, l1_batch_env, pubdata_params) = self .wait_for_new_batch_env(&cursor) .await .map_err(|e| e.context("wait_for_new_batch_params()"))?; @@ -140,18 +142,19 @@ impl ZkSyncStateKeeper { l1_batch_env, pending_l2_blocks: Vec::new(), system_env, + pubdata_params, } } }; let protocol_version = system_env.version; - let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); let mut protocol_upgrade_tx: Option = self .load_protocol_upgrade_tx(&pending_l2_blocks, protocol_version, l1_batch_env.number) .await?; let mut batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .create_batch_executor(l1_batch_env.clone(), system_env.clone(), pubdata_params) .await?; self.restore_state( &mut *batch_executor, @@ -201,10 +204,11 @@ impl ZkSyncStateKeeper { // Start the new batch. next_cursor.l1_batch += 1; - (system_env, l1_batch_env) = self.wait_for_new_batch_env(&next_cursor).await?; - updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + (system_env, l1_batch_env, pubdata_params) = + self.wait_for_new_batch_env(&next_cursor).await?; + updates_manager = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .create_batch_executor(l1_batch_env.clone(), system_env.clone(), pubdata_params) .await?; let version_changed = system_env.version != sealed_batch_protocol_version; @@ -221,6 +225,7 @@ impl ZkSyncStateKeeper { &mut self, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Result>, Error> { let storage = self .storage_factory @@ -230,7 +235,7 @@ impl ZkSyncStateKeeper { .ok_or(Error::Canceled)?; Ok(self .batch_executor - .init_batch(storage, l1_batch_env, system_env)) + .init_batch(storage, l1_batch_env, system_env, pubdata_params)) } /// This function is meant to be called only once during the state-keeper initialization. @@ -327,7 +332,7 @@ impl ZkSyncStateKeeper { async fn wait_for_new_batch_env( &mut self, cursor: &IoCursor, - ) -> Result<(SystemEnv, L1BatchEnv), Error> { + ) -> Result<(SystemEnv, L1BatchEnv, PubdataParams), Error> { // `io.wait_for_new_batch_params(..)` is not cancel-safe; once we get new batch params, we must hold onto them // until we get the rest of parameters from I/O or receive a stop signal. 
let params = self.wait_for_new_batch_params(cursor).await?; @@ -498,8 +503,9 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -624,8 +630,9 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -685,6 +692,7 @@ impl ZkSyncStateKeeper { tx_result, tx_metrics, compressed_bytecodes, + call_tracer_result, .. } = exec_result else { @@ -704,11 +712,12 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, - vec![], + call_tracer_result, ); Ok(()) } diff --git a/core/node/state_keeper/src/mempool_actor.rs b/core/node/state_keeper/src/mempool_actor.rs index dbe1e4cb977..a17f2670cbb 100644 --- a/core/node/state_keeper/src/mempool_actor.rs +++ b/core/node/state_keeper/src/mempool_actor.rs @@ -89,20 +89,35 @@ impl MempoolFetcher { .await .context("failed getting pending protocol version")?; - let l2_tx_filter = l2_tx_filter( - self.batch_fee_input_provider.as_ref(), - protocol_version.into(), - ) - .await - .context("failed creating L2 transaction filter")?; + let (fee_per_gas, gas_per_pubdata) = if let Some(unsealed_batch) = storage + .blocks_dal() + .get_unsealed_l1_batch() + .await + .context("failed getting unsealed batch")? + { + let (fee_per_gas, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + unsealed_batch.fee_input, + protocol_version.into(), + ); + (fee_per_gas, gas_per_pubdata as u32) + } else { + let filter = l2_tx_filter( + self.batch_fee_input_provider.as_ref(), + protocol_version.into(), + ) + .await + .context("failed creating L2 transaction filter")?; + + (filter.fee_per_gas, filter.gas_per_pubdata) + }; let transactions = storage .transactions_dal() .sync_mempool( &mempool_info.stashed_accounts, &mempool_info.purged_accounts, - l2_tx_filter.gas_per_pubdata, - l2_tx_filter.fee_per_gas, + gas_per_pubdata, + fee_per_gas, self.sync_batch_size, ) .await diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index e3fe849e802..962cc807318 100644 --- a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -277,6 +277,8 @@ impl L2BlockMaxPayloadSizeSealer { #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_utils::time::seconds_since_epoch; use super::*; @@ -287,6 +289,7 @@ mod tests { tx, create_execution_result([]), vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index efa8d738ad0..7023463df0e 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -16,9 +16,10 @@ use zksync_multivm::interface::{ use zksync_state::OwnedStorage; use zksync_test_account::Account; use zksync_types::{ - fee::Fee, get_code_key, get_known_code_key, utils::storage_key_for_standard_token_balance, - AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, 
PriorityOpId, StorageLog, - Transaction, H256, L2_BASE_TOKEN_ADDRESS, SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, + commitment::PubdataParams, fee::Fee, get_code_key, get_known_code_key, + utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, L1BatchNumber, + L2BlockNumber, PriorityOpId, StorageLog, Transaction, H256, L2_BASE_TOKEN_ADDRESS, + SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; @@ -35,6 +36,7 @@ pub(crate) fn successful_exec() -> BatchTransactionExecutionResult { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], @@ -51,6 +53,7 @@ impl BatchExecutorFactory for MockBatchExecutor { _storage: OwnedStorage, _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, + _pubdata_params: PubdataParams, ) -> Box> { Box::new(Self) } diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index 4a58e9e0a95..45787b18f3c 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -27,8 +27,9 @@ use zksync_multivm::{ use zksync_node_test_utils::create_l2_transaction; use zksync_state::{interface::StorageView, OwnedStorage, ReadStorageFactory}; use zksync_types::{ - fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, protocol_upgrade::ProtocolUpgradeTx, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, + commitment::PubdataParams, fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, + protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, Transaction, H256, }; use crate::{ @@ -264,6 +265,7 @@ pub(crate) fn successful_exec_with_log() -> BatchTransactionExecutionResult { }, statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], @@ -278,6 +280,7 @@ pub(crate) fn rejected_exec(reason: Halt) -> BatchTransactionExecutionResult { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], @@ -421,6 +424,7 @@ impl BatchExecutorFactory for TestBatchExecutorBuilder { _storage: OwnedStorage, _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, + _pubdata_params: PubdataParams, ) -> Box> { let executor = TestBatchExecutor::new(self.txs.pop_front().unwrap(), self.rollback_set.clone()); diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index 4a5a099d977..a1973aaed11 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -58,8 +58,8 @@ pub(crate) fn pending_batch_data(pending_l2_blocks: Vec) - execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), - pubdata_params: Default::default(), }, + pubdata_params: Default::default(), pending_l2_blocks, } } @@ -73,7 +73,6 @@ pub(super) fn default_system_env() -> SystemEnv { execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), - pubdata_params: Default::default(), } } @@ -104,7 +103,7 @@ pub(super) fn default_l1_batch_env( pub(super) fn 
create_updates_manager() -> UpdatesManager {
     let l1_batch_env = default_l1_batch_env(1, 1, Address::default());
-    UpdatesManager::new(&l1_batch_env, &default_system_env())
+    UpdatesManager::new(&l1_batch_env, &default_system_env(), Default::default())
 }
 
 pub(super) fn create_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> Transaction {
@@ -140,6 +139,7 @@ pub(super) fn create_execution_result(
             circuit_statistic: Default::default(),
         },
         refunds: Refunds::default(),
+        new_known_factory_deps: None,
     }
 }
 
diff --git a/core/node/state_keeper/src/updates/l1_batch_updates.rs b/core/node/state_keeper/src/updates/l1_batch_updates.rs
index aa2e22cac48..2979ebbd8c2 100644
--- a/core/node/state_keeper/src/updates/l1_batch_updates.rs
+++ b/core/node/state_keeper/src/updates/l1_batch_updates.rs
@@ -49,6 +49,8 @@ impl L1BatchUpdates {
 
 #[cfg(test)]
 mod tests {
+    use std::collections::HashMap;
+
     use zksync_multivm::vm_latest::TransactionVmExt;
     use zksync_types::{L2BlockNumber, ProtocolVersionId, H256};
 
@@ -76,6 +78,7 @@ mod tests {
             BlockGasCount::default(),
             VmExecutionMetrics::default(),
             vec![],
+            HashMap::new(),
             vec![],
         );
 
diff --git a/core/node/state_keeper/src/updates/l2_block_updates.rs b/core/node/state_keeper/src/updates/l2_block_updates.rs
index d8673088dc3..27995b384ab 100644
--- a/core/node/state_keeper/src/updates/l2_block_updates.rs
+++ b/core/node/state_keeper/src/updates/l2_block_updates.rs
@@ -1,17 +1,14 @@
 use std::collections::HashMap;
 
-use once_cell::sync::Lazy;
 use zksync_multivm::{
     interface::{
         Call, CompressedBytecodeInfo, ExecutionResult, L2BlockEnv, TransactionExecutionResult,
         TxExecutionStatus, VmEvent, VmExecutionMetrics, VmExecutionResultAndLogs,
     },
-    vm_latest::TransactionVmExt,
+    vm_latest::{utils::extract_bytecodes_marked_as_known, TransactionVmExt},
 };
-use zksync_system_constants::KNOWN_CODES_STORAGE_ADDRESS;
 use zksync_types::{
     block::{BlockGasCount, L2BlockHasher},
-    ethabi,
     l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log},
     L2BlockNumber, ProtocolVersionId, StorageLogWithPreviousValue, Transaction, H256,
 };
@@ -19,27 +16,6 @@ use zksync_utils::bytecode::hash_bytecode;
 
 use crate::metrics::KEEPER_METRICS;
 
-/// Extracts all bytecodes marked as known on the system contracts.
-fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec<H256> {
-    static PUBLISHED_BYTECODE_SIGNATURE: Lazy<H256> = Lazy::new(|| {
-        ethabi::long_signature(
-            "MarkedAsKnown",
-            &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool],
-        )
-    });
-
-    all_generated_events
-        .iter()
-        .filter(|event| {
-            // Filter events from the deployer contract that match the expected signature.
-            event.address == KNOWN_CODES_STORAGE_ADDRESS
-                && event.indexed_topics.len() == 3
-                && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE
-        })
-        .map(|event| event.indexed_topics[1])
-        .collect()
-}
-
 #[derive(Debug, Clone, PartialEq)]
 pub struct L2BlockUpdates {
     pub executed_transactions: Vec<TransactionExecutionResult>,
@@ -104,6 +80,7 @@ impl L2BlockUpdates {
         self.block_execution_metrics += execution_metrics;
     }
 
+    #[allow(clippy::too_many_arguments)]
     pub(crate) fn extend_from_executed_transaction(
         &mut self,
         tx: Transaction,
@@ -111,6 +88,7 @@
         tx_l1_gas_this_tx: BlockGasCount,
         execution_metrics: VmExecutionMetrics,
         compressed_bytecodes: Vec<CompressedBytecodeInfo>,
+        new_known_factory_deps: HashMap<H256, Vec<u8>>,
         call_traces: Vec<Call>,
     ) {
         let saved_factory_deps =
@@ -145,12 +123,15 @@
         // Get transaction factory deps
         let factory_deps = &tx.execute.factory_deps;
-        let tx_factory_deps: HashMap<_, _> = factory_deps
+        let mut tx_factory_deps: HashMap<_, _> = factory_deps
             .iter()
-            .map(|bytecode| (hash_bytecode(bytecode), bytecode))
+            .map(|bytecode| (hash_bytecode(bytecode), bytecode.clone()))
             .collect();
+        // Ensure that *dynamic* factory deps (ones that may be created when executing EVM contracts)
+        // are added into the lookup map as well.
+        tx_factory_deps.extend(new_known_factory_deps);
 
-        // Save all bytecodes that were marked as known on the bootloader
+        // Save all bytecodes that were marked as known in the bootloader
         let known_bytecodes = saved_factory_deps.into_iter().map(|bytecode_hash| {
             let bytecode = tx_factory_deps.get(&bytecode_hash).unwrap_or_else(|| {
                 panic!(
@@ -230,6 +211,7 @@ mod tests {
             BlockGasCount::default(),
             VmExecutionMetrics::default(),
             vec![],
+            HashMap::new(),
             vec![],
         );
 
diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs
index 9d12d86de6f..b1bd35c921c 100644
--- a/core/node/state_keeper/src/updates/mod.rs
+++ b/core/node/state_keeper/src/updates/mod.rs
@@ -1,3 +1,5 @@
+use std::collections::HashMap;
+
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_multivm::{
     interface::{
@@ -8,7 +10,7 @@ use zksync_multivm::{
 };
 use zksync_types::{
     block::BlockGasCount, commitment::PubdataParams, fee_model::BatchFeeInput, Address,
-    L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction,
+    L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256,
 };
 
 pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates};
@@ -30,20 +32,24 @@ pub mod l2_block_updates;
 #[derive(Debug)]
 pub struct UpdatesManager {
     batch_timestamp: u64,
-    fee_account_address: Address,
+    pub fee_account_address: Address,
     batch_fee_input: BatchFeeInput,
     base_fee_per_gas: u64,
     base_system_contract_hashes: BaseSystemContractsHashes,
-    pubdata_params: PubdataParams,
     protocol_version: ProtocolVersionId,
     storage_view_cache: Option<StorageViewCache>,
     pub l1_batch: L1BatchUpdates,
     pub l2_block: L2BlockUpdates,
     pub storage_writes_deduplicator: StorageWritesDeduplicator,
+    pubdata_params: PubdataParams,
 }
 
 impl UpdatesManager {
-    pub fn new(l1_batch_env: &L1BatchEnv, system_env: &SystemEnv) -> Self {
+    pub fn new(
+        l1_batch_env: &L1BatchEnv,
+        system_env: &SystemEnv,
+        pubdata_params: PubdataParams,
+    ) -> Self {
         let protocol_version = system_env.version;
         Self {
             batch_timestamp: l1_batch_env.timestamp,
@@ -52,7 +58,6 @@ impl UpdatesManager {
             base_fee_per_gas: get_batch_base_fee(l1_batch_env, protocol_version.into()),
             protocol_version,
             base_system_contract_hashes: system_env.base_system_smart_contracts.hashes(),
-            pubdata_params: system_env.pubdata_params,
             l1_batch: L1BatchUpdates::new(l1_batch_env.number),
             l2_block: L2BlockUpdates::new(
                 l1_batch_env.first_l2_block.timestamp,
@@ -63,6 +68,7 @@
             ),
             storage_writes_deduplicator: StorageWritesDeduplicator::new(),
             storage_view_cache: None,
+            pubdata_params,
         }
     }
 
@@ -95,11 +101,11 @@
             fee_account_address: self.fee_account_address,
             fee_input: self.batch_fee_input,
             base_fee_per_gas: self.base_fee_per_gas,
-            pubdata_params: self.pubdata_params,
             base_system_contracts_hashes: self.base_system_contract_hashes,
             protocol_version: Some(self.protocol_version),
             l2_legacy_shared_bridge_addr,
             pre_insert_txs,
+            pubdata_params: self.pubdata_params,
         }
     }
 
@@ -107,11 +113,13 @@
         self.protocol_version
     }
 
+    #[allow(clippy::too_many_arguments)]
     pub fn extend_from_executed_transaction(
         &mut self,
         tx: Transaction,
         tx_execution_result: VmExecutionResultAndLogs,
         compressed_bytecodes: Vec<CompressedBytecodeInfo>,
+        new_known_factory_deps: HashMap<H256, Vec<u8>>,
         tx_l1_gas_this_tx: BlockGasCount,
         execution_metrics: VmExecutionMetrics,
         call_traces: Vec<Call>,
@@ -127,6 +135,7 @@
             tx_l1_gas_this_tx,
             execution_metrics,
             compressed_bytecodes,
+            new_known_factory_deps,
             call_traces,
         );
         latency.observe();
@@ -210,11 +219,11 @@ pub struct L2BlockSealCommand {
     pub base_system_contracts_hashes: BaseSystemContractsHashes,
     pub protocol_version: Option<ProtocolVersionId>,
     pub l2_legacy_shared_bridge_addr: Option<Address>
, - pub pubdata_params: PubdataParams, /// Whether transactions should be pre-inserted to DB. /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB /// before they are included into L2 blocks. pub pre_insert_txs: bool, + pub pubdata_params: PubdataParams, } #[cfg(test)] @@ -237,6 +246,7 @@ mod tests { tx, create_execution_result([]), vec![], + HashMap::new(), new_block_gas_count(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/tee_verifier_input_producer/Cargo.toml b/core/node/tee_verifier_input_producer/Cargo.toml deleted file mode 100644 index 7a5a4de5d0c..00000000000 --- a/core/node/tee_verifier_input_producer/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "zksync_tee_verifier_input_producer" -description = "ZKsync TEE verifier input producer" -version.workspace = true -edition.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -license.workspace = true -keywords.workspace = true -categories.workspace = true - -[dependencies] -zksync_dal.workspace = true -zksync_object_store.workspace = true -zksync_prover_interface.workspace = true -zksync_queued_job_processor.workspace = true -zksync_tee_verifier.workspace = true -zksync_types.workspace = true -zksync_utils.workspace = true -zksync_vm_executor.workspace = true -vise.workspace = true - -anyhow.workspace = true -async-trait.workspace = true -tracing.workspace = true -tokio = { workspace = true, features = ["time"] } diff --git a/core/node/tee_verifier_input_producer/README.md b/core/node/tee_verifier_input_producer/README.md deleted file mode 100644 index 75a2029985c..00000000000 --- a/core/node/tee_verifier_input_producer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `zksync_tee_verifier_input_producer` - -Component responsible for producing inputs for verification of execution in TEE. diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs deleted file mode 100644 index 8a99aa07ae5..00000000000 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ /dev/null @@ -1,261 +0,0 @@ -//! Produces input for a TEE Verifier -//! -//! Extract all data needed to re-execute and verify an L1Batch without accessing -//! the DB and/or the object store. -//! -//! For testing purposes, the L1 batch is re-executed immediately for now. -//! Eventually, this component will only extract the inputs and send them to another -//! machine over a "to be defined" channel, e.g., save them to an object store. - -use std::{sync::Arc, time::Instant}; - -use anyhow::Context; -use async_trait::async_trait; -use tokio::task::JoinHandle; -use zksync_dal::{tee_verifier_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Core, CoreDal}; -use zksync_object_store::ObjectStore; -use zksync_prover_interface::inputs::{ - TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths, -}; -use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier::Verify; -use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; -use zksync_utils::u256_to_h256; -use zksync_vm_executor::storage::L1BatchParamsProvider; - -use self::metrics::METRICS; - -mod metrics; - -/// Component that extracts all data (from DB) necessary to run a TEE Verifier. 
-#[derive(Debug)] -pub struct TeeVerifierInputProducer { - connection_pool: ConnectionPool, - l2_chain_id: L2ChainId, - object_store: Arc, -} - -impl TeeVerifierInputProducer { - pub async fn new( - connection_pool: ConnectionPool, - object_store: Arc, - l2_chain_id: L2ChainId, - ) -> anyhow::Result { - Ok(TeeVerifierInputProducer { - connection_pool, - object_store, - l2_chain_id, - }) - } - - async fn process_job_impl( - l1_batch_number: L1BatchNumber, - started_at: Instant, - connection_pool: ConnectionPool, - object_store: Arc, - l2_chain_id: L2ChainId, - ) -> anyhow::Result { - let prepare_basic_circuits_job: WitnessInputMerklePaths = object_store - .get(l1_batch_number) - .await - .context("failed to get PrepareBasicCircuitsJob from object store")?; - - let mut connection = connection_pool - .connection() - .await - .context("failed to get connection for TeeVerifierInputProducer")?; - - let l2_blocks_execution_data = connection - .transactions_dal() - .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) - .await?; - - let l1_batch_header = connection - .blocks_dal() - .get_l1_batch_header(l1_batch_number) - .await - .with_context(|| format!("header is missing for L1 batch #{l1_batch_number}"))? - .unwrap(); - - let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) - .await - .context("failed initializing L1 batch params provider")?; - - // In the state keeper, this value is used to reject execution. - // All batches have already been executed by State Keeper. - // This means we don't want to reject any execution, therefore we're using MAX as an allow all. - let validation_computational_gas_limit = u32::MAX; - - let (system_env, l1_batch_env) = l1_batch_params_provider - .load_l1_batch_env( - &mut connection, - l1_batch_number, - validation_computational_gas_limit, - l2_chain_id, - ) - .await? - .with_context(|| format!("expected L1 batch #{l1_batch_number} to be sealed"))?; - - let used_contract_hashes = l1_batch_header - .used_contract_hashes - .into_iter() - .map(u256_to_h256) - .collect(); - - // `get_factory_deps()` returns the bytecode in chunks of `Vec<[u8; 32]>`, - // but `fn store_factory_dep(&mut self, hash: H256, bytecode: Vec)` in `InMemoryStorage` wants flat byte vecs. 
- pub fn into_flattened(data: Vec<[T; N]>) -> Vec { - let mut new = Vec::new(); - for slice in data.iter() { - new.extend_from_slice(slice); - } - new - } - - let used_contracts = connection - .factory_deps_dal() - .get_factory_deps(&used_contract_hashes) - .await - .into_iter() - .map(|(hash, bytes)| (u256_to_h256(hash), into_flattened(bytes))) - .collect(); - - tracing::info!("Started execution of l1_batch: {l1_batch_number:?}"); - - let tee_verifier_input = V1TeeVerifierInput::new( - prepare_basic_circuits_job, - l2_blocks_execution_data, - l1_batch_env, - system_env, - used_contracts, - ); - - // TODO (SEC-263): remove these 2 lines after successful testnet runs - tee_verifier_input.clone().verify()?; - tracing::info!("Looks like we verified {l1_batch_number} correctly"); - - tracing::info!("Finished execution of l1_batch: {l1_batch_number:?}"); - - METRICS.process_batch_time.observe(started_at.elapsed()); - tracing::debug!( - "TeeVerifierInputProducer took {:?} for L1BatchNumber {}", - started_at.elapsed(), - l1_batch_number.0 - ); - - Ok(TeeVerifierInput::new(tee_verifier_input)) - } -} - -#[async_trait] -impl JobProcessor for TeeVerifierInputProducer { - type Job = L1BatchNumber; - type JobId = L1BatchNumber; - type JobArtifacts = TeeVerifierInput; - const SERVICE_NAME: &'static str = "tee_verifier_input_producer"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut connection = self.connection_pool.connection().await?; - let l1_batch_to_process = connection - .tee_verifier_input_producer_dal() - .get_next_tee_verifier_input_producer_job() - .await - .context("failed to get next basic witness input producer job")?; - Ok(l1_batch_to_process.map(|number| (number, number))) - } - - async fn save_failure(&self, job_id: Self::JobId, started_at: Instant, error: String) { - let attempts = self - .connection_pool - .connection() - .await - .unwrap() - .tee_verifier_input_producer_dal() - .mark_job_as_failed(job_id, started_at, error) - .await - .expect("errored whilst marking job as failed"); - if let Some(tries) = attempts { - tracing::warn!("Failed to process job: {job_id:?}, after {tries} tries."); - } else { - tracing::warn!("L1 Batch {job_id:?} was processed successfully by another worker."); - } - } - - async fn process_job( - &self, - _job_id: &Self::JobId, - job: Self::Job, - started_at: Instant, - ) -> JoinHandle> { - let l2_chain_id = self.l2_chain_id; - let connection_pool = self.connection_pool.clone(); - let object_store = self.object_store.clone(); - tokio::task::spawn(async move { - Self::process_job_impl( - job, - started_at, - connection_pool.clone(), - object_store, - l2_chain_id, - ) - .await - }) - } - - async fn save_result( - &self, - job_id: Self::JobId, - started_at: Instant, - artifacts: Self::JobArtifacts, - ) -> anyhow::Result<()> { - let observer: vise::LatencyObserver = METRICS.upload_input_time.start(); - let object_path = self - .object_store - .put(job_id, &artifacts) - .await - .context("failed to upload artifacts for TeeVerifierInputProducer")?; - observer.observe(); - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for TeeVerifierInputProducer")?; - let mut transaction = connection - .start_transaction() - .await - .context("failed to acquire DB transaction for TeeVerifierInputProducer")?; - transaction - .tee_verifier_input_producer_dal() - .mark_job_as_successful(job_id, started_at, &object_path) - .await - .context("failed to mark job as successful for 
TeeVerifierInputProducer")?; - transaction - .tee_proof_generation_dal() - .insert_tee_proof_generation_job(job_id, TeeType::Sgx) - .await?; - transaction - .commit() - .await - .context("failed to commit DB transaction for TeeVerifierInputProducer")?; - METRICS.block_number_processed.set(job_id.0 as u64); - Ok(()) - } - - fn max_attempts(&self) -> u32 { - JOB_MAX_ATTEMPT as u32 - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for TeeVerifierInputProducer")?; - connection - .tee_verifier_input_producer_dal() - .get_tee_verifier_input_producer_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for TeeVerifierInputProducer") - } -} diff --git a/core/node/tee_verifier_input_producer/src/metrics.rs b/core/node/tee_verifier_input_producer/src/metrics.rs deleted file mode 100644 index 362804d338e..00000000000 --- a/core/node/tee_verifier_input_producer/src/metrics.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! Metrics - -use std::time::Duration; - -use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; - -#[derive(Debug, Metrics)] -#[metrics(prefix = "tee_verifier_input_producer")] -pub(crate) struct TeeVerifierInputProducerMetrics { - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub process_batch_time: Histogram, - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub upload_input_time: Histogram, - pub block_number_processed: Gauge, -} - -#[vise::register] -pub(super) static METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index 3caadaaf573..86ce3aadd9a 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -39,13 +39,13 @@ pub fn create_l2_block(number: u32) -> L2BlockHeader { base_fee_per_gas: 100, batch_fee_input: BatchFeeInput::l1_pegged(100, 100), fee_account_address: Address::zero(), - pubdata_params: Default::default(), gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(ProtocolVersionId::latest().into()), base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), } } @@ -57,6 +57,7 @@ pub fn create_l1_batch(number: u32) -> L1BatchHeader { BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), + evm_emulator: None, }, ProtocolVersionId::latest(), ); @@ -89,6 +90,7 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash: BaseSystemContractsHashes::default().bootloader, default_aa_code_hash: BaseSystemContractsHashes::default().default_aa, + evm_emulator_code_hash: BaseSystemContractsHashes::default().evm_emulator, protocol_version: Some(ProtocolVersionId::latest()), }, aux_data_hash: H256::zero(), @@ -115,13 +117,10 @@ pub fn l1_batch_metadata_to_commitment_artifacts( commitment: metadata.commitment, }, l2_l1_merkle_root: metadata.l2_l1_merkle_root, - local_root: metadata.local_root.unwrap(), - aggregation_root: metadata.aggregation_root.unwrap(), compressed_state_diffs: Some(metadata.state_diffs_compressed.clone()), compressed_initial_writes: metadata.initial_writes_compressed.clone(), compressed_repeated_writes: metadata.repeated_writes_compressed.clone(), 
zkporter_is_available: ZKPORTER_IS_AVAILABLE, - state_diff_hash: metadata.state_diff_hash.unwrap(), aux_commitments: match ( metadata.bootloader_initial_content_commitment, metadata.events_queue_commitment, @@ -134,6 +133,9 @@ pub fn l1_batch_metadata_to_commitment_artifacts( } _ => None, }, + local_root: metadata.local_root.unwrap(), + aggregation_root: metadata.aggregation_root.unwrap(), + state_diff_hash: metadata.state_diff_hash.unwrap(), } } @@ -214,18 +216,19 @@ impl Snapshot { gas_per_pubdata_limit: get_max_gas_per_pubdata_byte( genesis_params.minor_protocol_version().into(), ), - pubdata_params: Default::default(), base_system_contracts_hashes: contracts.hashes(), protocol_version: Some(genesis_params.minor_protocol_version()), virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; Snapshot { l1_batch, l2_block, factory_deps: [&contracts.bootloader, &contracts.default_aa] .into_iter() + .chain(contracts.evm_emulator.as_ref()) .map(|c| (c.hash, zksync_utils::be_words_to_bytes(&c.code))) .collect(), storage_logs, diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 6c2933635b4..a2cf126f549 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -209,6 +209,7 @@ async fn get_updates_manager_witness_input_data( ) -> anyhow::Result { let initial_heap_content = output.batch.final_bootloader_memory.clone().unwrap(); // might be just empty let default_aa = system_env.base_system_smart_contracts.hashes().default_aa; + let evm_emulator = system_env.base_system_smart_contracts.hashes().evm_emulator; let bootloader = system_env.base_system_smart_contracts.hashes().bootloader; let bootloader_code_bytes = connection .factory_deps_dal() @@ -240,6 +241,22 @@ async fn get_updates_manager_witness_input_data( used_bytecodes.insert(account_code_hash, account_bytecode); } + let evm_emulator_code_hash = if let Some(evm_emulator) = evm_emulator { + let evm_emulator_code_hash = h256_to_u256(evm_emulator); + if used_contract_hashes.contains(&evm_emulator_code_hash) { + let evm_emulator_bytecode = connection + .factory_deps_dal() + .get_sealed_factory_dep(evm_emulator) + .await? 
+ .ok_or_else(|| anyhow!("EVM emulator bytecode should exist"))?; + let evm_emulator_bytecode = bytes_to_chunks(&evm_emulator_bytecode); + used_bytecodes.insert(evm_emulator_code_hash, evm_emulator_bytecode); + } + Some(evm_emulator_code_hash) + } else { + None + }; + let storage_refunds = output.batch.final_execution_state.storage_refunds.clone(); let pubdata_costs = output.batch.final_execution_state.pubdata_costs.clone(); let witness_block_state = WitnessStorageState { @@ -254,6 +271,7 @@ async fn get_updates_manager_witness_input_data( protocol_version: system_env.version, bootloader_code, default_account_code_hash: account_code_hash, + evm_emulator_code_hash, storage_refunds, pubdata_costs, witness_block_state, diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index 4f7ac1f9728..dbd218c8dc5 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -82,6 +82,7 @@ impl VmRunner { storage, batch_data.l1_batch_env.clone(), batch_data.system_env.clone(), + batch_data.pubdata_params, ); let mut output_handler = self .output_handler_factory diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index 2285455ba24..9ab4ed87b9f 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -13,7 +13,9 @@ use zksync_state::{ AsyncCatchupTask, BatchDiff, OwnedStorage, RocksdbCell, RocksdbStorage, RocksdbStorageBuilder, RocksdbWithMemory, }; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2ChainId}; +use zksync_types::{ + block::L2BlockExecutionData, commitment::PubdataParams, L1BatchNumber, L2ChainId, +}; use zksync_vm_executor::storage::L1BatchParamsProvider; use zksync_vm_interface::{L1BatchEnv, SystemEnv}; @@ -106,6 +108,8 @@ pub struct BatchExecuteData { pub l1_batch_env: L1BatchEnv, /// Execution process parameters. pub system_env: SystemEnv, + /// Pubdata building parameters. + pub pubdata_params: PubdataParams, /// List of L2 blocks and corresponding transactions that were executed within batch. 
pub l2_blocks: Vec, } @@ -394,7 +398,7 @@ pub(crate) async fn load_batch_execute_data( l1_batch_params_provider: &L1BatchParamsProvider, chain_id: L2ChainId, ) -> anyhow::Result> { - let Some((system_env, l1_batch_env)) = l1_batch_params_provider + let Some((system_env, l1_batch_env, pubdata_params)) = l1_batch_params_provider .load_l1_batch_env( conn, l1_batch_number, @@ -415,6 +419,7 @@ pub(crate) async fn load_batch_execute_data( Ok(Some(BatchExecuteData { l1_batch_env, system_env, + pubdata_params, l2_blocks, })) } diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index cc96353e5c3..6eba504deec 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -323,6 +323,7 @@ async fn store_l1_batches( .iter() .map(|contract| hash_bytecode(&contract.bytecode)) .chain([genesis_params.base_system_contracts().hashes().default_aa]) + .chain(genesis_params.base_system_contracts().hashes().evm_emulator) .map(h256_to_u256) .collect(); diff --git a/core/node/vm_runner/src/tests/output_handler.rs b/core/node/vm_runner/src/tests/output_handler.rs index 131089d0f79..f57814ea449 100644 --- a/core/node/vm_runner/src/tests/output_handler.rs +++ b/core/node/vm_runner/src/tests/output_handler.rs @@ -66,12 +66,12 @@ impl OutputHandlerTester { code: vec![], hash: Default::default(), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: 0, chain_id: Default::default(), - pubdata_params: Default::default(), }; let mut output_handler = self diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts index 462404af606..8567be6d6d3 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -84,9 +84,9 @@ export async function getExternalNodeHealth(url: string) { } } -export async function dropNodeData(env: { [key: string]: string }, useZkSupervisor?: boolean, chain?: string) { - if (useZkSupervisor) { - let cmd = 'zk_inception external-node init'; +export async function dropNodeData(env: { [key: string]: string }, useZkStack?: boolean, chain?: string) { + if (useZkStack) { + let cmd = 'zkstack external-node init'; cmd += chain ? ` --chain ${chain}` : ''; await executeNodeCommand(env, cmd); } else { @@ -176,7 +176,7 @@ export class NodeProcess { logsFile: FileHandle | string, pathToHome: string, components: NodeComponents = NodeComponents.STANDARD, - useZkInception?: boolean, + useZkStack?: boolean, chain?: string ) { const logs = typeof logsFile === 'string' ? await fs.open(logsFile, 'a') : logsFile; @@ -186,7 +186,7 @@ export class NodeProcess { stdio: ['ignore', logs.fd, logs.fd], cwd: pathToHome, env, - useZkInception, + useZkStack, chain }); diff --git a/core/tests/recovery-test/src/utils.ts b/core/tests/recovery-test/src/utils.ts index 98c6b6d4405..c60f5603f17 100644 --- a/core/tests/recovery-test/src/utils.ts +++ b/core/tests/recovery-test/src/utils.ts @@ -48,19 +48,19 @@ export function runExternalNodeInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception external-node run'; + if (useZkStack) { + command = 'zkstack external-node run'; command += chain ? 
` --chain ${chain}` : ''; } else { command = 'zk external-node --'; diff --git a/core/tests/recovery-test/tests/snapshot-recovery.test.ts b/core/tests/recovery-test/tests/snapshot-recovery.test.ts index cadf146c522..eca0da78d78 100644 --- a/core/tests/recovery-test/tests/snapshot-recovery.test.ts +++ b/core/tests/recovery-test/tests/snapshot-recovery.test.ts @@ -458,10 +458,10 @@ async function decompressGzip(filePath: string): Promise { }); } -async function createSnapshot(zkSupervisor: boolean) { +async function createSnapshot(useZkStack: boolean) { let command = ''; - if (zkSupervisor) { - command = `zk_supervisor snapshot create`; + if (useZkStack) { + command = `zkstack dev snapshot create`; command += ` --chain ${fileConfig.chain}`; } else { command = `zk run snapshots-creator`; diff --git a/core/tests/revert-test/tests/utils.ts b/core/tests/revert-test/tests/utils.ts index ea8a45b97c3..fe5cb40799a 100644 --- a/core/tests/revert-test/tests/utils.ts +++ b/core/tests/revert-test/tests/utils.ts @@ -51,19 +51,19 @@ export function runServerInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; if (chain) { command += ` --chain ${chain}`; } @@ -78,19 +78,19 @@ export function runExternalNodeInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception external-node run'; + if (useZkStack) { + command = 'zkstack external-node run'; command += chain ? ` --chain ${chain}` : ''; } else { command = 'zk external-node'; @@ -334,7 +334,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env: env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); @@ -362,7 +362,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index d0c97abab72..cfb539c0e0f 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -2,7 +2,7 @@ use ethabi::Token; use zksync_contracts::{ deployer_contract, load_contract, test_contracts::LoadnextContractExecutionParams, }; -use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; +use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_system_constants::{ CONTRACT_DEPLOYER_ADDRESS, DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, @@ -54,6 +54,12 @@ impl Account { Self::new(K256PrivateKey::random_using(rng)) } + /// Creates an account deterministically from the provided seed. 
+ pub fn from_seed(seed: u32) -> Self { + let private_key_bytes = H256::from_low_u64_be(u64::from(seed) + 1); + Self::new(K256PrivateKey::from_bytes(private_key_bytes).unwrap()) + } + pub fn get_l2_tx_for_execute(&mut self, execute: Execute, fee: Option) -> Transaction { let tx = self.get_l2_tx_for_execute_with_nonce(execute, fee, self.nonce); self.nonce += 1; @@ -154,7 +160,7 @@ impl Account { let max_fee_per_gas = U256::from(0u32); let gas_limit = U256::from(20_000_000); let factory_deps = execute.factory_deps; - abi::Transaction::L1 { + let tx = abi::Transaction::L1 { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&self.address), @@ -186,9 +192,8 @@ impl Account { .into(), factory_deps, eth_block: 0, - } - .try_into() - .unwrap() + }; + Transaction::from_abi(tx, false).unwrap() } pub fn get_test_contract_transaction( @@ -255,8 +260,8 @@ impl Account { PrivateKeySigner::new(self.private_key.clone()) } - pub async fn sign_legacy_tx(&self, tx: TransactionParameters) -> Vec { + pub fn sign_legacy_tx(&self, tx: TransactionParameters) -> Vec { let pk_signer = self.get_pk_signer(); - pk_signer.sign_transaction(tx).await.unwrap() + pk_signer.sign_transaction(tx) } } diff --git a/core/tests/ts-integration/src/utils.ts b/core/tests/ts-integration/src/utils.ts index 128d0be57d0..bb6fa93757e 100644 --- a/core/tests/ts-integration/src/utils.ts +++ b/core/tests/ts-integration/src/utils.ts @@ -20,21 +20,21 @@ export function runServerInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: ProcessEnvOptions['cwd']; env?: ProcessEnvOptions['env']; - useZkInception?: boolean; + useZkStack?: boolean; newL1GasPrice?: string; newPubdataPrice?: string; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; if (chain) { command += ` --chain ${chain}`; } @@ -167,7 +167,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env: env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index 00b856cea12..9db4ed211e9 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -189,7 +189,8 @@ describe.skip('web3 API compatibility tests', () => { ['eth_getCompilers', [], []], ['eth_hashrate', [], '0x0'], ['eth_mining', [], false], - ['eth_getUncleCountByBlockNumber', ['0x0'], '0x0'] + ['eth_getUncleCountByBlockNumber', ['0x0'], '0x0'], + ['eth_maxPriorityFeePerGas', [], '0x0'] ])('Should test bogus web3 methods (%s)', async (method: string, input: string[], output: string) => { await expect(alice.provider.send(method, input)).resolves.toEqual(output); }); @@ -271,7 +272,8 @@ describe.skip('web3 API compatibility tests', () => { const eip1559ApiReceipt = await alice.provider.getTransaction(eip1559Tx.hash); expect(eip1559ApiReceipt.maxFeePerGas).toEqual(eip1559Tx.maxFeePerGas!); - expect(eip1559ApiReceipt.maxPriorityFeePerGas).toEqual(eip1559Tx.maxPriorityFeePerGas!); + // `ethers` will use value provided by `eth_maxPriorityFeePerGas`, and we return 0 there. 
+ expect(eip1559ApiReceipt.maxPriorityFeePerGas).toEqual(0n); }); test('Should test getFilterChanges for pending transactions', async () => { diff --git a/core/tests/ts-integration/tests/base-token.test.ts b/core/tests/ts-integration/tests/base-token.test.ts index 7f7974205dc..5abae0b89d3 100644 --- a/core/tests/ts-integration/tests/base-token.test.ts +++ b/core/tests/ts-integration/tests/base-token.test.ts @@ -9,7 +9,7 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { scaledGasPrice, waitForBlockToBeFinalizedOnL1 } from '../src/helpers'; -const SECONDS = 1000; +const SECONDS = 2000; jest.setTimeout(100 * SECONDS); describe('base ERC20 contract checks', () => { diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts index b6b9672750b..6cfb85fa027 100644 --- a/core/tests/ts-integration/tests/contracts.test.ts +++ b/core/tests/ts-integration/tests/contracts.test.ts @@ -16,7 +16,7 @@ import * as elliptic from 'elliptic'; import { RetryProvider } from '../src/retry-provider'; const SECONDS = 1000; -jest.setTimeout(300 * SECONDS); +jest.setTimeout(400 * SECONDS); // TODO: Leave only important ones. const contracts = { diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index 92bbfff1965..c9862c58507 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -280,7 +280,6 @@ testFees('Test fees', function () { }); afterAll(async () => { - await testMaster.deinitialize(); await mainNode.killAndWaitForShutdown(); // Returning the pubdata price to the default one @@ -289,6 +288,7 @@ testFees('Test fees', function () { deleteInternalEnforcedL1GasPrice(pathToHome, fileConfig); deleteInternalEnforcedPubdataPrice(pathToHome, fileConfig); mainNode = await mainNodeSpawner.spawnMainNode(); + await testMaster.deinitialize(); __ZKSYNC_TEST_CONTEXT_OWNER__.setL2NodePid(mainNode.proc.pid!); }); }); diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 2e223b9d744..4065480b121 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -64,9 +64,21 @@ describe('Upgrade test', function () { complexUpgraderAddress = '0x000000000000000000000000000000000000800f'; if (fileConfig.loadFromFile) { - const generalConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); - const contractsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'contracts.yaml' }); - const secretsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'secrets.yaml' }); + const generalConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'general.yaml' + }); + const contractsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'contracts.yaml' + }); + const secretsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'secrets.yaml' + }); ethProviderAddress = secretsConfig.l1.l1_rpc_url; web3JsonRpc = generalConfig.api.web3_json_rpc.http_url; @@ -89,7 +101,11 @@ describe('Upgrade test', function () { alice = tester.emptyWallet(); if (fileConfig.loadFromFile) { - const chainWalletConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'wallets.yaml' }); + const chainWalletConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'wallets.yaml' + }); adminGovWallet = new 
ethers.Wallet(chainWalletConfig.governor.private_key, alice._providerL1()); @@ -144,7 +160,7 @@ describe('Upgrade test', function () { components: serverComponents, stdio: ['ignore', logs, logs], cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); // Server may need some time to recompile if it's a cold run, so wait for it. @@ -220,8 +236,15 @@ describe('Upgrade test', function () { }); step('Send l1 tx for saving new bootloader', async () => { - const path = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin`; - const bootloaderCode = ethers.hexlify(fs.readFileSync(path)); + const path = `${pathToHome}/contracts/system-contracts/zkout/playground_batch.yul/contracts-preprocessed/bootloader/playground_batch.yul.json`; + let bootloaderCode; + if (fs.existsSync(path)) { + bootloaderCode = '0x'.concat(require(path).bytecode.object); + } else { + const legacyPath = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin`; + bootloaderCode = ethers.hexlify(fs.readFileSync(legacyPath)); + } + bootloaderHash = ethers.hexlify(zksync.utils.hashBytecode(bootloaderCode)); const txHandle = await tester.syncWallet.requestExecute({ contractAddress: ethers.ZeroAddress, @@ -354,7 +377,7 @@ describe('Upgrade test', function () { components: serverComponents, stdio: ['ignore', logs, logs], cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); await utils.sleep(10); diff --git a/core/tests/upgrade-test/tests/utils.ts b/core/tests/upgrade-test/tests/utils.ts index 7a7829caf86..2972f8411f5 100644 --- a/core/tests/upgrade-test/tests/utils.ts +++ b/core/tests/upgrade-test/tests/utils.ts @@ -7,19 +7,19 @@ export function runServerInBackground({ components, stdio, cwd, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }) { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; command += chain ? 
` --chain ${chain}` : ''; } else { command = 'cd $ZKSYNC_HOME && cargo run --bin zksync_server --release --'; @@ -71,8 +71,8 @@ export interface Contracts { stateTransitonManager: any; } -export function initContracts(pathToHome: string, zkToolbox: boolean): Contracts { - if (zkToolbox) { +export function initContracts(pathToHome: string, zkStack: boolean): Contracts { + if (zkStack) { const CONTRACTS_FOLDER = `${pathToHome}/contracts`; return { l1DefaultUpgradeAbi: new ethers.Interface( @@ -88,10 +88,10 @@ export function initContracts(pathToHome: string, zkToolbox: boolean): Contracts require(`${CONTRACTS_FOLDER}/l1-contracts/out/ChainAdmin.sol/ChainAdmin.json`).abi ), l2ForceDeployUpgraderAbi: new ethers.Interface( - require(`${CONTRACTS_FOLDER}/l2-contracts/artifacts-zk/contracts/ForceDeployUpgrader.sol/ForceDeployUpgrader.json`).abi + require(`${CONTRACTS_FOLDER}/l2-contracts/zkout/ForceDeployUpgrader.sol/ForceDeployUpgrader.json`).abi ), complexUpgraderAbi: new ethers.Interface( - require(`${CONTRACTS_FOLDER}/system-contracts/artifacts-zk/contracts-preprocessed/ComplexUpgrader.sol/ComplexUpgrader.json`).abi + require(`${CONTRACTS_FOLDER}/system-contracts/zkout/ComplexUpgrader.sol/ComplexUpgrader.json`).abi ), counterBytecode: require(`${pathToHome}/core/tests/ts-integration/artifacts-zk/contracts/counter/counter.sol/Counter.json`) diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 4586c637e12..59c1e21493b 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -11,6 +11,7 @@ zksync_multivm.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_vlog.workspace = true +zksync_vm2.workspace = true criterion.workspace = true once_cell.workspace = true diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs index 6b8965afa4f..8cbb9f10dd8 100644 --- a/core/tests/vm-benchmark/benches/iai.rs +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -31,4 +31,5 @@ make_functions_and_main!( write_and_decode => write_and_decode_legacy, event_spam => event_spam_legacy, slot_hash_collision => slot_hash_collision_legacy, + heap_read_write => heap_read_write_legacy, ); diff --git a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs index faf72a18f45..c274b039c9b 100644 --- a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs +++ b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs @@ -25,14 +25,7 @@ fn main() { .keys() .collect::>() .intersection(&iai_after.keys().collect()) - .filter_map(|&name| { - let diff = percent_difference(iai_before[name], iai_after[name]); - if diff.abs() > 2. 
{ - Some((name, format!("{:+.1}%", diff))) - } else { - None - } - }) + .map(|&name| (name, percent_difference(iai_before[name], iai_after[name]))) .collect::>(); let duration_changes = opcodes_before @@ -47,12 +40,17 @@ fn main() { let mut nonzero_diff = false; - for name in perf_changes.keys().collect::>().union( - &duration_changes - .iter() - .filter_map(|(key, value)| (*value != 0).then_some(key)) - .collect(), - ) { + for name in perf_changes + .iter() + .filter_map(|(key, value)| (value.abs() > 2.).then_some(key)) + .collect::>() + .union( + &duration_changes + .iter() + .filter_map(|(key, value)| (*value != 0).then_some(key)) + .collect(), + ) + { // write the header before writing the first line of diff if !nonzero_diff { println!("Benchmark name | change in estimated runtime | change in number of opcodes executed \n--- | --- | ---"); @@ -63,7 +61,10 @@ fn main() { println!( "{} | {} | {}", name, - perf_changes.get(**name).unwrap_or(&n_a.clone()), + perf_changes + .get(**name) + .map(|percent| format!("{:+.1}%", percent)) + .unwrap_or(n_a.clone()), duration_changes .get(**name) .map(|abs_diff| format!( diff --git a/core/tests/vm-benchmark/src/bin/instruction_counts.rs b/core/tests/vm-benchmark/src/bin/instruction_counts.rs index f9bb04c01bf..96208007fd9 100644 --- a/core/tests/vm-benchmark/src/bin/instruction_counts.rs +++ b/core/tests/vm-benchmark/src/bin/instruction_counts.rs @@ -1,11 +1,16 @@ //! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. -use vm_benchmark::{BenchmarkingVm, BYTECODES}; +use vm_benchmark::{BenchmarkingVmFactory, Fast, Legacy, BYTECODES}; fn main() { for bytecode in BYTECODES { let tx = bytecode.deploy_tx(); let name = bytecode.name; - println!("{name} {}", BenchmarkingVm::new().instruction_count(&tx)); + println!("{name} {}", Fast::<()>::count_instructions(&tx)); + println!( + "{} {}", + name.to_string() + "_legacy", + Legacy::count_instructions(&tx) + ); } } diff --git a/core/tests/vm-benchmark/src/instruction_counter.rs b/core/tests/vm-benchmark/src/instruction_counter.rs index 48b1e3527ad..0899c4c9171 100644 --- a/core/tests/vm-benchmark/src/instruction_counter.rs +++ b/core/tests/vm-benchmark/src/instruction_counter.rs @@ -13,7 +13,6 @@ pub struct InstructionCounter { /// A tracer that counts the number of instructions executed by the VM. 
impl InstructionCounter { - #[allow(dead_code)] // FIXME: re-enable instruction counting once new tracers are merged pub fn new(output: Rc>) -> Self { Self { count: 0, output } } diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index 410c0e071b4..922fb24512b 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -5,12 +5,12 @@ use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView}, - ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + ExecutionResult, InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, - vm_fast, vm_latest, - vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled}, + vm_fast, + vm_latest::{self, constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled, ToTracerPointer}, zk_evm_latest::ethereum_types::{Address, U256}, }; use zksync_types::{ @@ -20,7 +20,7 @@ use zksync_types::{ }; use zksync_utils::bytecode::hash_bytecode; -use crate::transaction::PRIVATE_KEY; +use crate::{instruction_counter::InstructionCounter, transaction::PRIVATE_KEY}; static SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); @@ -72,16 +72,19 @@ pub trait BenchmarkingVmFactory { system_env: SystemEnv, storage: &'static InMemoryStorage, ) -> Self::Instance; + + /// Counts instructions executed by the VM while processing the transaction. + fn count_instructions(tx: &Transaction) -> usize; } /// Factory for the new / fast VM. #[derive(Debug)] -pub struct Fast(()); +pub struct Fast(Tr); -impl BenchmarkingVmFactory for Fast { +impl BenchmarkingVmFactory for Fast { const LABEL: VmLabel = VmLabel::Fast; - type Instance = vm_fast::Vm<&'static InMemoryStorage>; + type Instance = vm_fast::Vm<&'static InMemoryStorage, Tr>; fn create( batch_env: L1BatchEnv, @@ -90,6 +93,29 @@ impl BenchmarkingVmFactory for Fast { ) -> Self::Instance { vm_fast::Vm::custom(batch_env, system_env, storage) } + + fn count_instructions(tx: &Transaction) -> usize { + let mut vm = BenchmarkingVm::>::default(); + vm.0.push_transaction(tx.clone()); + + #[derive(Default)] + struct InstructionCount(usize); + impl vm_fast::Tracer for InstructionCount { + fn before_instruction< + OP: zksync_vm2::interface::OpcodeType, + S: zksync_vm2::interface::GlobalStateInterface, + >( + &mut self, + _: &mut S, + ) { + self.0 += 1; + } + } + let mut tracer = InstructionCount(0); + + vm.0.inspect(&mut tracer, InspectExecutionMode::OneTx); + tracer.0 + } } /// Factory for the legacy VM (latest version). 
@@ -109,6 +135,19 @@ impl BenchmarkingVmFactory for Legacy { let storage = StorageView::new(storage).to_rc_ptr(); vm_latest::Vm::new(batch_env, system_env, storage) } + + fn count_instructions(tx: &Transaction) -> usize { + let mut vm = BenchmarkingVm::::default(); + vm.0.push_transaction(tx.clone()); + let count = Rc::new(RefCell::new(0)); + vm.0.inspect( + &mut InstructionCounter::new(count.clone()) + .into_tracer_pointer() + .into(), + InspectExecutionMode::OneTx, + ); + count.take() + } } #[derive(Debug)] @@ -143,7 +182,6 @@ impl Default for BenchmarkingVm { execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), - pubdata_params: Default::default(), }, &STORAGE, )) @@ -153,7 +191,7 @@ impl Default for BenchmarkingVm { impl BenchmarkingVm { pub fn run_transaction(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { self.0.push_transaction(tx.clone()); - self.0.execute(VmExecutionMode::OneTx) + self.0.execute(InspectExecutionMode::OneTx) } pub fn run_transaction_full(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { @@ -170,13 +208,6 @@ impl BenchmarkingVm { } tx_result } - - pub fn instruction_count(&mut self, tx: &Transaction) -> usize { - self.0.push_transaction(tx.clone()); - let count = Rc::new(RefCell::new(0)); - self.0.execute(VmExecutionMode::OneTx); // FIXME: re-enable instruction counting once new tracers are merged - count.take() - } } impl BenchmarkingVm { @@ -191,64 +222,64 @@ impl BenchmarkingVm { } } -#[cfg(test)] -mod tests { - use assert_matches::assert_matches; - use zksync_contracts::read_bytecode; - use zksync_multivm::interface::ExecutionResult; - - use super::*; - use crate::{ - get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, - get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, - }; - - #[test] - fn can_deploy_contract() { - let test_contract = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", - ); - let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_deploy_tx(&test_contract)); - - assert_matches!(res.result, ExecutionResult::Success { .. }); - } - - #[test] - fn can_transfer() { - let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_transfer_tx(0)); - assert_matches!(res.result, ExecutionResult::Success { .. }); - } - - #[test] - fn can_load_test() { - let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_load_test_deploy_tx()); - assert_matches!(res.result, ExecutionResult::Success { .. }); - - let params = LoadTestParams::default(); - let res = vm.run_transaction(&get_load_test_tx(1, 10_000_000, params)); - assert_matches!(res.result, ExecutionResult::Success { .. }); - } - - #[test] - fn can_load_test_with_realistic_txs() { - let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_load_test_deploy_tx()); - assert_matches!(res.result, ExecutionResult::Success { .. }); - - let res = vm.run_transaction(&get_realistic_load_test_tx(1)); - assert_matches!(res.result, ExecutionResult::Success { .. }); - } - - #[test] - fn can_load_test_with_heavy_txs() { - let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_load_test_deploy_tx()); - assert_matches!(res.result, ExecutionResult::Success { .. }); - - let res = vm.run_transaction(&get_heavy_load_test_tx(1)); - assert_matches!(res.result, ExecutionResult::Success { .. 
}); - } -} +// #[cfg(test)] +// mod tests { +// use assert_matches::assert_matches; +// use zksync_contracts::read_bytecode; +// use zksync_multivm::interface::ExecutionResult; +// +// use super::*; +// use crate::{ +// get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, +// get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, +// }; +// +// #[test] +// fn can_deploy_contract() { +// let test_contract = read_bytecode( +// "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", +// ); +// let mut vm = BenchmarkingVm::new(); +// let res = vm.run_transaction(&get_deploy_tx(&test_contract)); +// +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// } +// +// #[test] +// fn can_transfer() { +// let mut vm = BenchmarkingVm::new(); +// let res = vm.run_transaction(&get_transfer_tx(0)); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// } +// +// #[test] +// fn can_load_test() { +// let mut vm = BenchmarkingVm::new(); +// let res = vm.run_transaction(&get_load_test_deploy_tx()); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// +// let params = LoadTestParams::default(); +// let res = vm.run_transaction(&get_load_test_tx(1, 10_000_000, params)); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// } +// +// #[test] +// fn can_load_test_with_realistic_txs() { +// let mut vm = BenchmarkingVm::new(); +// let res = vm.run_transaction(&get_load_test_deploy_tx()); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// +// let res = vm.run_transaction(&get_realistic_load_test_tx(1)); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// } +// +// #[test] +// fn can_load_test_with_heavy_txs() { +// let mut vm = BenchmarkingVm::new(); +// let res = vm.run_transaction(&get_load_test_deploy_tx()); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// +// let res = vm.run_transaction(&get_heavy_load_test_tx(1)); +// assert_matches!(res.result, ExecutionResult::Success { .. 
}); +// } +// } diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index 35a0faeb962..bd91a5a5b0e 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -3,20 +3,20 @@ services: reth: restart: always image: "ghcr.io/paradigmxyz/reth:v1.0.6" + ports: + - 127.0.0.1:8545:8545 volumes: - - type: bind - source: ./volumes/reth/data + - type: volume + source: reth-data target: /rethdata - type: bind source: ./etc/reth/chaindata target: /chaindata command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config - ports: - - 127.0.0.1:8545:8545 zk: - image: ghcr.io/matter-labs/zk-environment:cuda-12-0-latest + image: ghcr.io/matter-labs/zk-environment:cuda-12_0-latest depends_on: - reth - postgres @@ -49,11 +49,18 @@ services: - /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools env_file: - ./.env + extra_hosts: + - "host:host-gateway" + profiles: + - runner + network_mode: host + pid: host deploy: resources: reservations: devices: - capabilities: [ gpu ] + postgres: image: "postgres:14" command: postgres -c 'max_connections=200' @@ -62,3 +69,7 @@ services: environment: # We bind only to 127.0.0.1, so setting insecure password is acceptable here - POSTGRES_PASSWORD=notsecurepassword + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index f95ae0d5f54..32665eb7010 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -16,7 +16,7 @@ services: - 127.0.0.1:8545:8545 zk: - image: "ghcr.io/matter-labs/zk-environment:cuda-11-8-latest" + image: "ghcr.io/matter-labs/zk-environment:cuda-11_8-latest" container_name: zk depends_on: - reth @@ -40,6 +40,11 @@ services: - GITHUB_WORKSPACE=$GITHUB_WORKSPACE env_file: - ./.env + extra_hosts: + - "host:host-gateway" + profiles: + - runner + network_mode: host deploy: resources: reservations: diff --git a/docker-compose-runner-nightly.yml b/docker-compose-runner-nightly.yml index cadd1009f7a..4a854aa0b0a 100644 --- a/docker-compose-runner-nightly.yml +++ b/docker-compose-runner-nightly.yml @@ -1,4 +1,3 @@ -version: '3.2' services: zk: image: ghcr.io/matter-labs/zk-environment:latest2.0-lightweight-nightly @@ -15,3 +14,7 @@ services: extends: file: docker-compose.yml service: reth + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker-compose-unit-tests.yml b/docker-compose-unit-tests.yml index ddbc76bb196..b839be2d9f4 100644 --- a/docker-compose-unit-tests.yml +++ b/docker-compose-unit-tests.yml @@ -1,4 +1,3 @@ -version: '3.2' name: unit_tests services: # An instance of postgres configured to execute Rust unit-tests, tuned for performance. 
diff --git a/docker-compose.yml b/docker-compose.yml index 1e3a273ec9a..d8f40720fe8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,3 @@ -version: '3.2' services: reth: restart: always @@ -6,8 +5,8 @@ services: ports: - 127.0.0.1:8545:8545 volumes: - - type: bind - source: ./volumes/reth/data + - type: volume + source: reth-data target: /rethdata - type: bind source: ./etc/reth/chaindata @@ -22,8 +21,8 @@ services: ports: - 127.0.0.1:5432:5432 volumes: - - type: bind - source: ./volumes/postgres + - type: volume + source: postgres-data target: /var/lib/postgresql/data environment: # We bind only to 127.0.0.1, so setting insecure password is acceptable here @@ -56,3 +55,7 @@ services: profiles: - runner network_mode: host + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker/Makefile b/docker/Makefile index 72189902aa1..4e0ca51f904 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -55,18 +55,20 @@ check-tools: check-nodejs check-yarn check-rust check-sqlx-cli check-docker chec # Check that contracts are checkout properly check-contracts: - @if [ ! -d ../contracts/l1-contracts/lib/forge-std/foundry.toml ] || [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \ - echo "l1-contracts git submodule is missing. Please re-download repo with `git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git`"; \ + @if [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \ + echo "l1-contracts git submodule is missing. Please re-download repo with 'git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git'"; \ exit 1; \ fi # Build and download needed contracts +# TODO Remove mkdir once we use foundry inside contracts repo prepare-contracts: check-tools check-contracts @cd ../ && \ export ZKSYNC_HOME=$$(pwd) && \ - export PATH=$$PATH:$${ZKSYNC_HOME}/bin && \ - zkt || true && \ - zk_supervisor contracts + export PATH=$$PATH:$${ZKSYNC_HOME}/bin:$${ZKSYNC_HOME}/zkstack_cli/zkstackup && \ + zkstackup -g --local || true && \ + zkstack dev contracts && \ + mkdir -p contracts/l1-contracts/artifacts # Download setup-key prepare-keys: @@ -91,9 +93,12 @@ build-witness-generator: check-tools prepare-keys $(DOCKER_BUILD_CMD) --file witness-generator/Dockerfile --load \ --tag witness-generator:$(PROTOCOL_VERSION) $(CONTEXT) +build-external-node: check-tools prepare-contracts + $(DOCKER_BUILD_CMD) --file external-node/Dockerfile --load \ + --tag external-node:$(PROTOCOL_VERSION) $(CONTEXT) # Build all containers -build-all: build-contract-verifier build-server-v2 build-witness-generator build-circuit-prover-gpu cleanup +build-all: build-contract-verifier build-server-v2 build-witness-generator build-circuit-prover-gpu build-external-node cleanup # Clean generated images clean-all: @@ -102,3 +107,4 @@ clean-all: docker rmi server-v2:$(PROTOCOL_VERSION) >/dev/null 2>&1 docker rmi prover:$(PROTOCOL_VERSION) >/dev/null 2>&1 docker rmi witness-generator:$(PROTOCOL_VERSION) >/dev/null 2>&1 + docker rmi external-node:$(PROTOCOL_VERSION) >/dev/null 2>&1 diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7943dae835a..e9d83903d11 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -47,7 +47,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ +RUN for VERSION in $(seq -f 
"v1.5.%g" 0 6); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ @@ -68,7 +68,7 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ done # install zkvyper 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 6); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ @@ -91,10 +91,13 @@ RUN mkdir -p /etc/vyper-bin/0.3.10 \ && wget -O vyper0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10%2Bcommit.91361694.linux \ && mv vyper0.3.10 /etc/vyper-bin/0.3.10/vyper \ && chmod +x /etc/vyper-bin/0.3.10/vyper +RUN mkdir -p /etc/vyper-bin/0.4.0 \ + && wget -O vyper0.4.0 https://github.com/vyperlang/vyper/releases/download/v0.4.0/vyper.0.4.0+commit.e9db8d9f.linux \ + && mv vyper0.4.0 /etc/vyper-bin/0.4.0/vyper \ + && chmod +x /etc/vyper-bin/0.4.0/vyper COPY --from=builder /usr/src/zksync/target/release/zksync_contract_verifier /usr/bin/ -COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ -COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/system-contracts/zkout/ /contracts/system-contracts/zkout/ # CMD tail -f /dev/null ENTRYPOINT ["zksync_contract_verifier"] diff --git a/docker/contract-verifier/install-all-solc.sh b/docker/contract-verifier/install-all-solc.sh index 4fe992f8357..0c24b074130 100755 --- a/docker/contract-verifier/install-all-solc.sh +++ b/docker/contract-verifier/install-all-solc.sh @@ -26,7 +26,7 @@ done # Download zkVM solc list=( "0.8.25-1.0.0" "0.8.24-1.0.0" "0.8.23-1.0.0" "0.8.22-1.0.0" "0.8.21-1.0.0" "0.8.20-1.0.0" "0.8.19-1.0.0" "0.8.18-1.0.0" "0.8.17-1.0.0" "0.8.16-1.0.0" "0.8.15-1.0.0" "0.8.14-1.0.0" "0.8.13-1.0.0" "0.8.12-1.0.0" "0.8.11-1.0.0" "0.8.10-1.0.0" "0.8.9-1.0.0" "0.8.8-1.0.0" "0.8.7-1.0.0" "0.8.6-1.0.0" "0.8.5-1.0.0" "0.8.4-1.0.0" "0.8.3-1.0.0" "0.8.2-1.0.0" "0.8.1-1.0.0" "0.8.0-1.0.0" "0.7.6-1.0.0" "0.7.5-1.0.0" "0.7.4-1.0.0" "0.7.3-1.0.0" "0.7.2-1.0.0" "0.7.1-1.0.0" "0.7.0-1.0.0" "0.6.12-1.0.0" "0.6.11-1.0.0" "0.6.10-1.0.0" "0.6.9-1.0.0" "0.6.8-1.0.0" "0.6.7-1.0.0" "0.6.6-1.0.0" "0.6.5-1.0.0" "0.6.4-1.0.0" "0.6.3-1.0.0" "0.6.2-1.0.0" "0.6.1-1.0.0" "0.6.0-1.0.0" "0.5.17-1.0.0" "0.5.16-1.0.0" "0.5.15-1.0.0" "0.5.14-1.0.0" "0.5.13-1.0.0" "0.5.12-1.0.0" "0.5.11-1.0.0" "0.5.10-1.0.0" "0.5.9-1.0.0" "0.5.8-1.0.0" "0.5.7-1.0.0" "0.5.6-1.0.0" "0.5.5-1.0.0" "0.5.4-1.0.0" "0.5.3-1.0.0" "0.5.2-1.0.0" "0.5.1-1.0.0" "0.5.0-1.0.0" "0.4.26-1.0.0" "0.4.25-1.0.0" "0.4.24-1.0.0" "0.4.23-1.0.0" "0.4.22-1.0.0" "0.4.21-1.0.0" "0.4.20-1.0.0" "0.4.19-1.0.0" "0.4.18-1.0.0" "0.4.17-1.0.0" "0.4.16-1.0.0" "0.4.15-1.0.0" "0.4.14-1.0.0" "0.4.13-1.0.0" "0.4.12-1.0.0" - "0.8.27-1.0.1" "0.8.26-1.0.1" "0.8.25-1.0.1" "0.8.24-1.0.1" "0.8.23-1.0.1" "0.8.22-1.0.1" "0.8.21-1.0.1" "0.8.20-1.0.1" "0.8.19-1.0.1" "0.8.18-1.0.1" "0.8.17-1.0.1" "0.8.16-1.0.1" "0.8.15-1.0.1" "0.8.14-1.0.1" "0.8.13-1.0.1" "0.8.12-1.0.1" "0.8.11-1.0.1" "0.8.10-1.0.1" "0.8.9-1.0.1" "0.8.8-1.0.1" "0.8.7-1.0.1" "0.8.6-1.0.1" "0.8.5-1.0.1" "0.8.4-1.0.1" "0.8.3-1.0.1" "0.8.2-1.0.1" "0.8.1-1.0.1" "0.8.0-1.0.1" "0.7.6-1.0.1" "0.7.5-1.0.1" "0.7.4-1.0.1" "0.7.3-1.0.1" "0.7.2-1.0.1" "0.7.1-1.0.1" 
"0.7.0-1.0.1" "0.6.12-1.0.1" "0.6.11-1.0.1" "0.6.10-1.0.1" "0.6.9-1.0.1" "0.6.8-1.0.1" "0.6.7-1.0.1" "0.6.6-1.0.1" "0.6.5-1.0.1" "0.6.4-1.0.1" "0.6.3-1.0.1" "0.6.2-1.0.1" "0.6.1-1.0.1" "0.6.0-1.0.1" "0.5.17-1.0.1" "0.5.16-1.0.1" "0.5.15-1.0.1" "0.5.14-1.0.1" "0.5.13-1.0.1" "0.5.12-1.0.1" "0.5.11-1.0.1" "0.5.10-1.0.1" "0.5.9-1.0.1" "0.5.8-1.0.1" "0.5.7-1.0.1" "0.5.6-1.0.1" "0.5.5-1.0.1" "0.5.4-1.0.1" "0.5.3-1.0.1" "0.5.2-1.0.1" "0.5.1-1.0.1" "0.5.0-1.0.1" "0.4.26-1.0.1" "0.4.25-1.0.1" "0.4.24-1.0.1" "0.4.23-1.0.1" "0.4.22-1.0.1" "0.4.21-1.0.1" "0.4.20-1.0.1" "0.4.19-1.0.1" "0.4.18-1.0.1" "0.4.17-1.0.1" "0.4.16-1.0.1" "0.4.15-1.0.1" "0.4.14-1.0.1" "0.4.13-1.0.1" "0.4.12-1.0.1" + "0.8.28-1.0.1" "0.8.27-1.0.1" "0.8.26-1.0.1" "0.8.25-1.0.1" "0.8.24-1.0.1" "0.8.23-1.0.1" "0.8.22-1.0.1" "0.8.21-1.0.1" "0.8.20-1.0.1" "0.8.19-1.0.1" "0.8.18-1.0.1" "0.8.17-1.0.1" "0.8.16-1.0.1" "0.8.15-1.0.1" "0.8.14-1.0.1" "0.8.13-1.0.1" "0.8.12-1.0.1" "0.8.11-1.0.1" "0.8.10-1.0.1" "0.8.9-1.0.1" "0.8.8-1.0.1" "0.8.7-1.0.1" "0.8.6-1.0.1" "0.8.5-1.0.1" "0.8.4-1.0.1" "0.8.3-1.0.1" "0.8.2-1.0.1" "0.8.1-1.0.1" "0.8.0-1.0.1" "0.7.6-1.0.1" "0.7.5-1.0.1" "0.7.4-1.0.1" "0.7.3-1.0.1" "0.7.2-1.0.1" "0.7.1-1.0.1" "0.7.0-1.0.1" "0.6.12-1.0.1" "0.6.11-1.0.1" "0.6.10-1.0.1" "0.6.9-1.0.1" "0.6.8-1.0.1" "0.6.7-1.0.1" "0.6.6-1.0.1" "0.6.5-1.0.1" "0.6.4-1.0.1" "0.6.3-1.0.1" "0.6.2-1.0.1" "0.6.1-1.0.1" "0.6.0-1.0.1" "0.5.17-1.0.1" "0.5.16-1.0.1" "0.5.15-1.0.1" "0.5.14-1.0.1" "0.5.13-1.0.1" "0.5.12-1.0.1" "0.5.11-1.0.1" "0.5.10-1.0.1" "0.5.9-1.0.1" "0.5.8-1.0.1" "0.5.7-1.0.1" "0.5.6-1.0.1" "0.5.5-1.0.1" "0.5.4-1.0.1" "0.5.3-1.0.1" "0.5.2-1.0.1" "0.5.1-1.0.1" "0.5.0-1.0.1" "0.4.26-1.0.1" "0.4.25-1.0.1" "0.4.24-1.0.1" "0.4.23-1.0.1" "0.4.22-1.0.1" "0.4.21-1.0.1" "0.4.20-1.0.1" "0.4.19-1.0.1" "0.4.18-1.0.1" "0.4.17-1.0.1" "0.4.16-1.0.1" "0.4.15-1.0.1" "0.4.14-1.0.1" "0.4.13-1.0.1" "0.4.12-1.0.1" ) for version in ${list[@]}; do diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index aa1089ae7b3..f5c55860740 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -23,12 +23,9 @@ COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin COPY --from=builder /usr/local/cargo/bin/sqlx /usr/bin COPY --from=builder /usr/src/zksync/docker/external-node/entrypoint.sh /usr/bin -COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/system-contracts/contracts-preprocessed/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ -COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/system-contracts/zkout/ /contracts/system-contracts/zkout/ COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ -COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ +COPY contracts/l2-contracts/zkout/ /contracts/l2-contracts/zkout/ COPY etc/tokens/ /etc/tokens/ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ diff --git a/docker/prover-autoscaler/Dockerfile b/docker/prover-autoscaler/Dockerfile new file mode 100644 index 00000000000..246e8099ffd --- /dev/null +++ b/docker/prover-autoscaler/Dockerfile @@ -0,0 +1,25 @@ +FROM 
ghcr.io/matter-labs/zksync-build-base:latest AS builder + +ARG DEBIAN_FRONTEND=noninteractive + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + +WORKDIR /usr/src/zksync +COPY . . + +RUN cd prover && cargo build --release --bin zksync_prover_autoscaler + +FROM ghcr.io/matter-labs/zksync-runtime-base:latest + +COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_autoscaler /usr/bin/ + +ENTRYPOINT ["/usr/bin/zksync_prover_autoscaler"] diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index 3e8b4f16bca..319d0cefbe3 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -31,12 +31,9 @@ EXPOSE 3030 COPY --from=builder /usr/src/zksync/target/release/zksync_server /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin COPY --from=builder /usr/src/zksync/target/release/merkle_tree_consistency_checker /usr/bin -COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/system-contracts/contracts-preprocessed/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ -COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/system-contracts/zkout/ /contracts/system-contracts/zkout/ COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ -COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ +COPY contracts/l2-contracts/zkout/ /contracts/l2-contracts/zkout/ COPY etc/tokens/ /etc/tokens/ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile similarity index 94% rename from docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile rename to docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile index 0c0fd7a9bb3..fe44d55acbb 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04@sha256:3246518d9735254519e1b2ff35f95686e4a5011c90c85344c1f38df7bae9dd37 as base +FROM ubuntu:22.04@sha256:3d1556a8a18cf5307b121e0a98e93f1ddf1f3f8e092f1fddfd941254785b95d7 as base # Link Docker Image with repository # https://docs.github.com/en/packages/learn-github-packages/connecting-a-repository-to-a-package#connecting-a-repository-to-a-container-image-using-the-command-line @@ -16,7 +16,7 @@ RUN apt-get update && apt-get install -y \ git \ openssl \ libssl-dev \ - gcc \ + gcc-10 \ g++ \ curl \ pkg-config \ @@ -31,19 +31,19 @@ RUN apt-get update && apt-get install -y \ wget \ bzip2 \ unzip \ - hub + hub \ + curl \ + gnutls-bin git \ + build-essential \ + clang \ + lldb \ + lld # Install dependencies for RocksDB. 
`liburing` is not available for Ubuntu 20.04, # so we use a PPA with the backport RUN add-apt-repository ppa:savoury1/virtualisation && \ apt-get update && \ apt-get install -y \ - curl \ - gnutls-bin git \ - build-essential \ - clang \ - lldb \ - lld \ liburing-dev \ libclang-dev @@ -83,6 +83,11 @@ RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. RUN wget -c https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \ @@ -104,7 +109,7 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile similarity index 95% rename from docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile rename to docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile index 5bd569b7d20..da041b12181 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 as base +FROM ubuntu:22.04@sha256:3d1556a8a18cf5307b121e0a98e93f1ddf1f3f8e092f1fddfd941254785b95d7 as base # Link Docker Image with repository # https://docs.github.com/en/packages/learn-github-packages/connecting-a-repository-to-a-package#connecting-a-repository-to-a-container-image-using-the-command-line @@ -16,7 +16,7 @@ RUN apt-get update && apt-get install -y \ git \ openssl \ libssl-dev \ - gcc \ + gcc-10 \ g++ \ curl \ pkg-config \ @@ -30,18 +30,18 @@ RUN apt-get update && apt-get install -y \ gnupg2 \ postgresql-client \ hub \ - unzip + unzip \ + gnutls-bin \ + build-essential \ + clang \ + lldb\ + lld # Install dependencies for RocksDB. `liburing` is not available for Ubuntu 20.04, # so we use a PPA with the backport RUN add-apt-repository ppa:savoury1/virtualisation && \ apt-get update && \ apt-get install -y \ - gnutls-bin \ - build-essential \ - clang \ - lldb\ - lld \ liburing-dev \ libclang-dev @@ -81,6 +81,11 @@ RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. 
RUN wget -c https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \ @@ -93,7 +98,7 @@ RUN wget -c https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksol # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 53e53265311..c04e5720e4d 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -164,7 +164,7 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docs/guides/build-docker.md b/docs/guides/build-docker.md index a9e8f5d3e76..5dd9cff022b 100644 --- a/docs/guides/build-docker.md +++ b/docs/guides/build-docker.md @@ -25,6 +25,7 @@ contract-verifier:2.0 server-v2:2.0 prover:2.0 witness-generator:2.0 +external-node:2.0 ``` Alternatively, you may build only needed components - available targets are @@ -34,6 +35,7 @@ make -C ./docker build-contract-verifier make -C ./docker build-server-v2 make -C ./docker build-circuit-prover-gpu make -C ./docker build-witness-generator +make -C ./docker build-external-node ``` ## Building updated images diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index 67a1b89eef5..07e52085cf4 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -6,6 +6,8 @@ Install `docker compose` and `Docker` ## Running ZKsync node locally +These commands start ZKsync node locally inside docker. + To start a mainnet instance, run: ```sh @@ -34,9 +36,10 @@ cd docker-compose-examples sudo docker compose --file testnet-external-node-docker-compose.yml down --volumes ``` -You can see the status of the node (after recovery) in [local grafana dashboard](http://localhost:3000/dashboards). +### Observability -Those commands start ZKsync node locally inside docker. +You can see the status of the node (after recovery) in [local grafana dashboard](http://localhost:3000/dashboards). You +can also access a debug page with more information about the node [here](http://localhost:5000). The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be accessed on port `3061`. 
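As a quick sanity check of the endpoints mentioned in the quick-start guide above, the HTTP JSON-RPC API on port `3060` can be queried with the standard `eth_blockNumber` method. This is a minimal sketch, assuming the docker compose example is running locally with the default ports:

```sh
# Ask the locally running ZKsync node for the latest L2 block number over HTTP JSON-RPC (port 3060).
curl -s -X POST http://localhost:3060 \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}'
```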
diff --git a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml index c2bef23b2e4..f2a0ce31875 100644 --- a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml +++ b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml @@ -1,6 +1,6 @@ server_addr: '0.0.0.0:3054' public_addr: '127.0.0.1:3054' -debug_page_addr: '127.0.0.1:5000' +debug_page_addr: '0.0.0.0:5000' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 gossip_static_outbound: diff --git a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml index 7a82705990c..a5f752fe405 100644 --- a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml +++ b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml @@ -1,6 +1,6 @@ server_addr: '0.0.0.0:3054' public_addr: '127.0.0.1:3054' -debug_page_addr: '127.0.0.1:5000' +debug_page_addr: '0.0.0.0:5000' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 gossip_static_outbound: diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json index be869ead40b..74b4b822801 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json @@ -24,7 +24,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 3, + "id": 2, "links": [], "liveNow": false, "panels": [ @@ -1005,7 +1005,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "Rate of RPC client requests, in packets per second.", + "description": "Rate of RPC client requests, in requests per second.", "fieldConfig": { "defaults": { "color": { @@ -1054,7 +1054,7 @@ } ] }, - "unit": "pps" + "unit": "reqps" }, "overrides": [] }, @@ -1098,7 +1098,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "Rate of RPC server responses, in packets per second.", + "description": "Rate of RPC server responses, in requests per second.", "fieldConfig": { "defaults": { "color": { @@ -1147,7 +1147,7 @@ } ] }, - "unit": "pps" + "unit": "reqps" }, "overrides": [] }, @@ -1202,6 +1202,6 @@ "timezone": "", "title": "Consensus", "uid": "STAAEORNk", - "version": 4, + "version": 2, "weekStart": "" } \ No newline at end of file diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json index d7177ae802e..0b3cb681e3b 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json @@ -24,7 +24,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 2, + "id": 1, "links": [], "liveNow": false, "panels": [ @@ -103,13 +103,49 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + 
"fillOpacity": 0, + "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" } }, "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, "unit": "bytes" }, "overrides": [] @@ -123,18 +159,11 @@ "id": 2, "options": { "legend": { + "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, "tooltip": { "mode": "single", "sort": "none" @@ -167,7 +196,7 @@ } ], "title": "Total disk space usage", - "type": "piechart" + "type": "timeseries" }, { "datasource": { @@ -409,6 +438,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "Shows the batch numbers on the local node and the server node.", "fieldConfig": { "defaults": { "color": { @@ -421,7 +451,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 33, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -470,13 +500,13 @@ "x": 12, "y": 16 }, - "id": 4, + "id": 39, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": false + "showLegend": true }, "tooltip": { "mode": "single", @@ -489,14 +519,28 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "builder", "exemplar": true, - "expr": "sum by (stage) (external_node_sync_lag)", + "expr": "sum by(stage) (external_node_fetcher_l1_batch{stage=\"open\"})", "interval": "", - "legendFormat": "", + "legendFormat": "Server", + "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "zksync_consensus_storage_batch_store_next_persisted_batch", + "hide": false, + "legendFormat": "Local", + "range": true, + "refId": "B" } ], - "title": "Sync lag (blocks)", + "title": "L1 batch sync lag", "transformations": [], "type": "timeseries" }, @@ -546,8 +590,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -598,7 +641,6 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "The percentage of transactions that are being reverted or that are succeeding.", "fieldConfig": { "defaults": { "color": { @@ -610,8 +652,8 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "drawStyle": "bars", - "fillOpacity": 0, + "drawStyle": "line", + "fillOpacity": 33, "gradientMode": "none", "hideFrom": { "legend": false, @@ -619,16 +661,19 @@ "viz": false }, "lineInterpolation": "linear", - "lineWidth": 2, + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", - "spanNulls": false, + "spanNulls": true, "stacking": { "group": "A", - "mode": "percent" + "mode": "none" }, "thresholdsStyle": { "mode": "off" @@ -639,8 +684,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -657,13 +701,13 @@ "x": 12, "y": 24 }, - "id": 38, + "id": 4, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": 
true + "showLegend": false }, "tooltip": { "mode": "single", @@ -676,14 +720,15 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "builder", - "expr": "sum by(status) (increase(server_state_keeper_tx_execution_result[1h]))", - "legendFormat": "__auto", - "range": true, + "exemplar": true, + "expr": "sum by (stage) (external_node_sync_lag)", + "interval": "", + "legendFormat": "", "refId": "A" } ], - "title": "Transactions execution status (%)", + "title": "L2 blocks sync lag", + "transformations": [], "type": "timeseries" }, { @@ -731,8 +776,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -778,6 +822,98 @@ "title": "Avg number of transactions in L2 block", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "The percentage of transactions that are being reverted or that are succeeding.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "percent" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 38, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "sum by(status) (increase(server_state_keeper_tx_execution_result[1h]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Transactions execution status (%)", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -823,8 +959,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -839,7 +974,7 @@ "h": 8, "w": 12, "x": 12, - "y": 32 + "y": 40 }, "id": 34, "options": { @@ -886,6 +1021,6 @@ "timezone": "", "title": "General", "uid": "1", - "version": 9, + "version": 3, "weekStart": "" } \ No newline at end of file diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml index 65f33c78b0e..fac65298bbc 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml @@ -5,6 +5,7 @@ providers: orgId: 1 folder: '' type: file + allowUiUpdates: true disableDeletion: false updateIntervalSeconds: 10 # How often Grafana will scan for changed dashboards options: diff --git a/docs/specs/README.md b/docs/specs/README.md index 1f163bf7845..d0b087ae93e 100644 --- a/docs/specs/README.md +++ 
b/docs/specs/README.md @@ -33,4 +33,4 @@ 1. [ZK Chain ecosystem](./zk_chains/README.md) - [Overview](./zk_chains/overview.md) - [Shared Bridge](./zk_chains/shared_bridge.md) - - [Hyperbridges](./zk_chains/hyperbridges.md) + - [Interop](./zk_chains/interop.md) diff --git a/docs/specs/zk_chains/README.md b/docs/specs/zk_chains/README.md index 4de575899dd..ce0a7c311a2 100644 --- a/docs/specs/zk_chains/README.md +++ b/docs/specs/zk_chains/README.md @@ -2,4 +2,4 @@ - [Overview](./overview.md) - [Shared Bridge](./shared_bridge.md) -- [Hyperbridges](./hyperbridges.md) +- [Interop](./interop.md) diff --git a/docs/specs/zk_chains/gateway.md b/docs/specs/zk_chains/gateway.md new file mode 100644 index 00000000000..f4ee68e242e --- /dev/null +++ b/docs/specs/zk_chains/gateway.md @@ -0,0 +1 @@ +# Gateway diff --git a/docs/specs/zk_chains/hyperbridges.md b/docs/specs/zk_chains/hyperbridges.md deleted file mode 100644 index 614fe61427e..00000000000 --- a/docs/specs/zk_chains/hyperbridges.md +++ /dev/null @@ -1,41 +0,0 @@ -# Hyperbridges - -## Introduction - -In the Shared bridge document we described how the L1 smart contracts work to support multiple chains, and we emphasized -that the core feature is hyperbridging, but we did not outline the hyperbridges themselves. This is because hyperbridges -are mostly L2 contracts. In this document we describe what hyperbridges are, and specify the necessary infrastructure. - -### Hyperbridge description - -Hyperbridges are trustless and cheap general native bridges between ZK Chains, allowing cross-chain function calls. -Trustlessness is achieved by relying on the main ZK Chain bridge to send a compressed message to L1, which is then sent -to and expanded on the destination ZK Chain. - -Technically they are a system of smart contracts that build on top of the enshrined L1<>L2 validating bridges, and can -interpret messages sent from L2 to L2 by verifying Merkle proofs. They are built alongside the protocol, they can -transfer the native asset of the ecosystem, and they can be used for asynchronous function calls between ZK Chains. - -![Hyperbridges](./img/hyperbridges.png) - -The trustless nature of hyperbridges allows the ecosystem to resemble a single VM. To illustrate imagine a new ZK Chain -joining the ecosystem. We will want ether/Dai/etc. to be accessible on this ZK Chain. This can be done automatically. -There will be a central erc20 deployer contract in the ecosystem, which will deploy the new ERC20 contract via the -hyperbridge. After the contract is deployed it will be able to interact other Dai contracts in the ecosystem. - -### High Level design - -![Hyperbridging](./img/hyperbridging.png) - -### L1 - -For the larger context see the [Shared Bridge](./shared_bridge.md) document, here we will focus on - -- HyperMailbox (part of Bridgehub). Contains the Hyperroot, root of Merkle tree of Hyperlogs. Hyperlogs are the L2->L1 - SysLogs that record the sent hyperbridge messages from the L2s. - -### L2 Contracts - -- Outbox system contract. It collects the hyperbridge txs into the hyperlog of the ZK Chain. -- Inbox system contract. This is where the hyperroot is imported and sent to L1 for settlement. 
Merkle proofs are - verified here, tx calls are started from here, nullifiers are stored here (add epochs later) diff --git a/docs/specs/zk_chains/interop.md b/docs/specs/zk_chains/interop.md new file mode 100644 index 00000000000..947742909b8 --- /dev/null +++ b/docs/specs/zk_chains/interop.md @@ -0,0 +1,49 @@ +# Interop + +## Introduction + +In the Shared bridge document we described how the L1 smart contracts work to support multiple chains, and we emphasized +that the core feature is interop. Interop happens via the same L1->L2 interface as described in the L1SharedBridge doc. +There is (with the interop upgrade) a Bridgehub, AssetRouter, NativeTokenVault and Nullifier deployed on every L2, and +they serve the same purpose as their L1 counterparts. Namely: + +- The Bridgehub is used to start the transaction. +- The AssetRouter and NativeTokenVault are the bridge contracts that handle the tokens. +- The Nullifier is used to prevent reexecution of xL2 txs. + +### Interop process + +![Interop](./img/hyperbridging.png) + +The interop process has 7 main steps, each with its substeps: + +1. Starting the transaction on the sending chain + + - The user calls the Bridgehub contract. If they want to use a bridge they call + `requestL2TransactionTwoBridges`, if they want to make a direct call they call `requestL2TransactionDirect` + function. + - The Bridgehub collects the base token fees necessary for the interop tx to be processed on the destination chain, + and if using the TwoBridges method the calldata and the destination contract (for more data see Shared bridge + doc). + - The Bridgehub emits a `NewPriorityRequest` event; this is the same as the one in our Mailbox contract. This event + specifies the xL2 txs, which uses the same format as L1->L2 txs. This event can be picked up and used to receive + the txs. + - This new priority request is sent as an L2->L1 message; it is included in the chain's merkle tree of emitted txs. + +2. The chain settles its proof on L1 or the Gateway, whichever is used as the settlement layer for the chain. +3. On the Settlement Layer (SL), the MessageRoot is updated in the MessageRoot contract. The new data includes all the + L2->L1 messages that are emitted from the settling chain. +4. The receiving chain picks up the updated MessageRoot from the Settlement Layer. +5. Now the xL2 txs can be imported on the destination chain. Along with the txs, a merkle proof needs to be sent to link + it to the MessageRoot. +6. Receiving the tx on the destination chain + + - On the destination chain the xL2 txs is verified. This means the merkle proof is checked against the MessageRoot. + This shows that the xL2 txs was indeed sent. + - After this the txs can be executed. The tx hash is stored in the L2Nullifier contract, so that the txs cannot be + replayed. + - The specified contract is called, with the calldata, and the message sender = + `keccak256(originalMessageSender, originChainId) >> 160`. This is to prevent the collision of the msg.sender + addresses. + +7. The destination chain settles on the SL and the MessageRoot that it imported is checked. diff --git a/docs/specs/zk_chains/shared_bridge.md b/docs/specs/zk_chains/shared_bridge.md index c464a7a154b..b43d3082b62 100644 --- a/docs/specs/zk_chains/shared_bridge.md +++ b/docs/specs/zk_chains/shared_bridge.md @@ -17,7 +17,7 @@ If you want to know more about ZK Chains, check this We want to create a system where: - ZK Chains should be launched permissionlessly within the ecosystem.
-- Hyperbridges should enable unified liquidity for assets across the ecosystem. +- Interop should enable unified liquidity for assets across the ecosystem. - Multi-chain smart contracts need to be easy to develop, which means easy access to traditional bridges, and other supporting architecture. @@ -58,20 +58,19 @@ be able to leverage them when available). #### Bridgehub - Acts as a hub for bridges, so that they have a single point of communication with all ZK Chain contracts. This allows - L1 assets to be locked in the same contract for all ZK Chains, including L3s and validiums. The `Bridgehub` also - implements the following: + L1 assets to be locked in the same contract for all ZK Chains. The `Bridgehub` also implements the following features: - `Registry` This is where ZK Chains can register, starting in a permissioned manner, but with the goal to be - permissionless in the future. This is where their `chainID` is determined. L3s will also register here. This - `Registry` is also where State Transition contracts should register. Each chain has to specify its desired ST when - registering (Initially, only one will be available). + permissionless in the future. This is where their `chainID` is determined. Chains on Gateway will also register here. + This `Registry` is also where Chain Type Manager contracts should register. Each chain has to specify its desired CTM + when registering (Initially, only one will be available). ``` function newChain( uint256 _chainId, - address _stateTransition + address _chainTypeManager ) external returns (uint256 chainId); - function newStateTransition(address _stateTransition) external; + function newChainTypeManager(address _chainTypeManager) external; ``` - `BridgehubMailbox` routes messages to the Diamond proxy’s Mailbox facet based on chainID @@ -79,43 +78,73 @@ be able to leverage them when available). - Same as the current zkEVM [Mailbox](https://github.com/matter-labs/era-contracts/blob/main/l1-contracts/contracts/zksync/facets/Mailbox.sol), just with chainId, - - Ether needs to be deposited and withdrawn from here. - This is where L2 transactions can be requested. ``` - function requestL2Transaction( - uint256 _chainId, - address _contractL2, - uint256 _l2Value, - bytes calldata _calldata, - uint256 _l2GasLimit, - uint256 _l2GasPerPubdataByteLimit, - bytes[] calldata _factoryDeps, - address _refundRecipient - ) public payable override returns (bytes32 canonicalTxHash) { - address proofChain = bridgeheadStorage.proofChain[_chainId]; - canonicalTxHash = IProofChain(proofChain).requestL2TransactionBridgehead( - _chainId, - msg.value, - msg.sender, - _contractL2, - _l2Value, - _calldata, - _l2GasLimit, - _l2GasPerPubdataByteLimit, - _factoryDeps, - _refundRecipient - ); - } + function requestL2TransactionTwoBridges( + L2TransactionRequestTwoBridgesOuter calldata _request + ) ``` -- `Hypermailbox` - - This will allow general message passing (L2<>L2, L2<>L3, etc). This is where the `Mailbox` sends the `Hyperlogs`. - `Hyperlogs` are commitments to these messages sent from a single ZK Chain. `Hyperlogs` are aggregated into a - `HyperRoot` in the `HyperMailbox`. 
- - This component has not been implemented yet + ``` + struct L2TransactionRequestTwoBridgesOuter { + uint256 chainId; + uint256 mintValue; + uint256 l2Value; + uint256 l2GasLimit; + uint256 l2GasPerPubdataByteLimit; + address refundRecipient; + address secondBridgeAddress; + uint256 secondBridgeValue; + bytes secondBridgeCalldata; + } + ``` -#### Main asset shared bridges +``` + struct L2TransactionRequestTwoBridgesInner { + bytes32 magicValue; + address l2Contract; + bytes l2Calldata; + bytes[] factoryDeps; + bytes32 txDataHash; +} +``` + +- The `requestL2TransactionTwoBridges` function should be used most of the time when bridging to a chain ( the exeption + is when the user bridges directly to a contract on the L2, without using a bridge contract on L1). The logic of it is + the following: + + - The user wants to bridge to chain with the provided `L2TransactionRequestTwoBridgesOuter.chainId`. + - Two bridges are called, the baseTokenBridge (i.e. the L1SharedBridge or L1AssetRouter after the Gateway upgrade) and + an arbitrary second bridge. The Bridgehub will provide the original caller address to both bridges, which can + request that the appropriate amount of tokens are transferred from the caller to the bridge. The caller has to set + the appropriate allowance for both bridges. (Often the bridges coincide, but they don't have to). + - The `L2TransactionRequestTwoBridgesOuter.mintValue` is the amount of baseTokens that will be minted on L2. This is + the amount of tokens that the baseTokenBridge will request from the user. If the baseToken is Eth, it will be + forwarded to the baseTokenBridge. + - The `L2TransactionRequestTwoBridgesOuter.l2Value` is the amount of tokens that will be deposited on L2. The second + bridge and the Mailbox receives this as an input (although our second bridge does not use the value). + - The `L2TransactionRequestTwoBridgesOuter.l2GasLimit` is the maximum amount of gas that will be spent on L2 to + complete the transaction. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.l2GasPerPubdataByteLimit` is the maximum amount of gas per pubdata byte + that will be spent on L2 to complete the transaction. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.refundRecipient` is the address that will be refunded for the gas spent on + L2. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeAddress` is the address of the second bridge that will be + called. This is the arbitrary address that is called from the Bridgehub. + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeValue` is the amount of tokens that will be deposited on L2. + The second bridge receives this value as the baseToken (i.e. Eth on L1). + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeCalldata` is the calldata that will be passed to the second + bridge. This is the arbitrary calldata that is passed from the Bridgehub to the second bridge. + - The secondBridge returns the `L2TransactionRequestTwoBridgesInner` struct to the Bridgehub. This is also passed to + the Mailbox as input. This is where the destination contract, calldata, factoryDeps are determined on the L2. 
+ + This setup allows the user to bridge the baseToken of the origin chain A to a chain B with some other baseToken, by + specifying the A's token in the secondBridgeValue, which will be minted on the destination chain as an ERC20 token, + and specifying the amount of B's token in the mintValue, which will be minted as the baseToken and used to cover the + gas costs. + +#### Main asset shared bridges L2TransactionRequestTwoBridgesInner - Some assets have to be natively supported (ETH, WETH) and it also makes sense to support some generally accepted token standards (ERC20 tokens), as this makes it easy to bridge those tokens (and ensures a single version of them exists on @@ -147,25 +176,18 @@ be able to leverage them when available). ); ``` -This topic is now covered more thoroughly by the Custom native token discussion. - -[Custom native token compatible with Hyperbridging](https://www.notion.so/Custom-native-token-compatible-with-Hyperbridging-54e190a1a76f44248cf84a38304a0641?pvs=21) +#### Chain Type Manager -#### State Transition - -- `StateTransition` A state transition manages proof verification and DA for multiple chains. It also implements the +- `ChainTypeManager` A chain type manager manages proof verification and DA for multiple chains. It also implements the following functionalities: - - `StateTransitionRegistry` The ST is shared for multiple chains, so initialization and upgrades have to be the same - for all chains. Registration is not permissionless but happens based on the registrations in the bridgehub’s - `Registry`. At registration a `DiamondProxy` is deployed and initialized with the appropriate `Facets` for each ZK - Chain. + - `ChainTypeRegistry` The ST is shared for multiple chains, so initialization and upgrades have to be the same for all + chains. Registration is not permissionless but happens based on the registrations in the bridgehub’s `Registry`. At + registration a `DiamondProxy` is deployed and initialized with the appropriate `Facets` for each ZK Chain. - `Facets` and `Verifier` are shared across chains that relies on the same ST: `Base`, `Executor` , `Getters`, `Admin` , `Mailbox.`The `Verifier` is the contract that actually verifies the proof, and is called by the `Executor`. - Upgrade Mechanism The system requires all chains to be up-to-date with the latest implementation, so whenever an update is needed, we have to “force” each chain to update, but due to decentralization, we have to give each chain a - time frame (more information in the - [Upgrade Mechanism](https://www.notion.so/ZK-Stack-shared-bridge-alpha-version-a37c4746f8b54fb899d67e474bfac3bb?pvs=21) - section). This is done in the update mechanism contract, this is where the bootloader and system contracts are + time frame. This is done in the update mechanism contract, this is where the bootloader and system contracts are published, and the `ProposedUpgrade` is stored. Then each chain can call this upgrade for themselves as needed. After the deadline is over, the not-updated chains are frozen, that is, cannot post new proofs. Frozen chains can unfreeze by updating their proof system. @@ -180,6 +202,7 @@ This topic is now covered more thoroughly by the Custom native token discussion. - A chain might implement its own specific consensus mechanism. This needs its own contracts. Only this contract will be able to submit proofs to the State Transition contract. +- DA contracts. - Currently, the `ValidatorTimelock` is an example of such a contract. 
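To make the `requestL2TransactionTwoBridges` flow described above more concrete, here is a minimal sketch of encoding such a call off-chain with foundry's `cast` (which this patch series builds from foundry-zksync). The chain id, gas values, addresses and amounts below are illustrative assumptions rather than values from this repository; the tuple layout mirrors `L2TransactionRequestTwoBridgesOuter` as listed above:

```sh
# Encode calldata for Bridgehub.requestL2TransactionTwoBridges; the tuple fields are
# (chainId, mintValue, l2Value, l2GasLimit, l2GasPerPubdataByteLimit,
#  refundRecipient, secondBridgeAddress, secondBridgeValue, secondBridgeCalldata).
cast calldata \
  "requestL2TransactionTwoBridges((uint256,uint256,uint256,uint256,uint256,address,address,uint256,bytes))" \
  "(324,1000000000000000000,0,2000000,800,0x0000000000000000000000000000000000000001,0x0000000000000000000000000000000000000002,0,0x)"
```

The resulting calldata is what a wallet would submit to the Bridgehub: the baseTokenBridge pulls `mintValue` from the caller, and the second bridge interprets `secondBridgeCalldata`, as the bullets above describe.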
### Components interactions @@ -199,22 +222,6 @@ features required to process proofs. The chain ID is set in the VM in a special -#### WETH Contract - -Ether, the native gas token is part of the core system contracts, so deploying it is not necessary. But WETH is just a -smart contract, it needs to be deployed and initialised. This happens from the L1 WETH bridge. This deploys on L2 the -corresponding bridge and ERC20 contract. This is deployed from L1, but the L2 address is known at deployment time. - -![deployWeth.png](./img/deployWeth.png) - -#### Deposit WETH - -The user can deposit WETH into the ecosystem using the WETH bridge on L1. The destination chain ID has to be specified. -The Bridgehub unwraps the WETH, and keeps the ETH, and send a message to the destination L2 to mint WETH to the -specified address. - -![depositWeth.png](./img/depositWeth.png) - --- ### Common Standards and Upgrades diff --git a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol new file mode 100644 index 00000000000..baa0d37b753 --- /dev/null +++ b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +/** + * Mock `KnownCodeStorage` counterpart producing `MarkedAsKnown` events and having `publishEVMBytecode` method + * added for EVM emulation, calls to which should be traced by the host. + */ +contract MockKnownCodeStorage { + event MarkedAsKnown(bytes32 indexed bytecodeHash, bool indexed sendBytecodeToL1); + + function markFactoryDeps(bool _shouldSendToL1, bytes32[] calldata _hashes) external { + unchecked { + uint256 hashesLen = _hashes.length; + for (uint256 i = 0; i < hashesLen; ++i) { + _markBytecodeAsPublished(_hashes[i], _shouldSendToL1); + } + } + } + + function markBytecodeAsPublished(bytes32 _bytecodeHash) external { + _markBytecodeAsPublished(_bytecodeHash, false); + } + + function _markBytecodeAsPublished(bytes32 _bytecodeHash, bool _shouldSendToL1) internal { + if (getMarker(_bytecodeHash) == 0) { + assembly { + sstore(_bytecodeHash, 1) + } + emit MarkedAsKnown(_bytecodeHash, _shouldSendToL1); + } + } + + bytes32 evmBytecodeHash; // For tests, it's OK to potentially collide with the marker slot for hash `bytes32(0)` + + /// Sets the EVM bytecode hash to be used in the next `publishEVMBytecode` call. + function setEVMBytecodeHash(bytes32 _bytecodeHash) external { + evmBytecodeHash = _bytecodeHash; + } + + function publishEVMBytecode(bytes calldata _bytecode) external { + bytes32 hash = evmBytecodeHash; + require(hash != bytes32(0), "EVM bytecode hash not set"); + + if (getMarker(evmBytecodeHash) == 0) { + assembly { + sstore(hash, 1) + } + } + emit MarkedAsKnown(hash, getMarker(hash) == 0); + evmBytecodeHash = bytes32(0); + } + + function getMarker(bytes32 _hash) public view returns (uint256 marker) { + assembly { + marker := sload(_hash) + } + } +} + +/** + * Mock `ContractDeployer` counterpart focusing on EVM bytecode deployment (via `create`; this isn't how real EVM bytecode deployment works, + * but it's good enough for low-level tests). 
+ */ +contract MockContractDeployer { + enum AccountAbstractionVersion { + None, + Version1 + } + + IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002)); + MockKnownCodeStorage constant KNOWN_CODE_STORAGE_CONTRACT = MockKnownCodeStorage(address(0x8004)); + + /// The returned value is obviously incorrect in the general case, but works well enough when called by the bootloader. + function extendedAccountVersion(address _address) public view returns (AccountAbstractionVersion) { + return AccountAbstractionVersion.Version1; + } + + /// Replaces real deployment with publishing a surrogate EVM "bytecode". + /// @param _salt bytecode hash + /// @param _input bytecode to publish + function create( + bytes32 _salt, + bytes32, // ignored, since it's not possible to set arbitrarily + bytes calldata _input + ) external payable returns (address) { + KNOWN_CODE_STORAGE_CONTRACT.setEVMBytecodeHash(_salt); + KNOWN_CODE_STORAGE_CONTRACT.publishEVMBytecode(_input); + address newAddress = address(uint160(msg.sender) + 1); + ACCOUNT_CODE_STORAGE_CONTRACT.storeAccountConstructedCodeHash(newAddress, _salt); + return newAddress; + } +} + +interface IAccountCodeStorage { + function getRawCodeHash(address _address) external view returns (bytes32); + function storeAccountConstructedCodeHash(address _address, bytes32 _hash) external; +} + +interface IRecursiveContract { + function recurse(uint _depth) external returns (uint); +} + +/// Native incrementing library. Not actually a library to simplify deployment. +contract IncrementingContract { + // Should not collide with other storage slots + uint constant INCREMENTED_SLOT = 0x123; + + function getIncrementedValue() public view returns (uint _value) { + assembly { + _value := sload(INCREMENTED_SLOT) + } + } + + function increment(address _thisAddress, uint _thisBalance) external { + require(msg.sender == tx.origin, "msg.sender not retained"); + require(address(this) == _thisAddress, "this address"); + require(address(this).balance == _thisBalance, "this balance"); + assembly { + sstore(INCREMENTED_SLOT, add(sload(INCREMENTED_SLOT), 1)) + } + } + + /// Tests delegation to a native or EVM contract at the specified target. + function testDelegateCall(address _target) external { + uint valueSnapshot = getIncrementedValue(); + (bool success, ) = _target.delegatecall(abi.encodeCall( + IncrementingContract.increment, + (address(this), address(this).balance) + )); + require(success, "delegatecall reverted"); + require(getIncrementedValue() == valueSnapshot + 1, "invalid value"); + } + + function testStaticCall(address _target, uint _expectedValue) external { + (bool success, bytes memory rawValue) = _target.staticcall(abi.encodeCall( + this.getIncrementedValue, + () + )); + require(success, "static call reverted"); + (uint value) = abi.decode(rawValue, (uint)); + require(value == _expectedValue, "value mismatch"); + + (success, ) = _target.staticcall(abi.encodeCall( + IncrementingContract.increment, + (address(this), address(this).balance) + )); + require(!success, "staticcall should've reverted"); + } +} + +uint constant EVM_EMULATOR_STIPEND = 1 << 30; + +/** + * Mock EVM emulator used in low-level tests. + */ +contract MockEvmEmulator is IRecursiveContract, IncrementingContract { + IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002)); + + /// Set to `true` for testing logic sanity. 
+ bool isUserSpace; + + modifier validEvmEntry() { + if (!isUserSpace) { + require(gasleft() >= EVM_EMULATOR_STIPEND, "no stipend"); + // Fetch bytecode for the executed contract. + bytes32 bytecodeHash = ACCOUNT_CODE_STORAGE_CONTRACT.getRawCodeHash(address(this)); + require(bytecodeHash != bytes32(0), "called contract not deployed"); + uint bytecodeVersion = uint(bytecodeHash) >> 248; + require(bytecodeVersion == 2, "non-EVM bytecode"); + + // Check that members of the current address are well-defined. + require(address(this).code.length != 0, "invalid code"); + require(address(this).codehash == bytecodeHash, "bytecode hash mismatch"); + } + _; + } + + function testPayment(uint _expectedValue, uint _expectedBalance) public payable validEvmEntry { + require(msg.value == _expectedValue, "unexpected msg.value"); + require(address(this).balance == _expectedBalance, "unexpected balance"); + } + + IRecursiveContract recursionTarget; + + function recurse(uint _depth) public validEvmEntry returns (uint) { + require(gasleft() < 2 * EVM_EMULATOR_STIPEND, "stipend provided multiple times"); + + if (_depth <= 1) { + return 1; + } else { + IRecursiveContract target = (address(recursionTarget) == address(0)) ? this : recursionTarget; + // The real emulator limits amount of gas when performing far calls by EVM gas, so we emulate this behavior as well. + uint gasToSend = isUserSpace ? gasleft() : (gasleft() - EVM_EMULATOR_STIPEND); + return target.recurse{gas: gasToSend}(_depth - 1) * _depth; + } + } + + function testRecursion(uint _depth, uint _expectedValue) external validEvmEntry returns (uint) { + require(recurse(_depth) == _expectedValue, "incorrect recursion"); + } + + function testExternalRecursion(uint _depth, uint _expectedValue) external validEvmEntry returns (uint) { + recursionTarget = new NativeRecursiveContract(IRecursiveContract(this)); + uint returnedValue = recurse(_depth); + recursionTarget = this; // This won't work on revert, but for tests, it's good enough + require(returnedValue == _expectedValue, "incorrect recursion"); + } + + MockContractDeployer constant CONTRACT_DEPLOYER_CONTRACT = MockContractDeployer(address(0x8006)); + + /// Emulates EVM contract deployment and a subsequent call to it in a single transaction. + function testDeploymentAndCall(bytes32 _evmBytecodeHash, bytes calldata _evmBytecode) external validEvmEntry { + IRecursiveContract newContract = IRecursiveContract(CONTRACT_DEPLOYER_CONTRACT.create( + _evmBytecodeHash, + _evmBytecodeHash, + _evmBytecode + )); + require(uint160(address(newContract)) == uint160(address(this)) + 1, "unexpected address"); + require(address(newContract).code.length > 0, "contract code length"); + require(address(newContract).codehash != bytes32(0), "contract code hash"); + + uint gasToSend = gasleft() - EVM_EMULATOR_STIPEND; + require(newContract.recurse{gas: gasToSend}(5) == 120, "unexpected recursive result"); + } + + fallback() external validEvmEntry { + require(msg.data.length == 0, "unsupported call"); + } +} + +contract NativeRecursiveContract is IRecursiveContract { + IRecursiveContract target; + + constructor(IRecursiveContract _target) { + target = _target; + } + + function recurse(uint _depth) external returns (uint) { + require(gasleft() < EVM_EMULATOR_STIPEND, "stipend spilled to native contract"); + return (_depth <= 1) ? 
1 : target.recurse(_depth - 1) * _depth; + } +} diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index bf69fd48e7b..bda8b88b548 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -29,8 +29,8 @@ RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2 GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 -GENESIS_PROTOCOL_VERSION = "25" -GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.25.2" +GENESIS_PROTOCOL_VERSION = "27" +GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.27.0" L1_WETH_BRIDGE_IMPL_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L1_WETH_BRIDGE_PROXY_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L1_WETH_TOKEN_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" diff --git a/etc/env/base/eth_sender.toml b/etc/env/base/eth_sender.toml index aed36bb697f..29a4a14e964 100644 --- a/etc/env/base/eth_sender.toml +++ b/etc/env/base/eth_sender.toml @@ -57,8 +57,8 @@ default_priority_fee_per_gas = 1_000_000_000 max_base_fee_samples = 10_000 # These two are parameters of the base_fee_per_gas formula in GasAdjuster. # The possible formulas are: -# 1. base_fee_median * (A + B * time_in_mempool) -# 2. base_fee_median * A * B ^ time_in_mempool +# 1. base_fee_median * (A + B * time_in_mempool_in_l1_blocks) +# 2. base_fee_median * A * B ^ time_in_mempool_in_l1_blocks # Currently the second is used. # To confirm, see core/bin/zksync_core/src/eth_sender/gas_adjuster/mod.rs pricing_formula_parameter_a = 1.5 diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index d8bef020c64..18107f0d4f9 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -1,6 +1,6 @@ # Environment configuration for the Rust code # We don't provide the group name like `[rust]` here, because we don't want -# these variables to be prefixed during the compiling. +# these variables to be prefixed during the compiling. # `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. 
@@ -26,7 +26,6 @@ zksync_node_sync=info,\ zksync_node_consensus=info,\ zksync_contract_verification_server=info,\ zksync_node_api_server=info,\ -zksync_tee_verifier_input_producer=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ zksync_consensus_storage=info,\ diff --git a/etc/env/consensus_config.yaml b/etc/env/consensus_config.yaml index 304ea31fac9..2564865eeb3 100644 --- a/etc/env/consensus_config.yaml +++ b/etc/env/consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3054 server_addr: "127.0.0.1:3054" public_addr: "127.0.0.1:3054" max_payload_size: 2500000 diff --git a/etc/env/en_consensus_config.yaml b/etc/env/en_consensus_config.yaml index f759e72e891..5c428866cb6 100644 --- a/etc/env/en_consensus_config.yaml +++ b/etc/env/en_consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3055 server_addr: '127.0.0.1:3055' public_addr: '127.0.0.1:3055' max_payload_size: 2500000 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index a4ba8c0201a..587ba4614a5 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -312,7 +312,7 @@ prometheus: observability: log_format: plain - log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" + log_directives: 
"zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset @@ -375,3 +375,10 @@ da_dispatcher: external_proof_integration_api: http_port: 3073 + +consensus: + port: 3054 + server_addr: "127.0.0.1:3054" + public_addr: "127.0.0.1:3054" + max_payload_size: 2500000 + gossip_dynamic_inbound_limit: 100 diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index 33634c253ba..212c17c2bf4 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -1,7 +1,6 @@ genesis_root: 0x526a5d3e384ff95a976283c79a976e0a2fb749e4631233f29d3765201efd937d genesis_batch_commitment: 0xb9794246425fd654cf6a4c2e9adfdd48aaaf97bf3b8ba6bdc88e1d141bcfa5b3 genesis_rollup_leaf_index: 64 -genesis_protocol_version: 25 default_aa_hash: 0x0100055d3993e14104994ca4d8cfa91beb9b544ee86894b45708b4824d832ff2 bootloader_hash: 0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf l1_chain_id: 9 @@ -9,6 +8,8 @@ l2_chain_id: 270 fee_account: '0x0000000000000000000000000000000000000001' prover: dummy_verifier: true - snark_wrapper_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 -genesis_protocol_semantic_version: 0.25.0 + recursion_scheduler_level_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 +genesis_protocol_semantic_version: 0.27.0 l1_batch_commit_data_generator_mode: Rollup +# TODO: uncomment once EVM emulator is present in the `contracts` submodule +# evm_emulator_hash: 0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91 diff --git a/etc/env/file_based/overrides/mainnet.yaml b/etc/env/file_based/overrides/mainnet.yaml index 0600abf694c..7565aac869a 100644 --- a/etc/env/file_based/overrides/mainnet.yaml +++ b/etc/env/file_based/overrides/mainnet.yaml @@ -1,5 +1,6 @@ state_keeper: - block_commit_deadline_ms: 3600000 + # Default batch seal time deadline: 8 hours + block_commit_deadline_ms: 28000000 minimal_l2_gas_price: 45250000 eth: sender: diff --git a/etc/env/file_based/overrides/testnet.yaml b/etc/env/file_based/overrides/testnet.yaml index e4da1ac96e2..d36cf9fc7bc 100644 --- a/etc/env/file_based/overrides/testnet.yaml 
+++ b/etc/env/file_based/overrides/testnet.yaml @@ -1,5 +1,6 @@ state_keeper: - block_commit_deadline_ms: 3600000 + # Default batch seal time deadline: 8 hours + block_commit_deadline_ms: 28000000 minimal_l2_gas_price: 25000000 eth: sender: diff --git a/etc/lint-config/ignore.yaml b/etc/lint-config/ignore.yaml index 3d0c4869df8..b4456a6c3fd 100644 --- a/etc/lint-config/ignore.yaml +++ b/etc/lint-config/ignore.yaml @@ -2,7 +2,8 @@ files: [ "KeysWithPlonkVerifier.sol", "TokenInit.sol", ".tslintrc.js", - ".prettierrc.js" + ".prettierrc.js", + "era-observability/README.md" ] dirs: [ "target", diff --git a/etc/multivm_bootloaders/vm_gateway/commit b/etc/multivm_bootloaders/vm_gateway/commit new file mode 100644 index 00000000000..a3547f57703 --- /dev/null +++ b/etc/multivm_bootloaders/vm_gateway/commit @@ -0,0 +1 @@ +a8bf0ca28d43899882a2e123e2fdf1379f0fd656 diff --git a/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin index f1e4fea448d452188464bfb65811f54bba94105c..f1b46172d6db15bff4ca6894b7f0d73830430003 100644 GIT binary patch delta 44 zcmV+{0Mq}V%LJgy1c0;wfF&UNHa&>zyn&c0iZIgSq9KYy6uj=aPAfiIpVA+oMmEaP Cz7(|p delta 44 zcmV+{0Mq}V%LJgy1c0;wfF&To?Ls%Ogqo6W_D`(+8@Cky-A&g-2?sN81h?b?P?liZ CzZ8A| diff --git a/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin index febc7363df05767f19a7ae0684e355711ca4ac76..34d17f1752fae7f38ab1584743ce245159ff2159 100644 GIT binary patch delta 44 zcmV+{0Mq~AtOVe!1c0;wKo%gO8o0wFkRBcTW2y+SkGO# C1QN{v delta 44 zcmV+{0Mq~AtOVe!1c0;wKo%gC{aeso!Cix14hbRRz`^tEmX5!9Y{sFYpdVYihi_Kq CUKUXR diff --git a/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin index 8a27d4617fdb725b033fc5cb6f06443101a8cede..55d570779dc60bfb6b763419476749bf4287307f 100644 GIT binary patch delta 44 zcmV+{0Mq~A%mm=f1c0;wKqeqdcsH@nzp*$8$zKq7Brm~gnrIq-M?G|C@Os@-2fH7% CR}$+0 delta 44 zcmV+{0Mq~A%mm=f1c0;wKqer=yT(wLQerluD&;tf@Qk%+?%e=SglTI&tA7()#uDM< C2Ngd6 diff --git a/etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin index c784db5a53e8b5b023d6d87eeb5c2a15c3097ce3..4ba51692817de4311a974614cd0c22eafe94c137 100644 GIT binary patch delta 44 zcmV+{0Mq~Aums?+1c0;wKo}rq6%4UAMrnE1SZ0^0$~cT Cof6gn delta 44 zcmV+{0Mq~Aums?+1c0;wKo}q|a%V0*K>`kfUfiEVc2{&d^{lNA<=BwZMS_$RgzBr6 CtP{Ba diff --git a/etc/nix/tee_prover.nix b/etc/nix/tee_prover.nix index 0b424522dff..55545d1bb8e 100644 --- a/etc/nix/tee_prover.nix +++ b/etc/nix/tee_prover.nix @@ -1,12 +1,19 @@ -{ cargoArtifacts -, craneLib +{ craneLib , commonArgs }: -craneLib.buildPackage (commonArgs // { +let pname = "zksync_tee_prover"; + cargoExtraArgs = "--locked -p zksync_tee_prover"; +in +craneLib.buildPackage (commonArgs // { + inherit pname; version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; - cargoExtraArgs = "-p zksync_tee_prover --bin zksync_tee_prover"; - inherit cargoArtifacts; + inherit cargoExtraArgs; + + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + inherit pname; + inherit cargoExtraArgs; + }); postInstall = '' strip $out/bin/zksync_tee_prover diff --git a/etc/nix/zksync.nix b/etc/nix/zksync.nix index 
c5fffc48b09..1ecac58b5d9 100644 --- a/etc/nix/zksync.nix +++ b/etc/nix/zksync.nix @@ -1,12 +1,14 @@ -{ cargoArtifacts -, craneLib +{ craneLib , commonArgs }: craneLib.buildPackage (commonArgs // { pname = "zksync"; version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; cargoExtraArgs = "--all"; - inherit cargoArtifacts; + + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + pname = "zksync-era-workspace"; + }); outputs = [ "out" diff --git a/etc/upgrades/1728066632-protocol-defense/common.json b/etc/upgrades/1728066632-protocol-defense/common.json new file mode 100644 index 00000000000..4011159e2dc --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/common.json @@ -0,0 +1,5 @@ +{ + "name": "protocol-defense", + "creationTimestamp": 1728066632, + "protocolVersion": "0.25.0" +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/crypto.json b/etc/upgrades/1728066632-protocol-defense/stage/crypto.json new file mode 100644 index 00000000000..65f8a3cc066 --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/crypto.json @@ -0,0 +1,6 @@ +{ + "verifier": { + "address": "0x06aa7a7B07108F7C5539645e32DD5c21cBF9EB66", + "txHash": "0x1e14eaa49a225d6707016cb7525ba3839e9589c0a85307105d1036133ce6c319" + } +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json b/etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json new file mode 100644 index 00000000000..7389360d64e --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json @@ -0,0 +1,198 @@ +[ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x90C0A0a63d7ff47BfAA1e9F8fa554dabc986504a", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + 
"0xfc57565f" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x81754d2E48e3e553ba6Dfd193FC72B3A0c6076d9", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x922805Cf0C00C9A19C14603529Fb1a6f63861d80", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xBB13642F795014E0EAC2b0d52ECD5162ECb66712", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 0, + "isFreezable": true + } +] \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/facets.json b/etc/upgrades/1728066632-protocol-defense/stage/facets.json new file mode 100644 index 00000000000..acc6456181e --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/facets.json @@ -0,0 +1,18 @@ +{ + "ExecutorFacet": { + "address": "0xBB13642F795014E0EAC2b0d52ECD5162ECb66712", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "AdminFacet": { + "address": "0x90C0A0a63d7ff47BfAA1e9F8fa554dabc986504a", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "GettersFacet": { + "address": "0x81754d2E48e3e553ba6Dfd193FC72B3A0c6076d9", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "MailboxFacet": { + "address": "0x922805Cf0C00C9A19C14603529Fb1a6f63861d80", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json b/etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json new file mode 100644 index 00000000000..4ebb6009f3f --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json @@ -0,0 +1,394 @@ +{ + "systemContracts": [ + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd" + ], + "address": "0x0000000000000000000000000000000000000000" + }, + { + "name": "Ecrecover", + "bytecodeHashes": [ + "0x010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b" + ], + "address": "0x0000000000000000000000000000000000000001" + }, + { + "name": "SHA256", + "bytecodeHashes": [ + "0x010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a" + ], + "address": "0x0000000000000000000000000000000000000002" + }, + { + "name": "EcAdd", + "bytecodeHashes": [ + "0x01000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b85" + ], + "address": "0x0000000000000000000000000000000000000006" + }, + { + "name": "EcMul", + "bytecodeHashes": [ + "0x010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66" + ], + "address": 
"0x0000000000000000000000000000000000000007" + }, + { + "name": "EcPairing", + "bytecodeHashes": [ + "0x01000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b299" + ], + "address": "0x0000000000000000000000000000000000000008" + }, + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd" + ], + "address": "0x0000000000000000000000000000000000008001" + }, + { + "name": "AccountCodeStorage", + "bytecodeHashes": [ + "0x0100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e1" + ], + "address": "0x0000000000000000000000000000000000008002" + }, + { + "name": "NonceHolder", + "bytecodeHashes": [ + "0x010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c73" + ], + "address": "0x0000000000000000000000000000000000008003" + }, + { + "name": "KnownCodesStorage", + "bytecodeHashes": [ + "0x0100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac9" + ], + "address": "0x0000000000000000000000000000000000008004" + }, + { + "name": "ImmutableSimulator", + "bytecodeHashes": [ + "0x01000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a" + ], + "address": "0x0000000000000000000000000000000000008005" + }, + { + "name": "ContractDeployer", + "bytecodeHashes": [ + "0x010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee" + ], + "address": "0x0000000000000000000000000000000000008006" + }, + { + "name": "L1Messenger", + "bytecodeHashes": [ + "0x010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e" + ], + "address": "0x0000000000000000000000000000000000008008" + }, + { + "name": "MsgValueSimulator", + "bytecodeHashes": [ + "0x0100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f" + ], + "address": "0x0000000000000000000000000000000000008009" + }, + { + "name": "L2BaseToken", + "bytecodeHashes": [ + "0x01000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4" + ], + "address": "0x000000000000000000000000000000000000800a" + }, + { + "name": "SystemContext", + "bytecodeHashes": [ + "0x010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5" + ], + "address": "0x000000000000000000000000000000000000800b" + }, + { + "name": "BootloaderUtilities", + "bytecodeHashes": [ + "0x010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce" + ], + "address": "0x000000000000000000000000000000000000800c" + }, + { + "name": "EventWriter", + "bytecodeHashes": [ + "0x010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98" + ], + "address": "0x000000000000000000000000000000000000800d" + }, + { + "name": "Compressor", + "bytecodeHashes": [ + "0x0100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576" + ], + "address": "0x000000000000000000000000000000000000800e" + }, + { + "name": "ComplexUpgrader", + "bytecodeHashes": [ + "0x0100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d" + ], + "address": "0x000000000000000000000000000000000000800f" + }, + { + "name": "Keccak256", + "bytecodeHashes": [ + "0x0100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b" + ], + "address": "0x0000000000000000000000000000000000008010" + }, + { + "name": "CodeOracle", + "bytecodeHashes": [ + "0x01000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e3" + ], + "address": "0x0000000000000000000000000000000000008012" + }, + { + "name": "P256Verify", + "bytecodeHashes": [ + "0x010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a" + ], + "address": "0x0000000000000000000000000000000000000100" + }, + { + "name": "PubdataChunkPublisher", + 
"bytecodeHashes": [ + "0x010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7" + ], + "address": "0x0000000000000000000000000000000000008011" + }, + { + "name": "Create2Factory", + "bytecodeHashes": [ + "0x010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf546" + ], + "address": "0x0000000000000000000000000000000000010000" + } + ], + "defaultAA": { + "name": "DefaultAccount", + "bytecodeHashes": [ + "0x0100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe30" + ] + }, + "bootloader": { + "name": "Bootloader", + "bytecodeHashes": [ + "0x010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b678" + ] + }, + "forcedDeployments": [ + { + "bytecodeHash": "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd", + "newAddress": "0x0000000000000000000000000000000000000000", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b", + "newAddress": "0x0000000000000000000000000000000000000001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a", + "newAddress": "0x0000000000000000000000000000000000000002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b85", + "newAddress": "0x0000000000000000000000000000000000000006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66", + "newAddress": "0x0000000000000000000000000000000000000007", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b299", + "newAddress": "0x0000000000000000000000000000000000000008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd", + "newAddress": "0x0000000000000000000000000000000000008001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e1", + "newAddress": "0x0000000000000000000000000000000000008002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c73", + "newAddress": "0x0000000000000000000000000000000000008003", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac9", + "newAddress": "0x0000000000000000000000000000000000008004", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a", + "newAddress": "0x0000000000000000000000000000000000008005", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee", + "newAddress": "0x0000000000000000000000000000000000008006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e", + "newAddress": "0x0000000000000000000000000000000000008008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": 
"0x0100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f", + "newAddress": "0x0000000000000000000000000000000000008009", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4", + "newAddress": "0x000000000000000000000000000000000000800a", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5", + "newAddress": "0x000000000000000000000000000000000000800b", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce", + "newAddress": "0x000000000000000000000000000000000000800c", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98", + "newAddress": "0x000000000000000000000000000000000000800d", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576", + "newAddress": "0x000000000000000000000000000000000000800e", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d", + "newAddress": "0x000000000000000000000000000000000000800f", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b", + "newAddress": "0x0000000000000000000000000000000000008010", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e3", + "newAddress": "0x0000000000000000000000000000000000008012", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a", + "newAddress": "0x0000000000000000000000000000000000000100", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7", + "newAddress": "0x0000000000000000000000000000000000008011", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf546", + "newAddress": "0x0000000000000000000000000000000000010000", + "value": 0, + "input": "0x", + "callConstructor": false + } + ], + "forcedDeploymentCalldata": 
"0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000
0000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7000000000000000000000000000000
00000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "calldata": "0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
00000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000
000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "tx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 25, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd0000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac90000000000000000000000000000000000000000000000000000000000008004000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "delegatedCalldata": 
"0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000
0000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7000000000000000000000000000000
00000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/transactions.json b/etc/upgrades/1728066632-protocol-defense/stage/transactions.json new file mode 100644 index 00000000000..fceeb9bd407 --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/transactions.json @@ -0,0 +1,253 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 25, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb
55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a000000000000000000000000000000000000000000000000000000000000800500000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b678", + "defaultAccountHash": "0x0100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe30", + "verifier": "0x06aa7a7B07108F7C5539645e32DD5c21cBF9EB66", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x6704ae40" + }, + "factoryDeps": [], + "newProtocolVersion": 107374182400, + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x08284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a000000000000000000000000000000000000000000000000000000000000011600000000000000000
00000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c730000000000000000000000000000000000000000000000000000000000008003000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0xc11816734C1961ed67a9e2A34d9956eF8d03AD72", + "protocolVersionSemVer": "0.25.0", + "packedProtocolVersion": 107374182400, + "upgradeTimestamp": "1728360000", + "stmUpgradeCalldata": 
"0x2e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000c11816734c1961ed67a9e2a34d9956ef8d03ad72000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000e80000000000000000000000000000000000000000000000000000000000000144000000000000000000000000000000000000000000000000000000000000015e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d04681560000000000000000
0000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000090c0a0a63d7ff47bfaa1e9f8fa554dabc986504a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000
0000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f0000000000000000000000000000000000000000000000000000000000000000000000000000000081754d2e48e3e553ba6dfd193fc72b3a0c6076d900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000
000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000922805cf0c00c9a19c14603529fb1a6f63861d80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000bb13642f795014e0eac2b0d52ecd5162ecb667120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da43000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b4408284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b85000000000000000000000000000000000000000000000000000000000000000600000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e000000000000000000000000000000000000000000000000000000000000800800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000
00000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "chainAdminUpgradeCalldata": 
"0x69340beb00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000033c4fc57565f000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000c11816734c1961ed67a9e2a34d9956ef8d03ad72000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000e80000000000000000000000000000000000000000000000000000000000000144000000000000000000000000000000000000000000000000000000000000015e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000
000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d340000000000000000000000000000000000000000000
00000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000090c0a0a63d7ff47bfaa1e9f8fa554dabc986504a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f0000000000000000000000000000000000000000000000000000000000000000000000000000000081754d2e48e3e553ba6dfd193fc72b3a0c6076d900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a94100000000000000
00000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000922805cf0c00c9a19c14603529fb1a6f63861d80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000bb13642f795014e0eac2b0d52ecd5162ecb667120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da43000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b4408284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa20000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a0000000000000000000000000000000000000000000000000000000000000002000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee0000000000000000000000000000000000000000000000000000000000008006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000
000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" 
+ ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x90C0A0a63d7ff47BfAA1e9F8fa554dabc986504a", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x81754d2E48e3e553ba6Dfd193FC72B3A0c6076d9", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x922805Cf0C00C9A19C14603529Fb1a6f63861d80", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xBB13642F795014E0EAC2b0d52ECD5162ECb66712", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 0, + "isFreezable": true + } + ], + "initAddress": "0xc11816734C1961ed67a9e2A34d9956eF8d03AD72", + "initCalldata": 
"0x08284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a000000000000000000000000000000000000000000000000000000000000011600000000000000000
00000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c730000000000000000000000000000000000000000000000000000000000008003000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/utils/src/index.ts b/etc/utils/src/index.ts index 0e2d5e25eda..6246a209c84 100644 --- a/etc/utils/src/index.ts +++ b/etc/utils/src/index.ts @@ -26,6 +26,7 @@ const IGNORED_DIRS = [ 'cache-zk', // Ignore 
directories with OZ and forge submodules. 'contracts/l1-contracts/lib', + 'contracts/lib', 'era-observability' ]; const IGNORED_FILES = ['KeysWithPlonkVerifier.sol', 'TokenInit.sol', '.tslintrc.js', '.prettierrc.js']; diff --git a/flake.nix b/flake.nix index ef618816f9c..8c08e880910 100644 --- a/flake.nix +++ b/flake.nix @@ -47,7 +47,7 @@ packages = { # to ease potential cross-compilation, the overlay is used inherit (appliedOverlay.zksync-era) zksync tee_prover container-tee-prover-azure container-tee-prover-dcap; - default = appliedOverlay.zksync-era.zksync; + default = appliedOverlay.zksync-era.tee_prover; }; devShells.default = appliedOverlay.zksync-era.devShell; @@ -91,7 +91,7 @@ ./Cargo.toml ./core ./prover - ./zk_toolbox + ./zkstack_cli ./.github/release-please/manifest.json ]; }; @@ -107,10 +107,6 @@ strictDeps = true; inherit hardeningEnable; }; - - cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { - pname = "zksync-era-workspace"; - }); in { zksync-era = rec { @@ -120,12 +116,11 @@ }; zksync = pkgs.callPackage ./etc/nix/zksync.nix { - inherit cargoArtifacts; inherit craneLib; inherit commonArgs; }; + tee_prover = pkgs.callPackage ./etc/nix/tee_prover.nix { - inherit cargoArtifacts; inherit craneLib; inherit commonArgs; }; diff --git a/infrastructure/protocol-upgrade/README.md b/infrastructure/protocol-upgrade/README.md index da5ee313dab..c7998b96123 100644 --- a/infrastructure/protocol-upgrade/README.md +++ b/infrastructure/protocol-upgrade/README.md @@ -25,13 +25,15 @@ If not provided as arguments, the tool can retrieve certain values from environm 2. `l2rpc` - `API_WEB3_JSON_RPC_HTTP_URL` 3. `create2-address` - `CONTRACTS_CREATE2_FACTORY_ADDR` 4. `zksync-address` - `CONTRACTS_DIAMOND_PROXY_ADDR` -5. `nonce` - Taken from the node via `l1rpc` -6. `gas-price` - Taken from the node via `l1rpc` -7. `environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`, +5. `upgrade-address` - `CONTRACTS_DEFAULT_UPGRADE_ADDR` +6. `l2-upgrader-address` - `CONTRACTS_L2_DEFAULT_UPGRADE_ADDR` +7. `nonce` - Taken from the node via `l1rpc` +8. `gas-price` - Taken from the node via `l1rpc` +9. `environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`, `testnet2`, `stage2`, `mainnet2`. Each upgrade on different environments is performed separately since the contract addresses differ between environments. -8. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it - explicitly. +10. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it + explicitly. 
### Create a Protocol Upgrade Proposal @@ -215,8 +217,7 @@ $ zk f yarn start transactions build-default \ --l2-upgrader-address \ --diamond-upgrade-proposal-id \ --l1rpc \ ---zksync-address \ ---use-new-governance +--zksync-address ``` To execute the `proposeTransparentUpgrade` transaction on L1, use the following command: @@ -228,7 +229,6 @@ $ zk f yarn start transactions propose-upgrade \ --gas-price \ --nonce \ --zksync-address \ ---new-governance \ --environment ``` @@ -241,7 +241,6 @@ $ zk f yarn start transactions execute-upgrade \ --gas-price \ --nonce \ --zksync-address \ ---new-governance \ --environment ``` @@ -254,6 +253,5 @@ $ zk f yarn start transactions cancel-upgrade \ --zksync-address \ --gas-price \ --nonce \ ---new-governance \ --environment ``` diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index dfea3a3bfc3..bd7df8ab456 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -3,12 +3,10 @@ import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-c import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1, AdminFacetFactory, - GovernanceFactory, StateTransitionManagerFactory, ChainAdminFactory } from 'l1-contracts/typechain'; import { FacetCut } from 'l1-contracts/src.ts/diamondCut'; -import { IZkSyncFactory } from '../pre-boojum/IZkSyncFactory'; import { ComplexUpgraderFactory } from 'system-contracts/typechain'; import { getCommonDataFileName, @@ -29,12 +27,26 @@ import * as path from 'path'; const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); +export enum Action { + Add = 0, + Replace = 1, + Remove = 2 +} + export interface DiamondCutData { facetCuts: FacetCut[]; initAddress: string; initCalldata: string; } +export interface ChainCreationParams { + genesisUpgrade: string; + genesisBatchHash: string; + genesisIndexRepeatedStorageChanges: number; + genesisBatchCommitment: string; + diamondCut: DiamondCutData; +} + export interface ForceDeployment { // The bytecode hash to put on an address bytecodeHash: BytesLike; @@ -89,7 +101,6 @@ export interface ProposedUpgrade { postUpgradeCalldata: BytesLike; upgradeTimestamp: ethers.BigNumber; newProtocolVersion: BigNumberish; - newAllowList: string; } function buildNoopL2UpgradeTx(): L2CanonicalTransaction { @@ -123,10 +134,8 @@ export function buildProposeUpgrade( bootloaderHash?: BytesLike, defaultAccountHash?: BytesLike, verifier?: string, - newAllowList?: string, l2ProtocolUpgradeTx?: L2CanonicalTransaction ): ProposedUpgrade { - newAllowList = newAllowList ?? ethers.constants.AddressZero; bootloaderHash = bootloaderHash ?? ethers.constants.HashZero; defaultAccountHash = defaultAccountHash ?? ethers.constants.HashZero; l1ContractsUpgradeCalldata = l1ContractsUpgradeCalldata ?? 
'0x'; @@ -142,8 +151,7 @@ export function buildProposeUpgrade( postUpgradeCalldata, upgradeTimestamp, factoryDeps: [], - newProtocolVersion, - newAllowList + newProtocolVersion }; } @@ -171,43 +179,6 @@ export function prepareDefaultCalldataForL2upgrade(forcedDeployments: ForceDeplo return complexUpgraderCalldata; } -interface GovernanceTx { - scheduleCalldata: string; - executeCalldata: string; - operation: any; -} - -function prepareGovernanceTxs(target: string, data: BytesLike): GovernanceTx { - const govCall = { - target: target, - value: 0, - data: data - }; - - const operation = { - calls: [govCall], - predecessor: ethers.constants.HashZero, - salt: ethers.constants.HashZero - }; - - const governance = new GovernanceFactory(); - - // Get transaction data of the `scheduleTransparent` - const scheduleCalldata = governance.interface.encodeFunctionData('scheduleTransparent', [ - operation, - 0 // delay - ]); - - // Get transaction data of the `execute` - const executeCalldata = governance.interface.encodeFunctionData('execute', [operation]); - - return { - scheduleCalldata, - executeCalldata, - operation - }; -} - function prepareChainAdminCalldata(target: string, data: BytesLike): string { const call = { target: target, @@ -221,15 +192,18 @@ function prepareChainAdminCalldata(target: string, data: BytesLike): string { return calldata; } -export function prepareTransparentUpgradeCalldataForNewGovernance( +export function prepareUpgradeCalldata( oldProtocolVersion, oldProtocolVersionDeadline, newProtocolVersion, initCalldata, upgradeAddress: string, facetCuts: FacetCut[], - stmAddress: string, zksyncAddress: string, + genesisUpgradeAddress: string, + genesisBatchHash: string, + genesisIndexRepeatedStorageChanges: number, + genesisBatchCommitment: string, prepareDirectOperation?: boolean, chainId?: string ) { @@ -238,6 +212,21 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( initAddress: upgradeAddress, initCalldata }; + + let chainCreationDiamondCut: DiamondCutData = { + facetCuts: facetCuts.filter((cut) => cut.action == Action.Add), + initAddress: genesisUpgradeAddress, + initCalldata: '0x' + }; + + let chainCreationParams: ChainCreationParams = { + genesisUpgrade: genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, + diamondCut: chainCreationDiamondCut + }; + // Prepare calldata for STM let stm = new StateTransitionManagerFactory(); const stmUpgradeCalldata = stm.interface.encodeFunctionData('setNewVersionUpgrade', [ @@ -247,8 +236,9 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( newProtocolVersion ]); - const { scheduleCalldata: stmScheduleTransparentOperation, executeCalldata: stmExecuteOperation } = - prepareGovernanceTxs(stmAddress, stmUpgradeCalldata); + const stmSetChainCreationCalldata = stm.interface.encodeFunctionData('setChainCreationParams', [ + chainCreationParams + ]); // Prepare calldata for upgrading diamond proxy let adminFacet = new AdminFacetFactory(); @@ -257,30 +247,13 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( diamondCut ]); - const { - scheduleCalldata: scheduleTransparentOperation, - executeCalldata: executeOperation, - operation: governanceOperation - } = prepareGovernanceTxs(zksyncAddress, diamondProxyUpgradeCalldata); - - const newExecuteChainUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); - - const legacyScheduleTransparentOperation = adminFacet.interface.encodeFunctionData('executeUpgrade', 
[diamondCut]); - const { scheduleCalldata: legacyScheduleOperation, executeCalldata: legacyExecuteOperation } = prepareGovernanceTxs( - zksyncAddress, - legacyScheduleTransparentOperation - ); + const chainAdminUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); let result: any = { - stmScheduleTransparentOperation, - stmExecuteOperation, - scheduleTransparentOperation, - executeOperation, - newExecuteChainUpgradeCalldata, + stmUpgradeCalldata, + chainAdminUpgradeCalldata, diamondCut, - governanceOperation, - legacyScheduleOperation, - legacyExecuteOperation + stmSetChainCreationCalldata }; if (prepareDirectOperation) { @@ -290,13 +263,9 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( const stmDirecUpgradeCalldata = stm.interface.encodeFunctionData('executeUpgrade', [chainId, diamondCut]); - const { scheduleCalldata: stmScheduleOperationDirect, executeCalldata: stmExecuteOperationDirect } = - prepareGovernanceTxs(stmAddress, stmDirecUpgradeCalldata); - result = { ...result, - stmScheduleOperationDirect, - stmExecuteOperationDirect + stmDirecUpgradeCalldata }; } @@ -305,16 +274,16 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( export function buildDefaultUpgradeTx( environment, - diamondUpgradeProposalId, upgradeAddress, - l2UpgraderAddress, oldProtocolVersion, oldProtocolVersionDeadline, upgradeTimestamp, - newAllowList, - stmAddress, zksyncAddress, postUpgradeCalldataFlag, + genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, prepareDirectOperation?, chainId? ) { @@ -389,21 +358,23 @@ export function buildDefaultUpgradeTx( bootloaderHash, defaultAAHash, cryptoVerifierAddress, - newAllowList, l2UpgradeTx ); let l1upgradeCalldata = prepareDefaultCalldataForL1upgrade(proposeUpgradeTx); - let upgradeData = prepareTransparentUpgradeCalldataForNewGovernance( + let upgradeData = prepareUpgradeCalldata( oldProtocolVersion, oldProtocolVersionDeadline, packedNewProtocolVersion, l1upgradeCalldata, upgradeAddress, facetCuts, - stmAddress, zksyncAddress, + genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, prepareDirectOperation, chainId ); @@ -414,7 +385,6 @@ export function buildDefaultUpgradeTx( upgradeAddress, protocolVersionSemVer: newProtocolVersionSemVer, packedProtocolVersion: packedNewProtocolVersion, - diamondUpgradeProposalId, upgradeTimestamp, ...upgradeData }; @@ -423,31 +393,6 @@ export function buildDefaultUpgradeTx( console.log('Default upgrade transactions are generated'); } -async function sendTransaction( - calldata: BytesLike, - privateKey: string, - l1rpc: string, - to: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number -) { - const wallet = getWallet(l1rpc, privateKey); - gasPrice = gasPrice ?? (await wallet.provider.getGasPrice()); - nonce = nonce ?? 
(await wallet.getTransactionCount()); - const tx = await wallet.sendTransaction({ - to, - data: calldata, - value: 0, - gasLimit: 10_000_000, - gasPrice, - nonce - }); - console.log('Transaction hash: ', tx.hash); - await tx.wait(); - console.log('Transaction is executed'); -} - export function getWallet(l1rpc, privateKey) { if (!l1rpc) { l1rpc = web3Url(); @@ -462,99 +407,6 @@ export function getWallet(l1rpc, privateKey) { ).connect(provider); } -async function sendPreparedTx( - privateKey: string, - l1rpc: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number, - governanceAddr: string, - transactionsJsonField: string, - logText: string -) { - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - const calldata = transactions[transactionsJsonField]; - - console.log(`${logText} for protocolVersion ${transactions.protocolVersion}`); - await sendTransaction(calldata, privateKey, l1rpc, governanceAddr, environment, gasPrice, nonce); -} - -async function cancelUpgrade( - privateKey: string, - l1rpc: string, - zksyncAddress: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number, - execute: boolean, - newGovernanceAddress: string -) { - if (newGovernanceAddress != null) { - let wallet = getWallet(l1rpc, privateKey); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - - let governance = GovernanceFactory.connect(newGovernanceAddress, wallet); - const operation = transactions.governanceOperation; - - const operationId = await governance.hashOperation(operation); - - console.log(`Cancel upgrade operation with id: ${operationId}`); - if (execute) { - const tx = await governance.cancel(operationId); - await tx.wait(); - console.log('Operation canceled'); - } else { - const calldata = governance.interface.encodeFunctionData('cancel', [operationId]); - console.log(`Cancel upgrade calldata: ${calldata}`); - } - } else { - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - let wallet = getWallet(l1rpc, privateKey); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - - const transparentUpgrade = transactions.transparentUpgrade; - const diamondUpgradeProposalId = transactions.diamondUpgradeProposalId; - - const proposalHash = await zkSync.upgradeProposalHash( - transparentUpgrade, - diamondUpgradeProposalId, - ethers.constants.HashZero - ); - - console.log(`Cancel upgrade with hash: ${proposalHash}`); - let cancelUpgradeCalldata = zkSync.interface.encodeFunctionData('cancelUpgradeProposal', [proposalHash]); - if (execute) { - await sendTransaction( - cancelUpgradeCalldata, - privateKey, - l1rpc, - zksyncAddress, - environment, - gasPrice, - nonce - ); - } else { - console.log(`Cancel upgrade calldata: ${cancelUpgradeCalldata}`); - } - } -} - -async function getNewDiamondUpgradeProposalId(l1rpc: string, zksyncAddress: string) { - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - // We don't care about the wallet here, we just need to make a get call. 
- let wallet = getWallet(l1rpc, undefined); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - let proposalId = await zkSync.getCurrentProposalId(); - proposalId = proposalId.add(1); - console.log( - `New proposal id: ${proposalId} for ${zksyncAddress} network: ${JSON.stringify( - await wallet.provider.getNetwork() - )}` - ); - return proposalId; -} - export const command = new Command('transactions').description( 'prepare the transactions and their calldata for the upgrade' ); @@ -564,223 +416,31 @@ command .requiredOption('--upgrade-timestamp ') .option('--upgrade-address ') .option('--environment ') - .option('--new-allow-list ') - .option('--l2-upgrader-address ') - .option('--diamond-upgrade-proposal-id ') .option('--old-protocol-version ') .option('--old-protocol-version-deadline ') .option('--l1rpc ') .option('--zksync-address ') - .option('--state-transition-manager-address ') .option('--chain-id ') .option('--prepare-direct-operation ') - .option('--use-new-governance') - .option('--post-upgrade-calldata') + .option('--post-upgrade-calldata ') + .option('--genesis-upgrade-address ') + .option('--genesis-batch-hash ') + .option('--genesis-index-repeated-storage-changes ') + .option('--genesis-batch-commitment ') .action(async (options) => { - if (!options.useNewGovernance) { - // TODO(X): remove old governance functionality from the protocol upgrade tool - throw new Error('Old governance is not supported anymore'); - } - - let diamondUpgradeProposalId = options.diamondUpgradeProposalId; - if (!diamondUpgradeProposalId && !options.useNewGovernance) { - diamondUpgradeProposalId = await getNewDiamondUpgradeProposalId(options.l1rpc, options.zksyncAddress); - } - buildDefaultUpgradeTx( options.environment, - diamondUpgradeProposalId, options.upgradeAddress, - options.l2UpgraderAddress, options.oldProtocolVersion, options.oldProtocolVersionDeadline, options.upgradeTimestamp, - options.newAllowList, - options.stateTransitionManagerAddress, options.zksyncAddress, options.postUpgradeCalldata, + options.genesisUpgradeAddress, + options.genesisBatchHash, + options.genesisIndexRepeatedStorageChanges, + options.genesisBatchCommitment, options.prepareDirectOperation, options.chainId ); }); - -command - .command('propose-upgrade-stm') - .option('--environment ') - .option('--private-key ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmScheduleTransparentOperation', - 'Proposing upgrade for STM' - ); - }); - -command - .command('execute-upgrade-stm') - .option('--environment ') - .option('--private-key ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmExecuteOperation', - 'Executing upgrade for STM' - ); - }); - -command - .command('propose-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - 
.option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'scheduleTransparentOperation', - 'Proposing "upgradeChainFromVersion" upgrade' - ); - }); - -command - .command('execute-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'executeOperation', - 'Executing "upgradeChainFromVersion" upgrade' - ); - }); - -command - .command('propose-upgrade-direct') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmScheduleOperationDirect', - 'Executing direct upgrade via STM' - ); - }); - -command - .command('execute-upgrade-direct') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmExecuteOperationDirect', - 'Executing direct upgrade via STM' - ); - }); - -command - .command('cancel-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--execute') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await cancelUpgrade( - options.privateKey, - options.l1rpc, - options.zksyncAddress, - options.environment, - options.gasPrice, - options.nonce, - options.execute, - options.newGovernance - ); - }); diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 063777a671b..dc716a0b257 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -16,7 +16,8 @@ const IMAGES = [ 'prover-job-monitor', 'proof-fri-gpu-compressor', 'snapshots-creator', - 'verified-sources-fetcher' + 'verified-sources-fetcher', + 'prover-autoscaler' ]; const DOCKER_REGISTRIES = ['us-docker.pkg.dev/matterlabs-infra/matterlabs-docker', 'matterlabs']; @@ -76,7 +77,8 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin 'contract-verifier', 'prover-fri-gateway', 'prover-job-monitor', - 'snapshots-creator' + 'snapshots-creator', + 'prover-autoscaler' ].includes(image) ? 
['latest', 'latest2.0', `2.0-${imageTagSha}`, `${imageTagSha}`, `2.0-${imageTagShaTS}`, `${imageTagShaTS}`] : [`latest2.0`, 'latest']; diff --git a/infrastructure/zk/src/fmt.ts b/infrastructure/zk/src/fmt.ts index e58cdbc8e54..b9f7f1b9d60 100644 --- a/infrastructure/zk/src/fmt.ts +++ b/infrastructure/zk/src/fmt.ts @@ -48,7 +48,7 @@ export async function rustfmt(check: boolean = false) { const dirs = [ process.env.ZKSYNC_HOME as string, `${process.env.ZKSYNC_HOME}/prover`, - `${process.env.ZKSYNC_HOME}/zk_toolbox` + `${process.env.ZKSYNC_HOME}/zkstack_cli` ]; for (const dir of dirs) { diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts index 7a24881c0f9..49ae4d0753e 100644 --- a/infrastructure/zk/src/lint.ts +++ b/infrastructure/zk/src/lint.ts @@ -38,12 +38,12 @@ async function proverClippy() { await utils.spawn('cargo clippy --tests --locked -- -D warnings'); } -async function toolboxClippy() { - process.chdir(`${process.env.ZKSYNC_HOME}/zk_toolbox`); +async function zkstackClippy() { + process.chdir(`${process.env.ZKSYNC_HOME}/zkstack_cli`); await utils.spawn('cargo clippy --tests --locked -- -D warnings'); } -const ARGS = [...EXTENSIONS, 'rust', 'prover', 'contracts', 'toolbox'] as const; +const ARGS = [...EXTENSIONS, 'rust', 'prover', 'contracts', 'zkstack_cli'] as const; export const command = new Command('lint') .description('lint code') @@ -61,8 +61,8 @@ export const command = new Command('lint') case 'contracts': await lintContracts(cmd.check); break; - case 'toolbox': - await toolboxClippy(); + case 'zkstack_cli': + await zkstackClippy(); break; default: await lint(extension, cmd.check); @@ -72,7 +72,7 @@ export const command = new Command('lint') promises.push(lintContracts(cmd.check)); promises.push(clippy()); promises.push(proverClippy()); - promises.push(toolboxClippy()); + promises.push(zkstackClippy()); await Promise.all(promises); } }); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index d433c4e1a3b..928d105582f 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -46,6 +46,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -92,9 +93,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", @@ -107,33 +108,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = 
"ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -168,9 +169,9 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -208,6 +209,18 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "async-broadcast" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cd0e2e25ea8e5f7e9df04578dc6cf5c83577fd09b1a46aaf5c85e1c33f2a7e" +dependencies = [ + "event-listener", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -225,20 +238,20 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -287,11 +300,11 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e89b6941c2d1a7045538884d6e760ccfffdf8e1ffc2613d8efa74305e1f3752" +checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" dependencies = [ - "bindgen 0.69.4", + "bindgen", "cc", "cmake", "dunce", @@ -311,9 +324,9 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", - "hyper 1.4.1", + "hyper 1.3.1", "hyper-util", "itoa", "matchit", @@ -344,7 +357,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", "mime", "pin-project-lite", @@ -355,11 +368,22 @@ dependencies = [ "tracing", ] +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "getrandom", + "instant", + "rand 0.8.5", +] + [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" dependencies = [ "addr2line", "cc", @@ -423,7 +447,7 @@ checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" dependencies = [ "autocfg", "libm", - "num-bigint 0.4.6", + "num-bigint 0.4.5", 
"num-integer", "num-traits", ] @@ -437,29 +461,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.59.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "clap 2.34.0", - "env_logger 0.9.3", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "proc-macro2 1.0.86", - "quote 1.0.36", - "regex", - "rustc-hash", - "shlex", - "which", -] - [[package]] name = "bindgen" version = "0.69.4" @@ -474,12 +475,12 @@ dependencies = [ "lazycell", "log", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "regex", "rustc-hash", "shlex", - "syn 2.0.72", + "syn 2.0.66", "which", ] @@ -650,9 +651,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f9a6d958dd58a0899737e5a1fc6597aefcf7980bf8be5be5329e701cbd45ca" +checksum = "98c681a3f867afe40bcc188e5cb5260bbf5699531823affa3cbe28f7ca9b7bc9" dependencies = [ "boojum", "cmake", @@ -664,25 +665,25 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" +checksum = "26d4d6dafc1a3bb54687538972158f07b2c948bc57d5890df22c0739098b3028" dependencies = [ "borsh-derive", - "cfg_aliases 0.2.1", + "cfg_aliases", ] [[package]] name = "borsh-derive" -version = "1.5.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0" dependencies = [ "once_cell", - "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro-crate 2.0.0", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", "syn_derive", ] @@ -693,7 +694,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", - "regex-automata 0.4.7", + "regex-automata 0.4.6", "serde", ] @@ -726,7 +727,7 @@ version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -781,12 +782,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" -[[package]] -name = "cfg_aliases" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" - [[package]] name = "chrono" version = "0.4.38" @@ -799,16 +794,16 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] name = "circuit_definitions" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b532214f063e5e0ee5c0fc1d3afd56dec541efa68b8985f14cc55cc324f4c48" +checksum = "492404ea63c934d8e894325f0a741723bf91cd035cb34a92fddd8617c4a00fd3" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.6", 
"crossbeam", "derivative", "seq-macro", @@ -854,14 +849,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" +checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" dependencies = [ "derivative", "serde", - "zk_evm 0.150.5", - "zkevm_circuits 0.150.5", + "zk_evm 0.150.6", + "zkevm_circuits 0.150.6", ] [[package]] @@ -921,11 +916,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" +checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.6", "derivative", "rayon", "serde", @@ -960,9 +955,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.11" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35723e6a11662c2afb578bcf0b88bf6ea8e21282a953428f240574fcc3a2b5b3" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -970,9 +965,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.11" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49eb96cbfa7cfa35017b7cd548c75b14c3118c98b423041d70562665e07fb0fa" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -982,21 +977,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.11" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d029b67f89d30bbb547c89fd5161293c0aec155fc691d7924b64550662db93e" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "cmake" @@ -1018,9 +1013,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "colored" @@ -1097,7 +1092,7 @@ version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "unicode-xid 0.2.4", ] @@ -1279,7 +1274,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24cf603ca4299c6e20e644da88897f7b81d688510f4887e818b0bfe0b792081b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -1291,7 +1286,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -1326,9 +1321,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -1337,8 +1332,18 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +dependencies = [ + "darling_core 0.20.10", + "darling_macro 0.20.10", ] [[package]] @@ -1349,23 +1354,57 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "strsim 0.10.0", "syn 1.0.109", ] +[[package]] +name = "darling_core" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.85", + "quote 1.0.36", + "strsim 0.11.1", + "syn 2.0.66", +] + [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", + "darling_core 0.13.4", "quote 1.0.36", "syn 1.0.109", ] +[[package]] +name = "darling_macro" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +dependencies = [ + "darling_core 0.20.10", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "debug-map-sorted" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c7dfa83618734bf9fa07aadaa1166b634e9427bb9bc5a1c2332d04d73fb721" +dependencies = [ + "itertools 0.10.5", +] + [[package]] name = "debugid" version = "0.8.0" @@ -1413,7 +1452,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -1433,9 +1472,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", "unicode-xid 0.2.4", ] @@ -1509,6 +1548,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" + [[package]] name = "ecdsa" version = "0.14.8" @@ -1562,9 +1607,9 @@ dependencies = [ [[package]] 
name = "either" -version = "1.13.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" dependencies = [ "serde", ] @@ -1640,16 +1685,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" dependencies = [ "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "env_filter" -version = "0.1.2" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f2c92ceda6ceec50f43169f9ee8424fe2db276791afde7b2cd8bc084cb376ab" +checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" dependencies = [ "log", ] @@ -1669,9 +1714,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.5" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13fa619b91fb2381732789fc5de83b45675e882f66623b7d8cb4f643017018d" +checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" dependencies = [ "anstream", "anstyle", @@ -1696,9 +1741,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f0d6e329b2c11d134c3140951209be968ef316ed64ddde75640eaed7f10264" +checksum = "c1e1990fee6e9d25b40524ce53ca7977a211155a17bc7277f4dd354633e4fc22" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1707,9 +1752,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060e8186234c7a281021fb95614e06e94e1fc7ab78938360a5c27af0f8fc6105" +checksum = "d84e8d300c28cd91ceb56340f66da8607409f44a45f5e694e23723630db8c852" dependencies = [ "serde_json", ] @@ -1790,6 +1835,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener", + "pin-project-lite", +] + [[package]] name = "fastrand" version = "2.1.0" @@ -1868,6 +1923,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "fluent-uri" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17c704e9dbe1ddd863da1e6ff3567795087b1eb201ce80d8fa81162e1516500d" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "flume" version = "0.11.0" @@ -1928,7 +1992,7 @@ dependencies = [ "indexmap 1.9.3", "itertools 0.10.5", "lazy_static", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-derive", "num-integer", "num-traits", @@ -2043,9 +2107,9 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -2306,6 +2370,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "handlebars" +version = "3.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4498fc115fa7d34de968184e473529abb40eeb6be8bc5f7faba3d08c316cb3e3" +dependencies = [ + "log", + "pest", + "pest_derive", + "quick-error 2.0.1", + "serde", + "serde_json", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -2334,6 +2412,30 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "headers" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" +dependencies = [ + "base64 0.21.7", + "bytes", + "headers-core", + "http 1.1.0", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +dependencies = [ + "http 1.1.0", +] + [[package]] name = "heck" version = "0.3.3" @@ -2443,9 +2545,9 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", "http 1.1.0", @@ -2460,15 +2562,15 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "pin-project-lite", ] [[package]] name = "httparse" -version = "1.9.4" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -2484,9 +2586,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" dependencies = [ "bytes", "futures-channel", @@ -2508,16 +2610,16 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", "futures-util", "h2 0.4.5", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "httparse", "httpdate", "itoa", @@ -2527,6 +2629,26 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-http-proxy" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d06dbdfbacf34d996c6fb540a71a684a7aae9056c71951163af8a8a4c07b9a4" +dependencies = [ + "bytes", + "futures-util", + "headers", + "http 1.1.0", + "hyper 1.3.1", + "hyper-rustls", + "hyper-util", + "pin-project-lite", + "rustls-native-certs", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "hyper-rustls" version = "0.27.2" @@ -2535,10 +2657,11 @@ checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.1", + "hyper 1.3.1", "hyper-util", "log", "rustls", + "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", @@ -2551,7 +2674,7 @@ version = "0.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.4.1", + "hyper 1.3.1", "hyper-util", "pin-project-lite", "tokio", @@ -2565,7 +2688,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.30", + "hyper 0.14.29", "native-tls", "tokio", "tokio-native-tls", @@ -2579,7 +2702,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.4.1", + "hyper 1.3.1", "hyper-util", "native-tls", "tokio", @@ -2589,16 +2712,16 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.1", - "hyper 1.4.1", + "http-body 1.0.0", + "hyper 1.3.1", "pin-project-lite", "socket2", "tokio", @@ -2679,7 +2802,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -2716,6 +2839,15 @@ dependencies = [ "regex", ] +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -2733,9 +2865,9 @@ dependencies = [ [[package]] name = "is_terminal_polyfill" -version = "1.70.1" +version = "1.70.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" [[package]] name = "itertools" @@ -2812,9 +2944,9 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] @@ -2828,6 +2960,44 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b1fb8864823fad91877e6caea0baca82e49e8db50f8e5c9f9a453e27d3330fc" +dependencies = [ + "jsonptr", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonpath-rust" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d8fe85bd70ff715f31ce8c739194b423d79811a19602115d611a3ec85d6200" +dependencies = [ + "lazy_static", + "once_cell", + "pest", + "pest_derive", + "regex", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonptr" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c6e529149475ca0b2820835d3dce8fcc41c6b943ca608d32f35b449255e4627" +dependencies = [ + "fluent-uri", + "serde", + 
"serde_json", +] + [[package]] name = "jsonrpsee" version = "0.23.2" @@ -2882,7 +3052,7 @@ dependencies = [ "futures-timer", "futures-util", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", "jsonrpsee-types", "pin-project", @@ -2904,8 +3074,8 @@ checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac" dependencies = [ "async-trait", "base64 0.22.1", - "http-body 1.0.1", - "hyper 1.4.1", + "http-body 1.0.0", + "hyper 1.3.1", "hyper-rustls", "hyper-util", "jsonrpsee-core", @@ -2929,9 +3099,9 @@ checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3013,12 +3183,135 @@ dependencies = [ ] [[package]] -name = "keccak" -version = "0.1.5" +name = "k8s-openapi" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8847402328d8301354c94d605481f25a6bdc1ed65471fd96af8eca71141b13" +dependencies = [ + "base64 0.22.1", + "chrono", + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "kube" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", + "kube-derive", + "kube-runtime", +] + +[[package]] +name = "kube-client" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" +dependencies = [ + "base64 0.22.1", + "bytes", + "chrono", + "either", + "futures 0.3.30", + "home", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-http-proxy", + "hyper-rustls", + "hyper-timeout", + "hyper-util", + "jsonpath-rust", + "k8s-openapi", + "kube-core", + "pem", + "rustls", + "rustls-pemfile 2.1.2", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" +dependencies = [ + "chrono", + "form_urlencoded", + "http 1.1.0", + "json-patch", + "k8s-openapi", + "schemars", + "serde", + "serde-value", + "serde_json", + "thiserror", +] + +[[package]] +name = "kube-derive" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa98be978eddd70a773aa8e86346075365bfb7eb48783410852dbf7cb57f0c27" +dependencies = [ + "darling 0.20.10", + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde_json", + "syn 2.0.66", +] + +[[package]] +name = "kube-runtime" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "5895cb8aa641ac922408f128b935652b34c2995f16ad7db0984f6caa50217914" dependencies = [ - "cpufeatures", + "ahash 0.8.11", + "async-broadcast", + "async-stream", + "async-trait", + "backoff", + "derivative", + "futures 0.3.30", + "hashbrown 0.14.5", + 
"json-patch", + "jsonptr", + "k8s-openapi", + "kube-client", + "parking_lot", + "pin-project", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-util", + "tracing", ] [[package]] @@ -3044,12 +3337,12 @@ checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -3099,9 +3392,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "logos" @@ -3120,10 +3413,10 @@ checksum = "dc487311295e0002e452025d6b580b77bb17286de87b57138f3b5db711cded68" dependencies = [ "beef", "fnv", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "regex-syntax 0.6.29", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3174,9 +3467,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "miette" @@ -3196,9 +3489,9 @@ version = "5.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3209,9 +3502,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.5" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", @@ -3225,23 +3518,22 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.4" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", ] [[package]] name = "mio" -version = "1.0.1" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ - "hermit-abi 0.3.9", "libc", "wasi", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] @@ -3292,7 +3584,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" dependencies = [ "either", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "serde", 
"syn 1.0.109", @@ -3306,7 +3598,7 @@ checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ "bitflags 2.6.0", "cfg-if", - "cfg_aliases 0.1.1", + "cfg_aliases", "libc", ] @@ -3342,7 +3634,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-complex", "num-integer", "num-iter", @@ -3363,9 +3655,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ "num-integer", "num-traits", @@ -3452,7 +3744,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-integer", "num-traits", "serde", @@ -3503,9 +3795,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3515,9 +3807,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3528,9 +3820,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.36.2" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" +checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" dependencies = [ "memchr", ] @@ -3568,9 +3860,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3729,9 +4021,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "a1b5927e4a9ae8d6cdb6a69e4e04a0ec73381a358e21b8a576f44769f34e7c24" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -3743,12 +4035,12 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro-crate 2.0.0", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -3777,9 +4069,9 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - 
"redox_syscall 0.5.3", + "redox_syscall 0.5.1", "smallvec", - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -3788,12 +4080,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "3.0.4" @@ -3819,6 +4105,51 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "pest_meta" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" +dependencies = [ + "once_cell", + "pest", + "sha2 0.10.8", +] + [[package]] name = "petgraph" version = "0.6.5" @@ -3844,9 +4175,9 @@ version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3953,8 +4284,8 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ - "proc-macro2 1.0.86", - "syn 2.0.72", + "proc-macro2 1.0.85", + "syn 2.0.66", ] [[package]] @@ -3989,6 +4320,15 @@ dependencies = [ "toml_edit 0.19.15", ] +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.2", +] + [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -4005,7 +4345,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", "version_check", @@ -4017,7 +4357,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "version_check", ] @@ -4039,18 +4379,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.85" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus-client" -version = "0.22.3" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" dependencies = [ "dtoa", "itoa", @@ -4064,16 +4404,16 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "proptest" -version = "1.5.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", @@ -4083,7 +4423,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.4", + "regex-syntax 0.8.3", "rusty-fork", "tempfile", "unarray", @@ -4126,7 +4466,7 @@ dependencies = [ "prost 0.12.6", "prost-types", "regex", - "syn 2.0.72", + "syn 2.0.66", "tempfile", ] @@ -4138,9 +4478,9 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.12.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -4151,9 +4491,9 @@ checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" dependencies = [ "anyhow", "itertools 0.13.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -4217,7 +4557,7 @@ dependencies = [ "bincode", "chrono", "circuit_definitions", - "clap 4.5.11", + "clap 4.5.4", "colored", "dialoguer", "hex", @@ -4263,7 +4603,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -4274,6 +4614,12 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + [[package]] name = "quick-protobuf" version = "0.8.1" @@ -4298,7 +4644,7 @@ version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", ] [[package]] @@ -4414,9 +4760,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ "bitflags 2.6.0", ] @@ -4429,8 +4775,8 
@@ checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -4444,13 +4790,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.3", ] [[package]] @@ -4461,9 +4807,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "rend" @@ -4488,7 +4834,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.30", + "hyper 0.14.29", "hyper-tls 0.5.0", "ipnet", "js-sys", @@ -4528,9 +4874,9 @@ dependencies = [ "futures-util", "h2 0.4.5", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", - "hyper 1.4.1", + "hyper 1.3.1", "hyper-rustls", "hyper-tls 0.6.0", "hyper-util", @@ -4661,7 +5007,7 @@ version = "0.7.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -4770,9 +5116,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", "rustls-pemfile 2.1.2", @@ -4835,9 +5181,9 @@ checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" [[package]] name = "rustls-webpki" -version = "0.102.6" +version = "0.102.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ "aws-lc-rs", "ring", @@ -4858,7 +5204,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error", + "quick-error 1.2.3", "tempfile", "wait-timeout", ] @@ -4887,6 +5233,30 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "schemars" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde_derive_internals", + "syn 2.0.66", +] + [[package]] name = 
"scopeguard" version = "1.2.0" @@ -4951,28 +5321,29 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ + "serde", "zeroize", ] [[package]] name = "security-framework" -version = "2.11.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -5129,16 +5500,27 @@ version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "serde_json" -version = "1.0.121" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -5186,8 +5568,8 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", - "proc-macro2 1.0.86", + "darling 0.13.4", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -5300,9 +5682,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebb6d928451f0779f14da02ee9d51d4bde560328edc6471f0d5c5c11954345c4" +checksum = "92776ca824f49c255a7417939706d759e0fd3dd4217420d01da68beae04f0bd6" dependencies = [ "bincode", "blake2 0.10.6", @@ -5366,7 +5748,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-traits", "thiserror", "time", @@ -5463,10 +5845,11 @@ checksum = "c85070f382340e8b23a75808e83573ddf65f9ad9143df9573ca37c1ed2ee956a" [[package]] name = "sqlformat" -version = "0.2.4" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f895e3734318cc55f1fe66258926c9b910c124d47520339efecbb6c59cec7c1f" +checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" dependencies = [ + "itertools 0.12.1", "nom", "unicode_categories", ] @@ -5534,11 +5917,11 @@ version = "0.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "sqlx-core", "sqlx-macros-core", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -5552,7 +5935,7 @@ dependencies = [ "heck 0.5.0", "hex", "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "serde", "serde_json", @@ -5561,7 +5944,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.72", + "syn 2.0.66", "tempfile", "tokio", "url", @@ -5640,7 +6023,7 @@ dependencies = [ "log", "md-5", "memchr", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "once_cell", "rand 0.8.5", "rust_decimal", @@ -5739,7 +6122,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -5760,17 +6143,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "rustversion", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "subtle" -version = "2.6.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" @@ -5789,18 +6172,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.72" +version = "2.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "unicode-ident", ] @@ -5812,9 +6195,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -5889,7 +6272,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dffced63c2b5c7be278154d76b479f9f9920ed34e7574201407f0b14e2bbb93" dependencies = [ - "env_logger 0.11.5", + "env_logger 0.11.3", "test-log-macros", "tracing-subscriber", ] @@ -5900,9 +6283,9 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -5916,22 +6299,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ 
"thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -6004,9 +6387,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -6019,31 +6402,32 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.2" +version = "1.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" dependencies = [ "backtrace", "bytes", "libc", "mio", + "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -6089,14 +6473,15 @@ dependencies = [ "futures-io", "futures-sink", "pin-project-lite", + "slab", "tokio", ] [[package]] name = "toml_datetime" -version = "0.6.7" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fb9f64314842840f1d940ac544da178732128f1c78c21772e876579e0da1db" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" [[package]] name = "toml_edit" @@ -6120,6 +6505,17 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +dependencies = [ + "indexmap 2.2.6", + "toml_datetime", + "winnow", +] + [[package]] name = "toml_edit" version = "0.21.1" @@ -6144,9 +6540,9 @@ dependencies = [ "bytes", "h2 0.4.5", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", - "hyper 1.4.1", + "hyper 1.3.1", "hyper-timeout", "hyper-util", "percent-encoding", @@ -6181,6 +6577,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "base64 0.21.7", + "bitflags 2.6.0", + "bytes", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -6211,9 +6626,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -6287,6 +6702,27 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-test" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "557b891436fe0d5e0e363427fc7f217abf9ccd510d5136549847bdcbcd011d68" +dependencies = [ + "tracing-core", + "tracing-subscriber", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" +dependencies = [ + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -6305,6 +6741,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "uint" version = "0.9.5" @@ -6422,9 +6864,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.10.0" +version = "2.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72139d247e5f97a3eff96229a7ae85ead5328a39efe76f8bf5a06313d505b6ea" +checksum = "d11a831e3c0b56e438a28308e7c810799e3c118417f342d30ecec080105395cd" dependencies = [ "base64 0.22.1", "log", @@ -6453,15 +6895,15 @@ checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" [[package]] name = "utf8parse" -version = "0.2.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.10.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ "serde", ] @@ -6486,9 +6928,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vise" @@ -6510,7 +6952,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "671d3b894d5d0849f0a597f56bf071f42d4f2a1cbcf2f78ca21f870ab7c0cc2b" dependencies = [ - "hyper 0.14.30", + "hyper 0.14.29", "once_cell", "tokio", "tracing", @@ -6523,9 +6965,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a511871dc5de990a3b2a0e715facfbc5da848c0c0395597a1415029fb7c250a" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -6587,9 +7029,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 
1.0.36", - "syn 2.0.72", + "syn 2.0.66", "wasm-bindgen-shared", ] @@ -6621,9 +7063,9 @@ version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6669,9 +7111,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.3" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "3c452ad30530b54a4d8e71952716a212b08efd0f3562baa66c29a618b07da7c3" dependencies = [ "rustls-pki-types", ] @@ -6735,7 +7177,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -6753,7 +7195,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -6773,18 +7215,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -6795,9 +7237,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -6807,9 +7249,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -6819,15 +7261,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" [[package]] name = "windows_i686_gnullvm" -version = "0.52.6" +version = "0.52.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -6837,9 +7279,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -6849,9 +7291,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -6861,9 +7303,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -6873,9 +7315,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ -6923,22 +7365,22 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -6956,9 +7398,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -7029,9 +7471,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" +checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" dependencies = [ 
"anyhow", "lazy_static", @@ -7039,7 +7481,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.5", + "zk_evm_abstractions 0.150.6", ] [[package]] @@ -7070,35 +7512,35 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" +checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", ] [[package]] name = "zkevm-assembly" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e99106038062537c05b4e6e7754d1bbba28ba16185a3e5ee5ad22e2f8be883bb" +checksum = "5dc743ac7b0d618536dc3ace798fd4b8af78b057884afda5785c7970e15d62d0" dependencies = [ "env_logger 0.9.3", "hex", "lazy_static", "log", "nom", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-traits", "sha3 0.10.8", "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", ] [[package]] @@ -7147,9 +7589,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" +checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7161,7 +7603,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", "zksync_cs_derive", ] @@ -7209,9 +7651,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" +checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -7226,13 +7668,13 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "550f82d3b7448c35168dc13bfadbccd5fd306097b6e1ea01793151c1c9137a36" +checksum = "73ad3e73d290a38a35dd245fd68cb6f498a8a8da4a52f846e88da3d3c31a34fd" dependencies = [ "bincode", "circuit_definitions", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "codegen", "crossbeam", "derivative", @@ -7253,11 +7695,10 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86511b3957adfe415ecdbd1ee01c51aa3ca131a607e61ca024976312f613b0f9" +checksum = "d555e24b853359c5b076c52f9ff9e0ed62a7edc8c2f82f93517c524410c21ecb" dependencies = [ - "bindgen 0.59.2", "cmake", "crossbeam", "derivative", @@ -7269,9 +7710,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e4c00f2db603d1b696bc2e9d822bb4c087050de5b65559067fc2232786cbc93" +checksum = "615dad34e5fe678ec3b3e029af3f19313bebb1b771a8ce963c9ab9a8cc3879d3" dependencies = [ "bit-vec", "cfg-if", @@ -7286,9 +7727,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" 
-version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58df1ec10e0d5eb58563bb01abda5ed185c9b9621502e361848ca40eb7868ac" +checksum = "80721b2da2643bd43f664ac65673ee078e6973c0a88d75b73bfaeac8e1bf5432" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -7344,7 +7785,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "clap 4.5.11", + "clap 4.5.4", "shivini", "tokio", "tokio-util", @@ -7366,9 +7807,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -7391,8 +7832,12 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "strum", + "strum_macros", + "time", "tracing", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -7402,9 +7847,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -7412,7 +7857,7 @@ dependencies = [ "elliptic-curve 0.13.8", "hex", "k256 0.13.3", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-traits", "rand 0.8.5", "sha3 0.10.8", @@ -7423,14 +7868,14 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = "e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", "hex", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "prost 0.12.6", "rand 0.8.5", "serde", @@ -7445,9 +7890,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" +checksum = "b2b2aab4ed18b13cd584f4edcc2546c8da82f89ac62e525063e12935ff28c9be" dependencies = [ "anyhow", "async-trait", @@ -7465,9 +7910,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand 0.8.5", @@ -7526,7 +7971,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5939e2df4288c263c706ff18ac718e984149223ad4289d6d957d767dcfc04c81" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -7552,10 +7997,13 @@ dependencies = [ "tracing", "vise", "zksync_concurrency", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_consensus_storage", + "zksync_consensus_utils", "zksync_contracts", "zksync_db_connection", + "zksync_l1_contract_interface", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", @@ -7615,7 +8063,8 @@ dependencies = [ "async-trait", "rlp", 
"thiserror", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] @@ -7637,10 +8086,10 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f91e58e75d65877f09f83bc3dca8f054847ae7ec4f3e64bfa610a557edd8e8e" dependencies = [ - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-integer", "num-traits", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "serde", "syn 1.0.109", @@ -7648,9 +8097,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" +checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" dependencies = [ "boojum", "derivative", @@ -7660,7 +8109,23 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.5", + "zkevm_circuits 0.150.6", +] + +[[package]] +name = "zksync_l1_contract_interface" +version = "0.1.0" +dependencies = [ + "anyhow", + "hex", + "once_cell", + "sha2 0.10.8", + "sha3 0.10.8", + "zksync_kzg", + "zksync_prover_interface", + "zksync_solidity_vk_codegen", + "zksync_system_constants", + "zksync_types", ] [[package]] @@ -7682,7 +8147,8 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", + "ethabi", "hex", "itertools 0.10.5", "once_cell", @@ -7693,7 +8159,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_contracts", "zksync_mini_merkle_tree", "zksync_system_constants", @@ -7746,8 +8212,8 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.5", - "clap 4.5.11", + "circuit_sequencer_api 0.150.6", + "clap 4.5.4", "ctrlc", "futures 0.3.30", "reqwest 0.12.5", @@ -7775,9 +8241,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -7796,19 +8262,19 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck 0.5.0", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "prost-build", "prost-reflect", "protox", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -7822,6 +8288,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -7830,6 +8297,45 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_prover_autoscaler" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "axum", + "chrono", + "clap 4.5.4", + "ctrlc", + "debug-map-sorted", + "futures 0.3.30", + "k8s-openapi", + "kube", + "once_cell", + "regex", + "reqwest 0.12.5", + "ring", + "rustls", + "serde", + "serde_json", + "structopt", + "strum", + "time", + "tokio", + "tracing", + "tracing-subscriber", + "tracing-test", + "url", + "vise", + 
"zksync_config", + "zksync_core_leftovers", + "zksync_protobuf_config", + "zksync_prover_job_monitor", + "zksync_types", + "zksync_utils", + "zksync_vlog", +] + [[package]] name = "zksync_prover_dal" version = "0.1.0" @@ -7847,7 +8353,7 @@ dependencies = [ "anyhow", "async-trait", "circuit_definitions", - "clap 4.5.11", + "clap 4.5.4", "ctrlc", "futures 0.3.30", "local-ip-address", @@ -7880,7 +8386,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.5.11", + "clap 4.5.4", "ctrlc", "futures 0.3.30", "log", @@ -7933,7 +8439,7 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "serde", "serde_with", "strum", @@ -7949,7 +8455,7 @@ dependencies = [ "anyhow", "async-trait", "axum", - "clap 4.5.11", + "clap 4.5.4", "ctrlc", "serde", "tokio", @@ -7999,6 +8505,23 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_solidity_vk_codegen" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b310ab8a21681270e73f177ddf7974cabb7a96f0624ab8b008fd6ee1f9b4f687" +dependencies = [ + "ethereum-types", + "franklin-crypto", + "handlebars", + "hex", + "paste", + "rescue_poseidon", + "serde", + "serde_derive", + "serde_json", +] + [[package]] name = "zksync_system_constants" version = "0.1.0" @@ -8025,7 +8548,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -8033,7 +8555,6 @@ dependencies = [ "thiserror", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", @@ -8070,7 +8591,7 @@ version = "0.1.0" dependencies = [ "anyhow", "circuit_definitions", - "clap 4.5.11", + "clap 4.5.4", "indicatif", "proptest", "toml_edit 0.14.4", @@ -8111,20 +8632,20 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.5", - "zkevm_opcode_defs 0.150.5", + "zk_evm_abstractions 0.150.6", + "zkevm_opcode_defs 0.150.6", "zksync_vm2_interface", ] [[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "primitive-types", ] @@ -8211,7 +8732,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "clap 4.5.11", + "clap 4.5.4", "ctrlc", "tokio", "tracing", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index e95bae3d4c1..af022e691c1 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -1,8 +1,5 @@ [workspace] -members = [ - "crates/bin/*", - "crates/lib/*", -] +members = ["crates/bin/*", "crates/lib/*"] resolver = "2" @@ -19,20 +16,23 @@ categories = ["cryptography"] [workspace.dependencies] # Common dependencies anyhow = "1.0" -axum = "0.7.5" async-trait = "0.1" +axum = "0.7.5" bincode = "1" chrono = "0.4.38" clap = "4.4.6" colored = "2.0" const-decoder = "0.3.0" ctrlc = "3.1" +debug-map-sorted = "0.1.1" 
dialoguer = "0.11" futures = "0.3" hex = "0.4" -itertools = "0.10.5" indicatif = "0.16" +itertools = "0.10.5" jemallocator = "0.5" +k8s-openapi = { version = "0.23.0", features = ["v1_30"] } +kube = { version = "0.95.0", features = ["runtime", "derive"] } local-ip-address = "0.5.0" log = "0.4.20" md5 = "0.7.0" @@ -42,6 +42,8 @@ queues = "1.1.0" rand = "0.8" regex = "1.10.4" reqwest = "0.12" +ring = "0.17.8" +rustls = { version = "0.23.12", features = ["ring"] } serde = "1.0" serde_derive = "1.0" serde_json = "1.0" @@ -50,21 +52,24 @@ sqlx = { version = "0.8.1", default-features = false } structopt = "0.3.26" strum = { version = "0.26" } tempfile = "3" +time = "0.3.36" tokio = "1" tokio-util = "0.7.11" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = "0.3" +tracing-test = "0.2.5" +url = "2.5.2" vise = "0.2.0" # Proving dependencies -circuit_definitions = "=0.150.5" -circuit_sequencer_api = "=0.150.5" -zkevm_test_harness = "=0.150.5" +circuit_definitions = "=0.150.6" +circuit_sequencer_api = "=0.150.6" +zkevm_test_harness = "=0.150.6" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.9" } -shivini = "=0.150.9" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.151.0" } +shivini = "=0.151.0" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } @@ -84,6 +89,7 @@ zksync_eth_client = { path = "../core/lib/eth_client" } zksync_contracts = { path = "../core/lib/contracts" } zksync_core_leftovers = { path = "../core/lib/zksync_core_leftovers" } zksync_periodic_job = { path = "../core/lib/periodic_job" } +zksync_protobuf_config = { path = "../core/lib/protobuf_config" } # Prover workspace dependencies zksync_prover_dal = { path = "crates/lib/prover_dal" } @@ -91,6 +97,7 @@ zksync_prover_fri_types = { path = "crates/lib/prover_fri_types" } zksync_prover_fri_utils = { path = "crates/lib/prover_fri_utils" } zksync_prover_keystore = { path = "crates/lib/keystore" } zksync_vk_setup_data_generator_server_fri = { path = "crates/bin/vk_setup_data_generator_server_fri" } +zksync_prover_job_monitor = { path = "crates/bin/prover_job_monitor" } # for `perf` profiling [profile.perf] diff --git a/prover/crates/bin/prover_autoscaler/Cargo.toml b/prover/crates/bin/prover_autoscaler/Cargo.toml new file mode 100644 index 00000000000..fbf3ecae909 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "zksync_prover_autoscaler" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +zksync_core_leftovers.workspace = true +zksync_vlog.workspace = true +zksync_utils.workspace = true +zksync_types.workspace = true +zksync_config = { workspace = true, features = ["observability_ext"] } +zksync_prover_job_monitor.workspace = true +zksync_protobuf_config.workspace = true + +debug-map-sorted.workspace = true +anyhow.workspace = true +async-trait.workspace = true +axum.workspace = true +chrono.workspace = true +clap = { workspace = true, features = ["derive"] } +ctrlc = { workspace = true, features = ["termination"] } +futures.workspace = true +k8s-openapi = { workspace = true, features = ["v1_30"] } +kube = { workspace = true, features = ["runtime", "derive"] } +once_cell.workspace = true +regex.workspace = true +reqwest = { workspace = true, features = ["json"] } +ring.workspace = 
true +rustls = { workspace = true, features = ["ring"] } +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +structopt.workspace = true +strum.workspace = true +time.workspace = true +tokio = { workspace = true, features = ["time", "macros"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } +tracing.workspace = true +url.workspace = true +vise.workspace = true +tracing-test.workspace = true diff --git a/prover/crates/bin/prover_autoscaler/src/agent.rs b/prover/crates/bin/prover_autoscaler/src/agent.rs new file mode 100644 index 00000000000..f810bc41672 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/agent.rs @@ -0,0 +1,130 @@ +use std::net::SocketAddr; + +use anyhow::Context as _; +use axum::{ + extract::State, + response::IntoResponse, + routing::{get, post}, + Json, Router, +}; +use futures::future; +use reqwest::StatusCode; +use serde::{Deserialize, Serialize}; +use tokio::sync::watch; + +use crate::{ + cluster_types::Cluster, + k8s::{Scaler, Watcher}, +}; + +struct AppError(anyhow::Error); + +impl IntoResponse for AppError { + fn into_response(self) -> axum::response::Response { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Something went wrong: {}", self.0), + ) + .into_response() + } +} + +pub async fn run_server( + port: u16, + watcher: Watcher, + scaler: Scaler, + mut stop_receiver: watch::Receiver, +) -> anyhow::Result<()> { + let bind_address = SocketAddr::from(([0, 0, 0, 0], port)); + tracing::debug!("Starting Autoscaler agent on {bind_address}"); + let app = create_agent_router(watcher, scaler); + + let listener = tokio::net::TcpListener::bind(bind_address) + .await + .with_context(|| format!("Failed binding Autoscaler agent to {bind_address}"))?; + axum::serve(listener, app) + .with_graceful_shutdown(async move { + if stop_receiver.changed().await.is_err() { + tracing::warn!( + "Stop signal sender for Autoscaler agent was dropped without sending a signal" + ); + } + tracing::info!("Stop signal received, Autoscaler agent is shutting down"); + }) + .await + .context("Autoscaler agent failed")?; + tracing::info!("Autoscaler agent shut down"); + Ok(()) +} + +fn create_agent_router(watcher: Watcher, scaler: Scaler) -> Router { + let app = App { watcher, scaler }; + Router::new() + .route("/healthz", get(health)) + .route("/cluster", get(get_cluster)) + .route("/scale", post(scale)) + .with_state(app) +} + +// TODO: Use +// https://github.com/matter-labs/zksync-era/blob/9821a20018c367ce246dba656daab5c2e7757973/core/node/api_server/src/healthcheck.rs#L53 +// instead. 
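+// Note: `health` below is a plain liveness handler for the `/healthz` route wired up in
+// `create_agent_router`; it always returns a static "Ok\n" and performs no readiness checks.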
+async fn health() -> &'static str { + "Ok\n" +} + +#[derive(Clone)] +struct App { + watcher: Watcher, + scaler: Scaler, +} + +async fn get_cluster(State(app): State) -> Result, AppError> { + let cluster = app.watcher.cluster.lock().await.clone(); + Ok(Json(cluster)) +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct ScaleDeploymentRequest { + pub namespace: String, + pub name: String, + pub size: i32, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct ScaleRequest { + pub deployments: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScaleResponse { + pub scale_result: Vec, +} + +/// To test or forse scale in particular cluster use: +/// $ curl -X POST -H "Content-Type: application/json" --data '{"deployments": [{"namespace": "prover-red", "name": "witness-vector-generator-spec-9-f", "size":0},{"namespace": "prover-red", "name": "witness-vector-generator-spec-9-c", "size":0}]}' :8081/scale +async fn scale( + State(app): State, + Json(payload): Json, +) -> Result, AppError> { + let handles: Vec<_> = payload + .deployments + .into_iter() + .map(|d| { + let s = app.scaler.clone(); + tokio::spawn(async move { + match s.scale(&d.namespace, &d.name, d.size).await { + Ok(()) => "".to_string(), + Err(err) => err.to_string(), + } + }) + }) + .collect(); + + let scale_result = future::join_all(handles) + .await + .into_iter() + .map(Result::unwrap) + .collect(); + Ok(Json(ScaleResponse { scale_result })) +} diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs new file mode 100644 index 00000000000..b800b86f3c2 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs @@ -0,0 +1,60 @@ +use std::collections::{BTreeMap, HashMap}; + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize, Serializer}; +use strum::{Display, EnumString}; + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Pod { + // pub name: String, // TODO: Consider if it's needed. + pub owner: String, + pub status: String, + pub changed: DateTime, +} +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Deployment { + // pub name: String, // TODO: Consider if it's needed. + pub running: i32, + pub desired: i32, +} + +fn ordered_map( + value: &HashMap, + serializer: S, +) -> Result +where + S: Serializer, +{ + let ordered: BTreeMap<_, _> = value.iter().collect(); + ordered.serialize(serializer) +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Namespace { + #[serde(serialize_with = "ordered_map")] + pub deployments: HashMap, + pub pods: HashMap, +} + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Cluster { + pub name: String, + pub namespaces: HashMap, +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Clusters { + pub clusters: HashMap, + /// Map from cluster to index in agent URLs Vec. 
+ pub agent_ids: HashMap, +} + +#[derive(Default, Debug, EnumString, Display, Hash, PartialEq, Eq, Clone, Copy)] +pub enum PodStatus { + #[default] + Unknown, + Running, + Pending, + LongPending, + NeedToMove, +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/mod.rs b/prover/crates/bin/prover_autoscaler/src/global/mod.rs new file mode 100644 index 00000000000..5e4afb93843 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/mod.rs @@ -0,0 +1,3 @@ +pub mod queuer; +pub mod scaler; +pub mod watcher; diff --git a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs new file mode 100644 index 00000000000..32610ebf3c3 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs @@ -0,0 +1,49 @@ +use std::collections::HashMap; + +use anyhow::{Context, Ok}; +use reqwest::Method; +use zksync_prover_job_monitor::autoscaler_queue_reporter::VersionedQueueReport; +use zksync_utils::http_with_retries::send_request_with_retries; + +use crate::metrics::{AUTOSCALER_METRICS, DEFAULT_ERROR_CODE}; + +const MAX_RETRIES: usize = 5; + +#[derive(Debug)] +pub struct Queue { + pub queue: HashMap, +} + +#[derive(Default)] +pub struct Queuer { + pub prover_job_monitor_url: String, +} + +impl Queuer { + pub fn new(pjm_url: String) -> Self { + Self { + prover_job_monitor_url: pjm_url, + } + } + + pub async fn get_queue(&self) -> anyhow::Result { + let url = &self.prover_job_monitor_url; + let response = send_request_with_retries(url, MAX_RETRIES, Method::GET, None, None).await; + let response = response.map_err(|err| { + AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc(); + anyhow::anyhow!("Failed fetching queue from url: {url}: {err:?}") + })?; + + AUTOSCALER_METRICS.calls[&(url.clone(), response.status().as_u16())].inc(); + let response = response + .json::>() + .await + .context("Failed to read response as json")?; + Ok(Queue { + queue: response + .iter() + .map(|x| (x.version.to_string(), x.report.prover_jobs.queued as u64)) + .collect::>(), + }) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs new file mode 100644 index 00000000000..884174562a1 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -0,0 +1,751 @@ +use std::{collections::HashMap, str::FromStr}; + +use chrono::Utc; +use debug_map_sorted::SortedOutputExt; +use once_cell::sync::Lazy; +use regex::Regex; +use zksync_config::configs::prover_autoscaler::{Gpu, ProverAutoscalerScalerConfig}; + +use super::{queuer, watcher}; +use crate::{ + agent::{ScaleDeploymentRequest, ScaleRequest}, + cluster_types::{Cluster, Clusters, Pod, PodStatus}, + metrics::AUTOSCALER_METRICS, + task_wiring::Task, +}; + +const DEFAULT_SPEED: u32 = 500; + +#[derive(Default, Debug, PartialEq, Eq)] +struct GPUPool { + name: String, + gpu: Gpu, + provers: HashMap, // TODO: consider using i64 everywhere to avoid type casts. 
+ preemtions: u64, + max_pool_size: u32, +} + +impl GPUPool { + fn sum_by_pod_status(&self, ps: PodStatus) -> u32 { + self.provers.get(&ps).cloned().unwrap_or(0) + } + + fn to_key(&self) -> GPUPoolKey { + GPUPoolKey { + cluster: self.name.clone(), + gpu: self.gpu, + } + } +} + +#[derive(Debug, Eq, Hash, PartialEq)] +struct GPUPoolKey { + cluster: String, + gpu: Gpu, +} + +static PROVER_DEPLOYMENT_RE: Lazy = + Lazy::new(|| Regex::new(r"^circuit-prover-gpu(-(?[ltvpa]\d+))?$").unwrap()); +static PROVER_POD_RE: Lazy = + Lazy::new(|| Regex::new(r"^circuit-prover-gpu(-(?[ltvpa]\d+))?").unwrap()); + +/// gpu_to_prover converts Gpu type to corresponding deployment name. +fn gpu_to_prover(gpu: Gpu) -> String { + let s = "circuit-prover-gpu"; + match gpu { + Gpu::Unknown => "".into(), + Gpu::L4 => s.into(), + _ => format!("{}-{}", s, gpu.to_string().to_lowercase()), + } +} + +pub struct Scaler { + /// namespace to Protocol Version configuration. + namespaces: HashMap, + watcher: watcher::Watcher, + queuer: queuer::Queuer, + + /// Which cluster to use first. + cluster_priorities: HashMap, + min_provers: HashMap, + max_provers: HashMap>, + prover_speed: HashMap, + long_pending_duration: chrono::Duration, +} + +struct ProverPodGpu<'a> { + name: &'a str, + pod: &'a Pod, + gpu: Gpu, +} + +impl<'a> ProverPodGpu<'a> { + fn new(name: &'a str, pod: &'a Pod) -> Option> { + PROVER_POD_RE.captures(name).map(|caps| Self { + name, + pod, + gpu: Gpu::from_str(caps.name("gpu").map_or("l4", |m| m.as_str())).unwrap_or_default(), + }) + } +} + +impl Scaler { + pub fn new( + watcher: watcher::Watcher, + queuer: queuer::Queuer, + config: ProverAutoscalerScalerConfig, + ) -> Self { + config + .protocol_versions + .iter() + .for_each(|(namespace, version)| { + AUTOSCALER_METRICS.prover_protocol_version[&(namespace.clone(), version.clone())] + .set(1); + }); + Self { + namespaces: config.protocol_versions, + watcher, + queuer, + cluster_priorities: config.cluster_priorities, + min_provers: config.min_provers, + max_provers: config.max_provers, + prover_speed: config.prover_speed, + long_pending_duration: chrono::Duration::seconds( + config.long_pending_duration.whole_seconds(), + ), + } + } + + fn convert_to_gpu_pool(&self, namespace: &String, cluster: &Cluster) -> Vec { + let mut gp_map = HashMap::new(); // + let Some(namespace_value) = &cluster.namespaces.get(namespace) else { + // No namespace in config, ignoring. + return vec![]; + }; + + for caps in namespace_value + .deployments + .keys() + .filter_map(|dn| PROVER_DEPLOYMENT_RE.captures(dn)) + { + // Processing only provers. + let gpu = + Gpu::from_str(caps.name("gpu").map_or("l4", |m| m.as_str())).unwrap_or_default(); + let e = gp_map.entry(gpu).or_insert(GPUPool { + name: cluster.name.clone(), + gpu, + max_pool_size: self + .max_provers + .get(&cluster.name) + .and_then(|inner_map| inner_map.get(&gpu)) + .copied() + .unwrap_or(0), + ..Default::default() + }); + + // Initialize pool only if we have ready deployments. 
+ e.provers.insert(PodStatus::Running, 0); + } + + for ppg in namespace_value + .pods + .iter() + .filter_map(|(pn, pv)| ProverPodGpu::new(pn, pv)) + { + let e = gp_map.entry(ppg.gpu).or_insert(GPUPool { + name: cluster.name.clone(), + gpu: ppg.gpu, + ..Default::default() + }); + let mut status = PodStatus::from_str(&ppg.pod.status).unwrap_or_default(); + if status == PodStatus::Pending + && ppg.pod.changed < Utc::now() - self.long_pending_duration + { + status = PodStatus::LongPending; + } + tracing::info!( + "pod {}: status: {}, real status: {}", + ppg.name, + status, + ppg.pod.status + ); + e.provers.entry(status).and_modify(|n| *n += 1).or_insert(1); + } + + tracing::info!("From pods {:?}", gp_map.sorted_debug()); + + gp_map.into_values().collect() + } + + fn sorted_clusters(&self, namespace: &String, clusters: &Clusters) -> Vec { + let mut gpu_pools: Vec = clusters + .clusters + .values() + .flat_map(|c| self.convert_to_gpu_pool(namespace, c)) + .collect(); + + gpu_pools.sort_by(|a, b| { + a.gpu + .cmp(&b.gpu) // Sort by GPU first. + .then( + a.sum_by_pod_status(PodStatus::NeedToMove) + .cmp(&b.sum_by_pod_status(PodStatus::NeedToMove)), + ) // Sort by need to evict. + .then( + a.sum_by_pod_status(PodStatus::LongPending) + .cmp(&b.sum_by_pod_status(PodStatus::LongPending)), + ) // Sort by long Pending pods. + .then(a.preemtions.cmp(&b.preemtions)) // Sort by preemtions in the cluster. + .then( + self.cluster_priorities + .get(&a.name) + .unwrap_or(&1000) + .cmp(self.cluster_priorities.get(&b.name).unwrap_or(&1000)), + ) // Sort by priority. + .then(b.max_pool_size.cmp(&a.max_pool_size)) // Reverse sort by cluster size. + }); + + gpu_pools + } + + fn speed(&self, gpu: Gpu) -> u64 { + self.prover_speed + .get(&gpu) + .cloned() + .unwrap_or(DEFAULT_SPEED) + .into() + } + + fn provers_to_speed(&self, gpu: Gpu, n: u32) -> u64 { + self.speed(gpu) * n as u64 + } + + fn normalize_queue(&self, gpu: Gpu, queue: u64) -> u64 { + let speed = self.speed(gpu); + // Divide and round up if there's any remainder. + (queue + speed - 1) / speed * speed + } + + fn run(&self, namespace: &String, queue: u64, clusters: &Clusters) -> HashMap { + let sc = self.sorted_clusters(namespace, clusters); + tracing::debug!("Sorted clusters for namespace {}: {:?}", namespace, &sc); + + // Increase queue size, if it's too small, to make sure that required min_provers are + // running. + let queue: u64 = self.min_provers.get(namespace).map_or(queue, |min| { + self.normalize_queue(Gpu::L4, queue) + .max(self.provers_to_speed(Gpu::L4, *min)) + }); + + let mut total: i64 = 0; + let mut provers: HashMap = HashMap::new(); + for c in &sc { + for (status, p) in &c.provers { + match status { + PodStatus::Running | PodStatus::Pending => { + total += self.provers_to_speed(c.gpu, *p) as i64; + provers + .entry(c.to_key()) + .and_modify(|x| *x += p) + .or_insert(*p); + } + _ => (), // Ignore LongPending as not running here. + } + } + } + + // Remove unneeded pods. + if (total as u64) > self.normalize_queue(Gpu::L4, queue) { + for c in sc.iter().rev() { + let mut excess_queue = total as u64 - self.normalize_queue(c.gpu, queue); + let mut excess_provers = (excess_queue / self.speed(c.gpu)) as u32; + let p = provers.entry(c.to_key()).or_default(); + if *p < excess_provers { + excess_provers = *p; + excess_queue = *p as u64 * self.speed(c.gpu); + } + *p -= excess_provers; + total -= excess_queue as i64; + if total <= 0 { + break; + }; + } + } + + // Reduce load in over capacity pools. 
+ for c in &sc { + let p = provers.entry(c.to_key()).or_default(); + if c.max_pool_size < *p { + let excess = *p - c.max_pool_size; + total -= excess as i64 * self.speed(c.gpu) as i64; + *p -= excess; + } + } + + tracing::debug!("Queue covered with provers: {}", total); + // Add required provers. + if (total as u64) < queue { + for c in &sc { + let mut required_queue = queue - total as u64; + let mut required_provers = + (self.normalize_queue(c.gpu, required_queue) / self.speed(c.gpu)) as u32; + let p = provers.entry(c.to_key()).or_default(); + if *p + required_provers > c.max_pool_size { + required_provers = c.max_pool_size - *p; + required_queue = required_provers as u64 * self.speed(c.gpu); + } + *p += required_provers; + total += required_queue as i64; + } + } + + tracing::debug!( + "run result for namespace {}: provers {:?}, total: {}", + namespace, + &provers, + total + ); + + provers + } +} + +fn diff( + namespace: &str, + provers: HashMap, + clusters: &Clusters, + requests: &mut HashMap, +) { + provers + .into_iter() + .for_each(|(GPUPoolKey { cluster, gpu }, n)| { + let prover = gpu_to_prover(gpu); + clusters + .clusters + .get(&cluster) + .and_then(|c| c.namespaces.get(namespace)) + .and_then(|ns| ns.deployments.get(&prover)) + .map_or_else( + || { + tracing::error!( + "Wasn't able to find deployment {} in cluster {}, namespace {}", + prover, + cluster, + namespace + ) + }, + |d| { + if d.desired != n as i32 { + requests + .entry(cluster.clone()) + .or_default() + .deployments + .push(ScaleDeploymentRequest { + namespace: namespace.into(), + name: prover.clone(), + size: n as i32, + }); + } + }, + ); + }) +} + +/// is_namespace_running returns true if there are some pods running in it. +fn is_namespace_running(namespace: &str, clusters: &Clusters) -> bool { + clusters + .clusters + .values() + .flat_map(|v| v.namespaces.iter()) + .filter_map(|(k, v)| if k == namespace { Some(v) } else { None }) + .flat_map(|v| v.deployments.values()) + .map( + |d| d.running + d.desired, // If there is something running or expected to run, we + // should re-evaluate the namespace. + ) + .sum::() + > 0 +} + +#[async_trait::async_trait] +impl Task for Scaler { + async fn invoke(&self) -> anyhow::Result<()> { + let queue = self.queuer.get_queue().await.unwrap(); + + let mut scale_requests: HashMap = HashMap::new(); + { + let guard = self.watcher.data.lock().await; // Keeping the lock during all calls of run() for + // consitency. + if let Err(err) = watcher::check_is_ready(&guard.is_ready) { + AUTOSCALER_METRICS.clusters_not_ready.inc(); + tracing::warn!("Skipping Scaler run: {}", err); + return Ok(()); + } + + for (ns, ppv) in &self.namespaces { + let q = queue.queue.get(ppv).cloned().unwrap_or(0); + tracing::debug!("Running eval for namespace {ns} and PPV {ppv} found queue {q}"); + if q > 0 || is_namespace_running(ns, &guard.clusters) { + let provers = self.run(ns, q, &guard.clusters); + for (k, num) in &provers { + AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] + .set(*num as u64); + } + diff(ns, provers, &guard.clusters, &mut scale_requests); + } + } + } // Unlock self.watcher.data. 
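+        // At this point `scale_requests` maps a cluster name to the ScaleRequest assembled by
+        // `diff` above; hand them to the watcher, which forwards each one to that cluster's agent.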
+ + if let Err(err) = self.watcher.send_scale(scale_requests).await { + tracing::error!("Failed scale request: {}", err); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + cluster_types::{Deployment, Namespace, Pod}, + global::{queuer, watcher}, + }; + + #[tracing_test::traced_test] + #[test] + fn test_run() { + let scaler = Scaler::new( + watcher::Watcher::default(), + queuer::Queuer::default(), + ProverAutoscalerScalerConfig { + cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), + min_provers: [("prover-other".into(), 2)].into(), + max_provers: [ + ("foo".into(), [(Gpu::L4, 100)].into()), + ("bar".into(), [(Gpu::L4, 100)].into()), + ] + .into(), + ..Default::default() + }, + ); + + assert_eq!( + scaler.run( + &"prover".into(), + 1499, + &Clusters { + clusters: [( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), + pods: [( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + )] + .into(), + }, + )] + .into(), + }, + )] + .into(), + ..Default::default() + }, + ), + [( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 3, + )] + .into(), + "3 new provers" + ); + assert_eq!( + scaler.run( + &"prover".into(), + 499, + &Clusters { + clusters: [ + ( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), + ..Default::default() + }, + )] + .into(), + }, + ), + ( + "bar".into(), + Cluster { + name: "bar".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment { + running: 1, + desired: 1, + }, + )] + .into(), + pods: [( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + )] + .into(), + }, + )] + .into(), + }, + ) + ] + .into(), + ..Default::default() + }, + ), + [ + ( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 0, + ), + ( + GPUPoolKey { + cluster: "bar".into(), + gpu: Gpu::L4, + }, + 1, + ) + ] + .into(), + "Preserve running" + ); + } + + #[tracing_test::traced_test] + #[test] + fn test_run_min_provers() { + let scaler = Scaler::new( + watcher::Watcher::default(), + queuer::Queuer::default(), + ProverAutoscalerScalerConfig { + cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), + min_provers: [("prover".into(), 2)].into(), + max_provers: [ + ("foo".into(), [(Gpu::L4, 100)].into()), + ("bar".into(), [(Gpu::L4, 100)].into()), + ] + .into(), + ..Default::default() + }, + ); + + assert_eq!( + scaler.run( + &"prover".into(), + 10, + &Clusters { + clusters: [ + ( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), + ..Default::default() + }, + )] + .into(), + }, + ), + ( + "bar".into(), + Cluster { + name: "bar".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), + ..Default::default() + }, + )] + .into(), + }, + ) + ] + .into(), + ..Default::default() + }, + ), + [ + ( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 2, + ), + ( + GPUPoolKey { + cluster: "bar".into(), + gpu: Gpu::L4, + }, + 0, + ) + ] + 
.into(), + "Min 2 provers, non running" + ); + assert_eq!( + scaler.run( + &"prover".into(), + 0, + &Clusters { + clusters: [ + ( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment { + running: 3, + desired: 3, + }, + )] + .into(), + pods: [ + ( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc2".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc3".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ) + ] + .into(), + }, + )] + .into(), + }, + ), + ( + "bar".into(), + Cluster { + name: "bar".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment { + running: 2, + desired: 2, + }, + )] + .into(), + pods: [ + ( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc2".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ) + ] + .into(), + }, + )] + .into(), + }, + ) + ] + .into(), + ..Default::default() + }, + ), + [ + ( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 2, + ), + ( + GPUPoolKey { + cluster: "bar".into(), + gpu: Gpu::L4, + }, + 0, + ) + ] + .into(), + "Min 2 provers, 5 running" + ); + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs new file mode 100644 index 00000000000..6e02c0fe2fd --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs @@ -0,0 +1,203 @@ +use std::{collections::HashMap, sync::Arc}; + +use anyhow::{anyhow, Context, Ok, Result}; +use futures::future; +use reqwest::{ + header::{HeaderMap, HeaderValue, CONTENT_TYPE}, + Method, +}; +use tokio::sync::Mutex; +use url::Url; +use zksync_utils::http_with_retries::send_request_with_retries; + +use crate::{ + agent::{ScaleRequest, ScaleResponse}, + cluster_types::{Cluster, Clusters}, + metrics::{AUTOSCALER_METRICS, DEFAULT_ERROR_CODE}, + task_wiring::Task, +}; + +const MAX_RETRIES: usize = 5; + +#[derive(Default)] +pub struct WatchedData { + pub clusters: Clusters, + pub is_ready: Vec, +} + +pub fn check_is_ready(v: &Vec) -> Result<()> { + for b in v { + if !b { + return Err(anyhow!("Clusters data is not ready")); + } + } + Ok(()) +} + +#[derive(Default, Clone)] +pub struct Watcher { + /// List of base URLs of all agents. + pub cluster_agents: Vec>, + pub data: Arc>, +} + +impl Watcher { + pub fn new(agent_urls: Vec) -> Self { + let size = agent_urls.len(); + Self { + cluster_agents: agent_urls + .into_iter() + .map(|u| { + Arc::new( + Url::parse(&u) + .unwrap_or_else(|e| panic!("Unparsable Agent URL {}: {}", u, e)), + ) + }) + .collect(), + data: Arc::new(Mutex::new(WatchedData { + clusters: Clusters::default(), + is_ready: vec![false; size], + })), + } + } + + pub async fn send_scale(&self, requests: HashMap) -> anyhow::Result<()> { + let id_requests: HashMap; + { + // Convert cluster names into ids. Holding the data lock. 
+ let guard = self.data.lock().await; + id_requests = requests + .into_iter() + .filter_map(|(cluster, scale_request)| { + guard.clusters.agent_ids.get(&cluster).map_or_else( + || { + tracing::error!("Failed to find id for cluster {}", cluster); + None + }, + |id| Some((*id, scale_request)), + ) + }) + .collect(); + } + + let handles: Vec<_> = id_requests + .into_iter() + .map(|(id, sr)| { + let url: String = self.cluster_agents[id] + .clone() + .join("/scale") + .unwrap() + .to_string(); + tracing::debug!("Sending scale request to {}, data: {:?}.", url, sr); + tokio::spawn(async move { + let mut headers = HeaderMap::new(); + headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); + let response = send_request_with_retries( + &url, + MAX_RETRIES, + Method::POST, + Some(headers), + Some(serde_json::to_vec(&sr)?), + ) + .await; + let response = response.map_err(|err| { + AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc(); + anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}") + })?; + AUTOSCALER_METRICS.calls[&(url, response.status().as_u16())].inc(); + let response = response + .json::() + .await + .context("Failed to read response as json"); + Ok((id, response)) + }) + }) + .collect(); + + future::try_join_all( + future::join_all(handles) + .await + .into_iter() + .map(|h| async move { + let (id, res) = h??; + + let errors: Vec<_> = res + .expect("failed to do request to Agent") + .scale_result + .iter() + .filter_map(|e| { + if !e.is_empty() { + Some(format!("Agent {} failed to scale: {}", id, e)) + } else { + None + } + }) + .collect(); + + if !errors.is_empty() { + return Err(anyhow!(errors.join(";"))); + } + Ok(()) + }) + .collect::>(), + ) + .await?; + + Ok(()) + } +} + +#[async_trait::async_trait] +impl Task for Watcher { + async fn invoke(&self) -> anyhow::Result<()> { + let handles: Vec<_> = self + .cluster_agents + .clone() + .into_iter() + .enumerate() + .map(|(i, a)| { + tracing::debug!("Getting cluster data from agent {}.", a); + tokio::spawn(async move { + let url: String = a + .clone() + .join("/cluster") + .context("Failed to join URL with /cluster")? + .to_string(); + let response = + send_request_with_retries(&url, MAX_RETRIES, Method::GET, None, None).await; + + let response = response.map_err(|err| { + // TODO: refactor send_request_with_retries to return status. 
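+                    // A transport-level failure carries no HTTP status, so the
+                    // call is recorded under the generic DEFAULT_ERROR_CODE (500)
+                    // rather than a real response code; that is what the TODO
+                    // above refers to.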
+ AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc(); + anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}") + })?; + AUTOSCALER_METRICS.calls[&(url, response.status().as_u16())].inc(); + let response = response + .json::() + .await + .context("Failed to read response as json"); + Ok((i, response)) + }) + }) + .collect(); + + future::try_join_all( + future::join_all(handles) + .await + .into_iter() + .map(|h| async move { + let (i, res) = h??; + let c = res?; + let mut guard = self.data.lock().await; + guard.clusters.agent_ids.insert(c.name.clone(), i); + guard.clusters.clusters.insert(c.name.clone(), c); + guard.is_ready[i] = true; + Ok(()) + }) + .collect::>(), + ) + .await?; + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs b/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs new file mode 100644 index 00000000000..0804b9eaa40 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs @@ -0,0 +1,5 @@ +pub use scaler::Scaler; +pub use watcher::Watcher; + +mod scaler; +mod watcher; diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs new file mode 100644 index 00000000000..5e6f56aacc9 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs @@ -0,0 +1,42 @@ +use k8s_openapi::api; +use kube::api::{Api, Patch, PatchParams}; + +#[derive(Clone)] +pub struct Scaler { + pub client: kube::Client, + dry_run: bool, +} + +impl Scaler { + pub fn new(client: kube::Client, dry_run: bool) -> Self { + Self { client, dry_run } + } + + pub async fn scale(&self, namespace: &str, name: &str, size: i32) -> anyhow::Result<()> { + let deployments: Api = + Api::namespaced(self.client.clone(), namespace); + + let patch = serde_json::json!({ + "apiVersion": "apps/v1", + "kind": "Deployment", + "spec": { + "replicas": size + } + }); + + if self.dry_run { + tracing::info!( + "Dry run of scaled deployment/{} to {} replica(s).", + name, + size + ); + return Ok(()); + } + + let pp = PatchParams::default(); + deployments.patch(name, &pp, &Patch::Merge(patch)).await?; + tracing::info!("Scaled deployment/{} to {} replica(s).", name, size); + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs new file mode 100644 index 00000000000..f94dfc3704f --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs @@ -0,0 +1,133 @@ +use std::{collections::HashMap, sync::Arc}; + +use chrono::Utc; +use futures::{stream, StreamExt, TryStreamExt}; +use k8s_openapi::api; +use kube::{ + api::{Api, ResourceExt}, + runtime::{watcher, WatchStreamExt}, +}; +use tokio::sync::Mutex; + +use crate::cluster_types::{Cluster, Deployment, Namespace, Pod}; + +#[derive(Clone)] +pub struct Watcher { + pub client: kube::Client, + pub cluster: Arc>, +} + +impl Watcher { + pub fn new(client: kube::Client, cluster_name: String, namespaces: Vec) -> Self { + let mut ns = HashMap::new(); + namespaces.into_iter().for_each(|n| { + ns.insert(n, Namespace::default()); + }); + + Self { + client, + cluster: Arc::new(Mutex::new(Cluster { + name: cluster_name, + namespaces: ns, + })), + } + } + + pub async fn run(self) -> anyhow::Result<()> { + // TODO: add actual metrics + + // TODO: watch for a list of namespaces, get: + // - deployments (name, running, desired) [done] + // - pods (name, parent deployment, statuses, when the last status change) [~done] + // - events (number of scheduling failures in last 
N seconds, which deployments) + // - events (preemptions, which deployment, when, how many) + // - pool size from GCP (name, size, which GPU) + let mut watchers = vec![]; + for namespace in self.cluster.lock().await.namespaces.keys() { + let deployments: Api = + Api::namespaced(self.client.clone(), namespace); + watchers.push( + watcher(deployments, watcher::Config::default()) + .default_backoff() + .applied_objects() + .map_ok(Watched::Deploy) + .boxed(), + ); + + let pods: Api = Api::namespaced(self.client.clone(), namespace); + watchers.push( + watcher(pods, watcher::Config::default()) + .default_backoff() + .applied_objects() + .map_ok(Watched::Pod) + .boxed(), + ); + } + // select on applied events from all watchers + let mut combo_stream = stream::select_all(watchers); + // SelectAll Stream elements must have the same Item, so all packed in this: + #[allow(clippy::large_enum_variant)] + enum Watched { + Deploy(api::apps::v1::Deployment), + Pod(api::core::v1::Pod), + } + while let Some(o) = combo_stream.try_next().await? { + match o { + Watched::Deploy(d) => { + let namespace = match d.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let dep = v + .deployments + .entry(d.name_any()) + .or_insert(Deployment::default()); + let nums = d.status.clone().unwrap_or_default(); + dep.running = nums.available_replicas.unwrap_or_default(); + dep.desired = nums.replicas.unwrap_or_default(); + + tracing::info!( + "Got deployment: {}, size: {}/{} un {}", + d.name_any(), + nums.available_replicas.unwrap_or_default(), + nums.replicas.unwrap_or_default(), + nums.unavailable_replicas.unwrap_or_default(), + ) + } + Watched::Pod(p) => { + let namespace = match p.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let pod = v.pods.entry(p.name_any()).or_insert(Pod::default()); + pod.owner = p + .owner_references() + .iter() + .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone())) + .collect::>() + .join(":"); + // TODO: Collect replica sets to match deployments and pods. + let phase = p + .status + .clone() + .unwrap_or_default() + .phase + .unwrap_or_default(); + if phase != pod.status { + // TODO: try to get an idea how to set correct value on restart. 
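+                        // Example: when a pod flips from Pending to Running, its
+                        // `changed` timestamp is reset here; the global scaler
+                        // later compares `changed` against `long_pending_duration`
+                        // to reclassify pods stuck in Pending as LongPending.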
+ pod.changed = Utc::now(); + } + pod.status = phase; + + tracing::info!("Got pod: {}", p.name_any()) + } + } + } + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/lib.rs b/prover/crates/bin/prover_autoscaler/src/lib.rs new file mode 100644 index 00000000000..0b0d704c907 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/lib.rs @@ -0,0 +1,6 @@ +pub mod agent; +pub(crate) mod cluster_types; +pub mod global; +pub mod k8s; +pub(crate) mod metrics; +pub mod task_wiring; diff --git a/prover/crates/bin/prover_autoscaler/src/main.rs b/prover/crates/bin/prover_autoscaler/src/main.rs new file mode 100644 index 00000000000..45e476079a5 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/main.rs @@ -0,0 +1,146 @@ +use std::time::Duration; + +use anyhow::Context; +use structopt::StructOpt; +use tokio::{ + sync::{oneshot, watch}, + task::JoinHandle, +}; +use zksync_core_leftovers::temp_config_store::read_yaml_repr; +use zksync_protobuf_config::proto::prover_autoscaler; +use zksync_prover_autoscaler::{ + agent, + global::{self}, + k8s::{Scaler, Watcher}, + task_wiring::TaskRunner, +}; +use zksync_utils::wait_for_tasks::ManagedTasks; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +/// Represents the sequential number of the Prover Autoscaler type. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] +pub enum AutoscalerType { + Scaler, + Agent, +} + +impl std::str::FromStr for AutoscalerType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "scaler" => Ok(AutoscalerType::Scaler), + "agent" => Ok(AutoscalerType::Agent), + other => Err(format!("{} is not a valid AutoscalerType", other)), + } + } +} + +#[derive(Debug, StructOpt)] +#[structopt(name = "Prover Autoscaler", about = "Run Prover Autoscaler components")] +struct Opt { + /// Prover Autoscaler can run Agent or Scaler type. + /// + /// Specify `agent` or `scaler` + #[structopt(short, long, default_value = "agent")] + job: AutoscalerType, + /// Name of the cluster Agent is watching. + #[structopt(long)] + cluster_name: Option, + /// Path to the configuration file. + #[structopt(long)] + config_path: std::path::PathBuf, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let opt = Opt::from_args(); + let general_config = + read_yaml_repr::(&opt.config_path) + .context("general config")?; + let observability_config = general_config + .observability + .context("observability config")?; + let _observability_guard = observability_config.install()?; + // That's unfortunate that there are at least 3 different Duration in rust and we use all 3 in this repo. + // TODO: Consider updating zksync_protobuf to support std::time::Duration. 
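+    // Sketch of the conversion below: the config exposes a signed duration
+    // value, and `unsigned_abs()` turns it into the `std::time::Duration` that
+    // `ManagedTasks::complete` expects (the exact intermediate type comes from
+    // the protobuf config layer and is not shown here).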
+ let graceful_shutdown_timeout = general_config.graceful_shutdown_timeout.unsigned_abs(); + + let (stop_signal_sender, stop_signal_receiver) = oneshot::channel(); + let mut stop_signal_sender = Some(stop_signal_sender); + ctrlc::set_handler(move || { + if let Some(sender) = stop_signal_sender.take() { + sender.send(()).ok(); + } + }) + .context("Error setting Ctrl+C handler")?; + + let (stop_sender, stop_receiver) = watch::channel(false); + + let _ = rustls::crypto::ring::default_provider().install_default(); + let client = kube::Client::try_default().await?; + + let mut tasks = vec![]; + + match opt.job { + AutoscalerType::Agent => { + let cluster = opt + .cluster_name + .context("cluster_name is required for Agent")?; + tracing::info!("Starting ProverAutoscaler Agent for cluster {}", cluster); + let agent_config = general_config.agent_config.context("agent_config")?; + let exporter_config = PrometheusExporterConfig::pull(agent_config.prometheus_port); + tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); + + // TODO: maybe get cluster name from curl -H "Metadata-Flavor: Google" + // http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name + let watcher = Watcher::new(client.clone(), cluster, agent_config.namespaces); + let scaler = Scaler::new(client, agent_config.dry_run); + tasks.push(tokio::spawn(watcher.clone().run())); + tasks.push(tokio::spawn(agent::run_server( + agent_config.http_port, + watcher, + scaler, + stop_receiver.clone(), + ))) + } + AutoscalerType::Scaler => { + tracing::info!("Starting ProverAutoscaler Scaler"); + let scaler_config = general_config.scaler_config.context("scaler_config")?; + let interval = scaler_config.scaler_run_interval.unsigned_abs(); + let exporter_config = PrometheusExporterConfig::pull(scaler_config.prometheus_port); + tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); + let watcher = global::watcher::Watcher::new(scaler_config.agents.clone()); + let queuer = global::queuer::Queuer::new(scaler_config.prover_job_monitor_url.clone()); + let scaler = global::scaler::Scaler::new(watcher.clone(), queuer, scaler_config); + tasks.extend(get_tasks(watcher, scaler, interval, stop_receiver)?); + } + } + + let mut tasks = ManagedTasks::new(tasks); + + tokio::select! 
{ + _ = tasks.wait_single() => {}, + _ = stop_signal_receiver => { + tracing::info!("Stop signal received, shutting down"); + } + } + stop_sender.send(true).ok(); + tasks.complete(graceful_shutdown_timeout).await; + + Ok(()) +} + +fn get_tasks( + watcher: global::watcher::Watcher, + scaler: global::scaler::Scaler, + interval: Duration, + stop_receiver: watch::Receiver, +) -> anyhow::Result>>> { + let mut task_runner = TaskRunner::default(); + + task_runner.add("Watcher", interval, watcher); + task_runner.add("Scaler", interval, scaler); + + Ok(task_runner.spawn(stop_receiver)) +} diff --git a/prover/crates/bin/prover_autoscaler/src/metrics.rs b/prover/crates/bin/prover_autoscaler/src/metrics.rs new file mode 100644 index 00000000000..d94ac8b97e9 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/metrics.rs @@ -0,0 +1,20 @@ +use vise::{Counter, Gauge, LabeledFamily, Metrics}; +use zksync_config::configs::prover_autoscaler::Gpu; + +pub const DEFAULT_ERROR_CODE: u16 = 500; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "autoscaler")] +pub(crate) struct AutoscalerMetrics { + #[metrics(labels = ["target_namespace", "protocol_version"])] + pub prover_protocol_version: LabeledFamily<(String, String), Gauge, 2>, + #[metrics(labels = ["target_cluster", "target_namespace", "gpu"])] + pub provers: LabeledFamily<(String, String, Gpu), Gauge, 3>, + pub clusters_not_ready: Counter, + #[metrics(labels = ["target", "status"])] + pub calls: LabeledFamily<(String, u16), Counter, 2>, + // TODO: count of command send succes/fail +} + +#[vise::register] +pub(crate) static AUTOSCALER_METRICS: vise::Global = vise::Global::new(); diff --git a/prover/crates/bin/prover_autoscaler/src/task_wiring.rs b/prover/crates/bin/prover_autoscaler/src/task_wiring.rs new file mode 100644 index 00000000000..9b60145ad9e --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/task_wiring.rs @@ -0,0 +1,72 @@ +use std::time::Duration; + +use anyhow::Context; +use tracing::Instrument; + +/// Task trait to be run in ProverJobMonitor. +#[async_trait::async_trait] +pub trait Task { + async fn invoke(&self) -> anyhow::Result<()>; +} + +/// Wrapper for Task with a periodic interface. Holds information about the task and provides DB connectivity. +struct PeriodicTask { + job: Box, + name: String, + interval: Duration, +} + +impl PeriodicTask { + async fn run( + &self, + mut stop_receiver: tokio::sync::watch::Receiver, + ) -> anyhow::Result<()> { + tracing::info!( + "Started Task {} with run interval: {:?}", + self.name, + self.interval + ); + + let mut interval = tokio::time::interval(self.interval); + + while !*stop_receiver.borrow_and_update() { + interval.tick().await; + self.job + .invoke() + .instrument(tracing::info_span!("run", service_name = %self.name)) + .await + .context("failed to invoke task")?; + } + tracing::info!("Stop signal received; Task {} is shut down", self.name); + Ok(()) + } +} + +/// Wrapper on a vector of task. Makes adding/spawning tasks and sharing resources ergonomic. 
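+/// Typical usage (see `get_tasks` in `main.rs`): `add()` each task together with
+/// its run interval, then call `spawn()` with a stop receiver to obtain the
+/// join handles.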
+#[derive(Default)] +pub struct TaskRunner { + tasks: Vec, +} + +impl TaskRunner { + pub fn add(&mut self, name: &str, interval: Duration, job: T) { + self.tasks.push(PeriodicTask { + name: name.into(), + interval, + job: Box::new(job), + }); + } + + pub fn spawn( + self, + stop_receiver: tokio::sync::watch::Receiver, + ) -> Vec>> { + self.tasks + .into_iter() + .map(|task| { + let receiver = stop_receiver.clone(); + tokio::spawn(async move { task.run(receiver).await }) + }) + .collect() + } +} diff --git a/prover/crates/bin/witness_generator/README.md b/prover/crates/bin/witness_generator/README.md index dc476ca44fc..6063c29b334 100644 --- a/prover/crates/bin/witness_generator/README.md +++ b/prover/crates/bin/witness_generator/README.md @@ -1,9 +1,5 @@ # WitnessGenerator -Please read this -[doc](https://www.notion.so/matterlabs/Draft-FRI-Prover-Integration-Prover-Shadowing-c4b1373786eb43779a93118be4be5d99) -for rationale of this binary, alongside the existing one in zk-core. - The component is responsible for generating prover jobs and saving artifacts needed for the next round of proof aggregation. That is, every aggregation round needs two sets of input: diff --git a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs index 23ae1b0f2af..31dc5481410 100644 --- a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs +++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs @@ -5,7 +5,7 @@ use std::{ }; use circuit_definitions::{ - circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage}, + circuit_definitions::base_layer::ZkSyncBaseLayerStorage, encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness, }; @@ -21,7 +21,7 @@ use zksync_multivm::{ zk_evm_latest::ethereum_types::Address, }; use zksync_object_store::ObjectStore; -use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData}; +use zksync_prover_fri_types::keys::ClosedFormInputKey; use zksync_prover_interface::inputs::WitnessInputData; use zksync_system_constants::BOOTLOADER_ADDRESS; use zksync_types::L1BatchNumber; @@ -31,8 +31,7 @@ use crate::{ rounds::basic_circuits::Witness, storage_oracle::StorageOracle, utils::{ - expand_bootloader_contents, save_circuit, save_ram_premutation_queue_witness, - ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE, + expand_bootloader_contents, save_circuit, ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE, }, witness::WitnessStorage, }; @@ -64,17 +63,38 @@ pub(super) async fn generate_witness( let (circuit_sender, mut circuit_receiver) = tokio::sync::mpsc::channel(1); let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1); - let (ram_permutation_queue_sender, mut ram_permutation_queue_receiver) = - tokio::sync::mpsc::channel(1); let make_circuits_span = tracing::info_span!("make_circuits"); let make_circuits_span_copy = make_circuits_span.clone(); + + use std::{sync::mpsc::sync_channel, thread}; + let (artifacts_sender, artifacts_receiver) = sync_channel(1); + + let artifacts_receiver_handle = thread::spawn(move || { + let span = tracing::info_span!(parent: make_circuits_span_copy, "make_circuits_blocking"); + + while let Ok(artifact) = artifacts_receiver.recv() { + match artifact { + WitnessGenerationArtifact::BaseLayerCircuit(circuit) => { + let parent_span = span.clone(); + tracing::info_span!(parent: parent_span, "send_circuit").in_scope(|| { + 
circuit_sender + .blocking_send(circuit) + .expect("failed to send circuit from harness"); + }); + } + WitnessGenerationArtifact::RecursionQueue((a, b, c)) => queue_sender + .blocking_send((a as u8, b, c)) + .expect("failed to send recursion queue from harness"), + _ => {} + } + } + }); + // Blocking call from harness that does the CPU heavy lifting. // Provides circuits and recursion queue via callback functions and returns scheduler witnesses. // Circuits are "streamed" one by one as they're being generated. let make_circuits_handle = tokio::task::spawn_blocking(move || { - let span = tracing::info_span!(parent: make_circuits_span_copy, "make_circuits_blocking"); - let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state); let storage_view = StorageView::new(witness_storage).to_rc_ptr(); @@ -91,28 +111,10 @@ pub(super) async fn generate_witness( .to_str() .expect("Path to KZG trusted setup is not a UTF-8 string"); - let artifacts_callback = |artifact: WitnessGenerationArtifact| match artifact { - WitnessGenerationArtifact::BaseLayerCircuit(circuit) => { - let parent_span = span.clone(); - tracing::info_span!(parent: parent_span, "send_circuit").in_scope(|| { - circuit_sender - .blocking_send(circuit) - .expect("failed to send circuit from harness"); - }); - } - WitnessGenerationArtifact::RecursionQueue((a, b, c)) => queue_sender - .blocking_send((a as u8, b, c)) - .expect("failed to send recursion queue from harness"), - a @ WitnessGenerationArtifact::MemoryQueueWitness(_) => { - let parent_span = span.clone(); - tracing::info_span!(parent: parent_span, "send_ram_permutation_queue_witness") - .in_scope(|| { - ram_permutation_queue_sender - .blocking_send(a) - .expect("failed to send ram permutation queue sitness from harness"); - }); - } - }; + let evm_emulator_code_hash = input.vm_run_data.evm_emulator_code_hash; + // By convention, default AA is used instead of the EVM emulator if the latter is disabled. + let evm_emulator_code_hash = + evm_emulator_code_hash.unwrap_or(input.vm_run_data.default_account_code_hash); let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run( Address::zero(), @@ -121,17 +123,16 @@ pub(super) async fn generate_witness( bootloader_contents, false, input.vm_run_data.default_account_code_hash, - // NOTE: this will be evm_simulator_code_hash in future releases - input.vm_run_data.default_account_code_hash, + evm_emulator_code_hash, input.vm_run_data.used_bytecodes, Vec::default(), MAX_CYCLES_FOR_TX as usize, geometry_config, storage_oracle, tree, - path, + path.to_owned(), input.eip_4844_blobs.blobs(), - artifacts_callback, + artifacts_sender, ); (scheduler_witness, block_witness) }) @@ -150,8 +151,6 @@ pub(super) async fn generate_witness( // If the order is tampered with, proving will fail (as the proof would be computed for a different sequence of instruction). 
let mut circuit_sequence = 0; - let mut ram_circuit_sequence = 0; - while let Some(circuit) = circuit_receiver .recv() .instrument(tracing::info_span!("wait_for_circuit")) @@ -166,26 +165,9 @@ pub(super) async fn generate_witness( .await .expect("failed to get permit for running save circuit task"); - let partial_circuit_aux_data = match &circuit { - ZkSyncBaseLayerCircuit::RAMPermutation(_) => { - let circuit_subsequence_number = ram_circuit_sequence; - ram_circuit_sequence += 1; - Some(CircuitAuxData { - circuit_subsequence_number, - }) - } - _ => None, - }; - save_circuit_handles.push(tokio::task::spawn(async move { - let (circuit_id, circuit_url) = save_circuit( - block_number, - circuit, - sequence, - partial_circuit_aux_data, - object_store, - ) - .await; + let (circuit_id, circuit_url) = + save_circuit(block_number, circuit, sequence, object_store).await; drop(permit); (circuit_id, circuit_url) })); @@ -193,57 +175,6 @@ pub(super) async fn generate_witness( } .instrument(save_circuits_span); - let mut save_ram_queue_witness_handles = vec![]; - - let save_ram_queue_witness_span = tracing::info_span!("save_circuits"); - - // Future which receives part of RAM permutation circuits witnesses and saves them async. - // Uses semaphore because these artifacts are of significant size - let ram_queue_witness_receiver_handle = async { - let mut sorted_sequence = 0; - let mut unsorted_sequence = 0; - - while let Some(witness_artifact) = ram_permutation_queue_receiver - .recv() - .instrument(tracing::info_span!("wait_for_ram_witness")) - .await - { - let object_store = object_store.clone(); - let semaphore = semaphore.clone(); - let permit = semaphore - .acquire_owned() - .await - .expect("failed to get permit for running save ram permutation queue witness task"); - let (is_sorted, witness, sequence) = match witness_artifact { - WitnessGenerationArtifact::MemoryQueueWitness((witness, sorted)) => { - let sequence = if sorted { - let sequence = sorted_sequence; - sorted_sequence += 1; - sequence - } else { - let sequence = unsorted_sequence; - unsorted_sequence += 1; - sequence - }; - (sorted, witness, sequence) - } - _ => panic!("Invalid artifact received"), - }; - save_ram_queue_witness_handles.push(tokio::task::spawn(async move { - let _ = save_ram_premutation_queue_witness( - block_number, - sequence, - is_sorted, - witness, - object_store, - ) - .await; - drop(permit); - })); - } - } - .instrument(save_ram_queue_witness_span); - let mut save_queue_handles = vec![]; let save_queues_span = tracing::info_span!("save_queues"); @@ -269,11 +200,10 @@ pub(super) async fn generate_witness( } .instrument(save_queues_span); - let (witnesses, _, _, _) = tokio::join!( + let (witnesses, _, _) = tokio::join!( make_circuits_handle, circuit_receiver_handle, - queue_receiver_handle, - ram_queue_witness_receiver_handle + queue_receiver_handle ); let (mut scheduler_witness, block_aux_witness) = witnesses.unwrap(); @@ -298,11 +228,7 @@ pub(super) async fn generate_witness( .filter(|(circuit_id, _, _)| circuits_present.contains(circuit_id)) .collect(); - let _: Vec<_> = futures::future::join_all(save_ram_queue_witness_handles) - .await - .into_iter() - .map(|result| result.expect("failed to save ram permutation queue witness")) - .collect(); + artifacts_receiver_handle.join().unwrap(); scheduler_witness.previous_block_meta_hash = input.previous_batch_metadata.meta_hash.0; scheduler_witness.previous_block_aux_hash = input.previous_batch_metadata.aux_hash.0; diff --git 
a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs index 8524bdae9ff..ea631f19cd8 100644 --- a/prover/crates/bin/witness_generator/src/utils.rs +++ b/prover/crates/bin/witness_generator/src/utils.rs @@ -3,10 +3,7 @@ use std::{ sync::Arc, }; -use circuit_definitions::{ - circuit_definitions::base_layer::ZkSyncBaseLayerCircuit, - encodings::memory_query::MemoryQueueStateWitnesses, -}; +use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; use once_cell::sync::Lazy; use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; use zksync_multivm::utils::get_used_bootloader_memory_bytes; @@ -24,8 +21,8 @@ use zksync_prover_fri_types::{ encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness, }, - keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey, RamPermutationQueueWitnessKey}, - CircuitAuxData, CircuitWrapper, FriProofWrapper, RamPermutationQueueWitness, + keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey}, + CircuitWrapper, FriProofWrapper, }; use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber, ProtocolVersionId, U256}; @@ -121,7 +118,6 @@ pub async fn save_circuit( block_number: L1BatchNumber, circuit: ZkSyncBaseLayerCircuit, sequence_number: usize, - aux_data_for_partial_circuit: Option, object_store: Arc, ) -> (u8, String) { let circuit_id = circuit.numeric_circuit_type(); @@ -133,43 +129,12 @@ pub async fn save_circuit( depth: 0, }; - let blob_url = if let Some(aux_data_for_partial_circuit) = aux_data_for_partial_circuit { - object_store - .put( - circuit_key, - &CircuitWrapper::BasePartial((circuit, aux_data_for_partial_circuit)), - ) - .await - .unwrap() - } else { - object_store - .put(circuit_key, &CircuitWrapper::Base(circuit)) - .await - .unwrap() - }; - (circuit_id, blob_url) -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %block_number) -)] -pub async fn save_ram_premutation_queue_witness( - block_number: L1BatchNumber, - circuit_subsequence_number: usize, - is_sorted: bool, - witness: MemoryQueueStateWitnesses, - object_store: Arc, -) -> String { - let witness_key = RamPermutationQueueWitnessKey { - block_number, - circuit_subsequence_number, - is_sorted, - }; - object_store - .put(witness_key, &RamPermutationQueueWitness { witness }) + let blob_url = object_store + .put(circuit_key, &CircuitWrapper::Base(circuit)) .await - .unwrap() + .unwrap(); + + (circuit_id, blob_url) } #[tracing::instrument( diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs index 6225943e3cd..ab3b115bc63 100644 --- a/prover/crates/lib/keystore/src/keystore.rs +++ b/prover/crates/lib/keystore/src/keystore.rs @@ -470,6 +470,7 @@ impl Keystore { } /// Async loads mapping of all circuits to setup key, if successful + #[cfg(feature = "gpu")] pub async fn load_all_setup_key_mapping( &self, ) -> anyhow::Result>> { diff --git a/prover/crates/lib/prover_fri_types/src/lib.rs b/prover/crates/lib/prover_fri_types/src/lib.rs index 4a8a1b3e406..37e004d54ec 100644 --- a/prover/crates/lib/prover_fri_types/src/lib.rs +++ b/prover/crates/lib/prover_fri_types/src/lib.rs @@ -28,8 +28,8 @@ pub mod keys; pub mod queue; // THESE VALUES SHOULD BE UPDATED ON ANY PROTOCOL UPGRADE OF PROVERS -pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24; -pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(2); +pub const PROVER_PROTOCOL_VERSION: 
ProtocolVersionId = ProtocolVersionId::Version25; +pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(0); pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSemanticVersion { minor: PROVER_PROTOCOL_VERSION, patch: PROVER_PROTOCOL_PATCH, diff --git a/prover/docs/03_launch.md b/prover/docs/03_launch.md index 203fb6e8cec..0465d888f61 100644 --- a/prover/docs/03_launch.md +++ b/prover/docs/03_launch.md @@ -47,7 +47,7 @@ We will be running a bunch of binaries, it's recommended to run each in a separa ### Server ``` -zk server --components=api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip +zk server --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip ``` ### Proof data handler diff --git a/prover/docs/05_proving_batch.md b/prover/docs/05_proving_batch.md index e09a44cb0ff..c35de975bf7 100644 --- a/prover/docs/05_proving_batch.md +++ b/prover/docs/05_proving_batch.md @@ -14,17 +14,25 @@ GPU, which requires an NVIDIA A100 80GB GPU. ### Prerequisites -First of all, you need to install CUDA drivers, all other things will be dealt with by `zk_inception` and `prover_cli` -tools. For that, check the following [guide](./02_setup.md)(you can skip bellman-cuda step). +First of all, you need to install CUDA drivers, all other things will be dealt with by `zkstack` and `prover_cli` tools. +For that, check the following [guide](./02_setup.md)(you can skip bellman-cuda step). Install the prerequisites, which you can find [here](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/setup-dev.md). Note, that if you are not using Google VM instance, you also need to install [gcloud](https://cloud.google.com/sdk/docs/install#deb). -Now, you can use `zk_inception` and `prover_cli` tools for setting up the env and running prover subsystem. +Now, you can use `zkstack` and `prover_cli` tools for setting up the env and running prover subsystem. -```shell -cargo +nightly-2024-08-01 install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor prover_cli --force +First, install `zkstackup` with: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +Then install the most recent version of `zkstack` with: + +```bash +zkstackup ``` ## Initializing system @@ -33,14 +41,14 @@ After you have installed the tool, you can create ecosystem(you need to run only running: ```shell -zk_inception ecosystem create --l1-network=localhost --prover-mode=gpu --wallet-creation=localhost --l1-batch-commit-data-generator-mode=rollup --start-containers=true +zkstack ecosystem create --l1-network=localhost --prover-mode=gpu --wallet-creation=localhost --l1-batch-commit-data-generator-mode=rollup --start-containers=true ``` The command will create the ecosystem and all the necessary components for the prover subsystem. You can leave default values for all the prompts you will see Now, you need to initialize the prover subsystem by running: ```shell -zk_inception prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false +zkstack prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false ``` For prompts you can leave default values as well. 
@@ -87,13 +95,23 @@ After you have the data, you need to prepare the system to run the batch. So, da the protocol version it should use. You can do that with running ```shell -zk_supervisor prover-version +zkstack dev prover info ``` Example output: ```shell -Current protocol version found in zksync-era: 0.24.2, snark_wrapper: "0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2" +=============================== + +Current prover setup information: + +Protocol version: 0.24.2 + +Snark wrapper: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 + +Database URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_era + +=============================== ``` This command will provide you with the information about the semantic protocol version(you need to know only minor and @@ -118,7 +136,7 @@ prover_cli insert-batch --number= --version==13.7.0": - version "22.0.0" - resolved "https://registry.yarnpkg.com/@types/node/-/node-22.0.0.tgz#04862a2a71e62264426083abe1e27e87cac05a30" - integrity sha512-VT7KSYudcPOzP5Q0wfbowyNLaVR8QWUdw+088uFWwfvpY6uCWaXpqV6ieLAu9WBcnTa7H4Z5RLK8I5t2FuOcqw== + version "20.12.7" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.12.7.tgz#04080362fa3dd6c5822061aa3124f5c152cff384" + integrity sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg== dependencies: - undici-types "~6.11.1" + undici-types "~5.26.4" "@types/node@11.11.6": version "11.11.6" @@ -2720,9 +2951,9 @@ integrity sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw== "@types/node@^18.19.15": - version "18.19.42" - resolved "https://registry.yarnpkg.com/@types/node/-/node-18.19.42.tgz#b54ed4752c85427906aab40917b0f7f3d724bf72" - integrity sha512-d2ZFc/3lnK2YCYhos8iaNIYu9Vfhr92nHiyJHRltXWjXUBjEE+A4I58Tdbnw4VhggSW+2j5y5gTrLs4biNnubg== + version "18.19.31" + resolved "https://registry.yarnpkg.com/@types/node/-/node-18.19.31.tgz#b7d4a00f7cb826b60a543cebdbda5d189aaecdcd" + integrity sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA== dependencies: undici-types "~5.26.4" @@ -2739,9 +2970,9 @@ "@types/node" "*" "@types/pg@^8.10.3": - version "8.11.6" - resolved "https://registry.yarnpkg.com/@types/pg/-/pg-8.11.6.tgz#a2d0fb0a14b53951a17df5197401569fb9c0c54b" - integrity sha512-/2WmmBXHLsfRqzfHW7BNZ8SbYzE8OSk7i3WjFYvfgRHj7S1xj+16Je5fUKv3lVdVzk/zn9TXOqf+avFCFIE0yQ== + version "8.11.5" + resolved "https://registry.yarnpkg.com/@types/pg/-/pg-8.11.5.tgz#a1ffb4dc4a46a83bda096cb298051a5b171de167" + integrity sha512-2xMjVviMxneZHDHX5p5S6tsRRs7TpDHeeK7kTTMe/kAC/mRRNjWHjZg0rkiY+e17jXSZV3zJYDxXV8Cy72/Vuw== dependencies: "@types/node" "*" pg-protocol "*" @@ -2753,9 +2984,9 @@ integrity sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA== "@types/qs@^6.2.31": - version "6.9.15" - resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.15.tgz#adde8a060ec9c305a82de1babc1056e73bd64dce" - integrity sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg== + version "6.9.14" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.14.tgz#169e142bfe493895287bee382af6039795e9b75b" + integrity sha512-5khscbd3SwWMhFqylJBLQ0zIu7c1K6Vz0uBIt915BI3zV0q1nfjRQD3RqSBcPaO6PHEF4ov/t9y89fSiyThlPA== "@types/resolve@^0.0.8": version "0.0.8" @@ -3003,21 +3234,19 @@ acorn-jsx@^5.3.1, acorn-jsx@^5.3.2: integrity 
sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== acorn-walk@^8.1.1: - version "8.3.3" - resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.3.tgz#9caeac29eefaa0c41e3d4c65137de4d6f34df43e" - integrity sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw== - dependencies: - acorn "^8.11.0" + version "8.3.2" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.2.tgz#7703af9415f1b6db9315d6895503862e231d34aa" + integrity sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A== acorn@^7.4.0: version "7.4.1" resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa" integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== -acorn@^8.11.0, acorn@^8.4.1, acorn@^8.9.0: - version "8.12.1" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.12.1.tgz#71616bdccbe25e27a54439e0046e89ca76df2248" - integrity sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg== +acorn@^8.4.1, acorn@^8.9.0: + version "8.11.3" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.11.3.tgz#71e0b14e13a4ec160724b38fb7b0f233b1b81d7a" + integrity sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg== adm-zip@^0.4.16: version "0.4.16" @@ -3060,14 +3289,14 @@ ajv@^6.10.0, ajv@^6.12.3, ajv@^6.12.4, ajv@^6.12.6: uri-js "^4.2.2" ajv@^8.0.1: - version "8.17.1" - resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.17.1.tgz#37d9a5c776af6bc92d7f4f9510eba4c0a60d11a6" - integrity sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g== + version "8.12.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.12.0.tgz#d1a0527323e22f53562c567c00991577dfbe19d1" + integrity sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA== dependencies: - fast-deep-equal "^3.1.3" - fast-uri "^3.0.1" + fast-deep-equal "^3.1.1" json-schema-traverse "^1.0.0" require-from-string "^2.0.2" + uri-js "^4.2.2" amdefine@>=0.0.4: version "1.0.1" @@ -3086,7 +3315,7 @@ ansi-colors@4.1.1: resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.1.tgz#cbb9ae256bf750af1eab344f229aa27fe94ba348" integrity sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA== -ansi-colors@^4.1.1, ansi-colors@^4.1.3: +ansi-colors@^4.1.1: version "4.1.3" resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.3.tgz#37611340eb2243e70cc604cad35d63270d48781b" integrity sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw== @@ -3119,9 +3348,9 @@ ansi-regex@^5.0.1: integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== ansi-regex@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.0.1.tgz#3183e38fae9a65d7cb5e53945cd5897d0260a06a" - integrity sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA== + version "6.1.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654" + integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA== ansi-styles@^3.2.1: version "3.2.1" @@ -3153,9 +3382,9 @@ antlr4@^4.11.0: integrity 
sha512-kiXTspaRYvnIArgE97z5YVVf/cDVQABr3abFRR6mE7yesLMkgu4ujuyV/sgxafQ8wgve0DJQUJ38Z8tkgA2izA== antlr4@^4.13.1-patch-1: - version "4.13.1-patch-1" - resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.1-patch-1.tgz#946176f863f890964a050c4f18c47fd6f7e57602" - integrity sha512-OjFLWWLzDMV9rdFhpvroCWR4ooktNg9/nvVYSA5z28wuVpU36QUNuioR1XLnQtcjVlf8npjyz593PxnU/f/Cow== + version "4.13.2" + resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.2.tgz#0d084ad0e32620482a9c3a0e2470c02e72e4006d" + integrity sha512-QiVbZhyy4xAZ17UPEuG3YTOt8ZaoeOR1CvEAqrEsDBsOqINslaB147i9xqljZqoyf5S+EUlGStaj+t22LT9MOg== antlr4ts@^0.5.0-alpha.4: version "0.5.0-alpha.4" @@ -3338,11 +3567,6 @@ async@^2.4.0: dependencies: lodash "^4.17.14" -async@^3.2.3: - version "3.2.5" - resolved "https://registry.yarnpkg.com/async/-/async-3.2.5.tgz#ebd52a8fdaf7a2289a24df399f8d8485c8a46b66" - integrity sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg== - asynckit@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" @@ -3361,9 +3585,9 @@ aws-sign2@~0.7.0: integrity sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA== aws4@^1.8.0: - version "1.13.0" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.13.0.tgz#d9b802e9bb9c248d7be5f7f5ef178dc3684e9dcc" - integrity sha512-3AungXC4I8kKsS9PuS4JH2nc+0bVY/mjgrephHTIi8fpEeGsTHBUJeosp0Wc1myYMElmD0B3Oc4XL/HVJ4PV2g== + version "1.12.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.12.0.tgz#ce1c9d143389679e253b314241ea9aa5cec980d3" + integrity sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg== axios@^0.21.1: version "0.21.4" @@ -3372,7 +3596,16 @@ axios@^0.21.1: dependencies: follow-redirects "^1.14.0" -axios@^1.4.0, axios@^1.5.1, axios@^1.7.2: +axios@^1.4.0, axios@^1.5.1: + version "1.6.8" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.6.8.tgz#66d294951f5d988a00e87a0ffb955316a619ea66" + integrity sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ== + dependencies: + follow-redirects "^1.15.6" + form-data "^4.0.0" + proxy-from-env "^1.1.0" + +axios@^1.7.2: version "1.7.2" resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.2.tgz#b625db8a7051fbea61c35a3cbb3a1daa7b9c7621" integrity sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw== @@ -3467,9 +3700,9 @@ balanced-match@^1.0.0: integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== base-x@^3.0.2: - version "3.0.10" - resolved "https://registry.yarnpkg.com/base-x/-/base-x-3.0.10.tgz#62de58653f8762b5d6f8d9fe30fa75f7b2585a75" - integrity sha512-7d0s06rR9rYaIWHkpfLIFICM/tkSVdoPC9qYAQRpxn9DdKNWNsKC0uk++akckyLq16Tx2WIinnZ6WRriAt6njQ== + version "3.0.9" + resolved "https://registry.yarnpkg.com/base-x/-/base-x-3.0.9.tgz#6349aaabb58526332de9f60995e548a53fe21320" + integrity sha512-H7JU6iBHTal1gp56aKoaa//YUxEaAOUiydvrV/pILqIHXTtqxSkATOnDA2u+jZ/61sD+L/412+7kzXRtWukhpQ== dependencies: safe-buffer "^5.0.1" @@ -3576,19 +3809,19 @@ brace-expansion@^2.0.1: dependencies: balanced-match "^1.0.0" -braces@^3.0.3, braces@~3.0.2: - version "3.0.3" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" - integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== +braces@^3.0.2, braces@~3.0.2: 
+ version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== dependencies: - fill-range "^7.1.1" + fill-range "^7.0.1" brorand@^1.0.1, brorand@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" integrity sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w== -browser-stdout@1.3.1, browser-stdout@^1.3.1: +browser-stdout@1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" integrity sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== @@ -3605,15 +3838,15 @@ browserify-aes@^1.2.0: inherits "^2.0.1" safe-buffer "^5.0.1" -browserslist@^4.23.1: - version "4.23.2" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.2.tgz#244fe803641f1c19c28c48c4b6ec9736eb3d32ed" - integrity sha512-qkqSyistMYdxAcw+CzbZwlBy8AGmS/eEWs+sEV5TnLRGDOL+C5M2EnH6tlZyg0YoAxGJAFKh61En9BR941GnHA== +browserslist@^4.22.2: + version "4.23.0" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.0.tgz#8f3acc2bbe73af7213399430890f86c63a5674ab" + integrity sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ== dependencies: - caniuse-lite "^1.0.30001640" - electron-to-chromium "^1.4.820" + caniuse-lite "^1.0.30001587" + electron-to-chromium "^1.4.668" node-releases "^2.0.14" - update-browserslist-db "^1.1.0" + update-browserslist-db "^1.0.13" bs-logger@0.x: version "0.2.6" @@ -3767,10 +4000,10 @@ camelcase@^6.0.0, camelcase@^6.2.0: resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== -caniuse-lite@^1.0.30001640: - version "1.0.30001643" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001643.tgz#9c004caef315de9452ab970c3da71085f8241dbd" - integrity sha512-ERgWGNleEilSrHM6iUz/zJNSQTP8Mr21wDWpdgvRwcTXGAq6jMtOUPP4dqFPTdKqZ2wKTdtB+uucZ3MRpAUSmg== +caniuse-lite@^1.0.30001587: + version "1.0.30001608" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001608.tgz#7ae6e92ffb300e4b4ec2f795e0abab456ec06cc0" + integrity sha512-cjUJTQkk9fQlJR2s4HMuPMvTiRggl0rAVMtthQuyOlDWuqHXqN8azLq+pi8B2TjwKJ32diHjUqRIKeFX4z1FoA== caseless@^0.12.0, caseless@~0.12.0: version "0.12.0" @@ -3797,13 +4030,26 @@ cbor@^9.0.2: nofilter "^3.1.0" chai-as-promised@^7.1.1: - version "7.1.2" - resolved "https://registry.yarnpkg.com/chai-as-promised/-/chai-as-promised-7.1.2.tgz#70cd73b74afd519754161386421fb71832c6d041" - integrity sha512-aBDHZxRzYnUYuIAIPBH2s511DjlKPzXNlXSGFC8CwmroWQLfrW0LtE1nK3MAwwNhJPa9raEjNCmRoFpG0Hurdw== + version "7.1.1" + resolved "https://registry.yarnpkg.com/chai-as-promised/-/chai-as-promised-7.1.1.tgz#08645d825deb8696ee61725dbf590c012eb00ca0" + integrity sha512-azL6xMoi+uxu6z4rhWQ1jbdUhOMhis2PvscD/xjLqNMkv3BPPp2JyyuTHOrf9BOosGpNQ11v6BKv/g57RXbiaA== dependencies: check-error "^1.0.2" -chai@^4.3.10, chai@^4.3.4, chai@^4.3.6, chai@^4.3.7: +chai@^4.3.10, chai@^4.3.4, chai@^4.3.6: + version "4.4.1" + resolved "https://registry.yarnpkg.com/chai/-/chai-4.4.1.tgz#3603fa6eba35425b0f2ac91a009fe924106e50d1" + integrity 
sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g== + dependencies: + assertion-error "^1.1.0" + check-error "^1.0.3" + deep-eql "^4.1.3" + get-func-name "^2.0.2" + loupe "^2.3.6" + pathval "^1.1.1" + type-detect "^4.0.8" + +chai@^4.3.7: version "4.5.0" resolved "https://registry.yarnpkg.com/chai/-/chai-4.5.0.tgz#707e49923afdd9b13a8b0b47d33d732d13812fd8" integrity sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw== @@ -3816,7 +4062,7 @@ chai@^4.3.10, chai@^4.3.4, chai@^4.3.6, chai@^4.3.7: pathval "^1.1.1" type-detect "^4.1.0" -chalk@4.1.2, chalk@^4.0.0, chalk@^4.0.2, chalk@^4.1.0, chalk@^4.1.2: +chalk@4.1.2, chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== @@ -3870,7 +4116,7 @@ chokidar@3.5.3: optionalDependencies: fsevents "~2.3.2" -chokidar@^3.4.0, chokidar@^3.5.3: +chokidar@^3.4.0: version "3.6.0" resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.6.0.tgz#197c6cc669ef2a8dc5e7b4d97ee4e092c3eb0d5b" integrity sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw== @@ -3885,6 +4131,13 @@ chokidar@^3.4.0, chokidar@^3.5.3: optionalDependencies: fsevents "~2.3.2" +chokidar@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-4.0.1.tgz#4a6dff66798fb0f72a94f616abbd7e1a19f31d41" + integrity sha512-n8enUVCED/KVRQlab1hr3MVpcVMvxtZjmEa956u+4YijlmQED223XMSYj2tLuKvr4jcCTzNNMpQDUer72MMmzA== + dependencies: + readdirp "^4.0.1" + chownr@^1.0.1, chownr@^1.1.1: version "1.1.4" resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b" @@ -3909,9 +4162,9 @@ cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: safe-buffer "^5.0.1" cjs-module-lexer@^1.0.0: - version "1.3.1" - resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-1.3.1.tgz#c485341ae8fd999ca4ee5af2d7a1c9ae01e0099c" - integrity sha512-a3KdPAANPbNE4ZUv9h6LckSl9zLsYOP4MBmhIPkRaeyybt+r4UghLvq+xw/YwUcC1gqylCkL4rdVs3Lwupjm4Q== + version "1.2.3" + resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz#6c370ab19f8a3394e318fe682686ec0ac684d107" + integrity sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ== clean-stack@^2.0.0: version "2.2.0" @@ -3941,9 +4194,9 @@ cli-table3@^0.5.0: colors "^1.1.2" cli-table3@^0.6.0: - version "0.6.5" - resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.5.tgz#013b91351762739c16a9567c21a04632e449bf2f" - integrity sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ== + version "0.6.4" + resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.4.tgz#d1c536b8a3f2e7bec58f67ac9e5769b1b30088b0" + integrity sha512-Lm3L0p+/npIQWNIiyF/nAn7T5dnOwR3xNTHXYEBFBFVPXzCVNZ5lqEC/1eo/EVfpDsQ1I+TX4ORPQgp+UI0CRw== dependencies: string-width "^4.2.0" optionalDependencies: @@ -3983,9 +4236,9 @@ code-block-writer@^12.0.0: integrity sha512-q4dMFMlXtKR3XNBHyMHt/3pwYNA69EDk00lloMOaaUMKPUXBw6lpXtbu3MMVG6/uOihGnRDOlkyqsONEUj60+w== code-block-writer@^13.0.1: - version "13.0.2" - resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-13.0.2.tgz#e1c6c3dbe5d38b4ac76fb62c4d4b2fc4bf04c9c1" - integrity 
sha512-XfXzAGiStXSmCIwrkdfvc7FS5Dtj8yelCtyOf2p2skCAfvLd6zu0rGzuS9NSCO3bq1JKpFZ7tbKdKlcd5occQA== + version "13.0.3" + resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-13.0.3.tgz#90f8a84763a5012da7af61319dd638655ae90b5b" + integrity sha512-Oofo0pq3IKnsFtuHqSF7TqBfr71aeyZDVJ0HpmqB7FBM2qEigL0iPONSCZSO9pE9dZTAxANe5XHG9Uy0YMv8cg== collect-v8-coverage@^1.0.0: version "1.0.2" @@ -4145,9 +4398,9 @@ cookie@^0.4.1: integrity sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA== core-js-pure@^3.0.1: - version "3.37.1" - resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.37.1.tgz#2b4b34281f54db06c9a9a5bd60105046900553bd" - integrity sha512-J/r5JTHSmzTxbiYYrzXg9w1VpqrYt+gexenBE9pugeyhwPZTAEJddyiReJWsLO6uNQ8xJZFbod6XC7KKwatCiA== + version "3.36.1" + resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.36.1.tgz#1461c89e76116528b54eba20a0aff30164087a94" + integrity sha512-NXCvHvSVYSrewP0L5OhltzXeWFJLo2AL2TYnj6iLV3Bw8mM62wAQMNgUCRI6EBu6hVVpbCxmOPlxh1Ikw2PfUA== core-js@^2.4.0: version "2.6.12" @@ -4175,12 +4428,12 @@ cosmiconfig@^8.0.0: path-type "^4.0.0" cpu-features@~0.0.9: - version "0.0.10" - resolved "https://registry.yarnpkg.com/cpu-features/-/cpu-features-0.0.10.tgz#9aae536db2710c7254d7ed67cb3cbc7d29ad79c5" - integrity sha512-9IkYqtX3YHPCzoVg1Py+o9057a3i0fp7S530UWokCSaFVTc7CwXPRiOjRjBQQ18ZCNafx78YfnG+HALxtVmOGA== + version "0.0.9" + resolved "https://registry.yarnpkg.com/cpu-features/-/cpu-features-0.0.9.tgz#5226b92f0f1c63122b0a3eb84cb8335a4de499fc" + integrity sha512-AKjgn2rP2yJyfbepsmLfiYcmtNn/2eUvocUyM/09yB0YDiz39HteK/5/T4Onf0pmdYDMgkBoGvRLvEguzyL7wQ== dependencies: buildcheck "~0.0.6" - nan "^2.19.0" + nan "^2.17.0" crc-32@^1.2.0: version "1.2.2" @@ -4297,10 +4550,10 @@ death@^1.1.0: resolved "https://registry.yarnpkg.com/death/-/death-1.1.0.tgz#01aa9c401edd92750514470b8266390c66c67318" integrity sha512-vsV6S4KVHvTGxbEcij7hkWRv0It+sGGWVOM67dQde/o5Xjnr+KmLjxWJii2uEObIrt1CcM9w0Yaovx+iOlIL+w== -debug@4, debug@^4.0.1, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.3, debug@^4.3.4, debug@^4.3.5: - version "4.3.6" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.6.tgz#2ab2c38fbaffebf8aa95fdfe6d88438c7a13c52b" - integrity sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg== +debug@4, debug@4.3.4, debug@^4.0.1, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.3, debug@^4.3.4: + version "4.3.4" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" + integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== dependencies: ms "2.1.2" @@ -4325,6 +4578,13 @@ debug@^3.1.0, debug@^3.2.6, debug@^3.2.7: dependencies: ms "^2.1.1" +debug@^4.3.5: + version "4.3.5" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.5.tgz#e83444eceb9fedd4a1da56d671ae2446a01a6e1e" + integrity sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg== + dependencies: + ms "2.1.2" + decamelize@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-4.0.0.tgz#aa472d7bf660eb15f3494efd531cab7f2a709837" @@ -4338,14 +4598,14 @@ decompress-response@^6.0.0: mimic-response "^3.1.0" dedent@^1.0.0: - version "1.5.3" - resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.3.tgz#99aee19eb9bae55a67327717b6e848d0bf777e5a" - integrity 
sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ== + version "1.5.1" + resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.1.tgz#4f3fc94c8b711e9bb2800d185cd6ad20f2a90aff" + integrity sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg== deep-eql@^4.0.1, deep-eql@^4.1.3: - version "4.1.4" - resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-4.1.4.tgz#d0d3912865911bb8fac5afb4e3acfa6a28dc72b7" - integrity sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg== + version "4.1.3" + resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-4.1.3.tgz#7c7775513092f7df98d8df9996dd085eb668cc6d" + integrity sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw== dependencies: type-detect "^4.0.0" @@ -4391,7 +4651,7 @@ define-data-property@^1.0.1, define-data-property@^1.1.4: es-errors "^1.3.0" gopd "^1.0.1" -define-properties@^1.2.0, define-properties@^1.2.1: +define-properties@^1.1.3, define-properties@^1.2.0, define-properties@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.1.tgz#10781cc616eb951a80a034bafcaa7377f6af2b6c" integrity sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg== @@ -4430,7 +4690,7 @@ diff@^4.0.1: resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== -diff@^5.2.0: +diff@^5.1.0, diff@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/diff/-/diff-5.2.0.tgz#26ded047cd1179b78b9537d5ef725503ce1ae531" integrity sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A== @@ -4555,17 +4815,10 @@ ecdsa-sig-formatter@1.0.11: dependencies: safe-buffer "^5.0.1" -ejs@^3.1.10: - version "3.1.10" - resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.10.tgz#69ab8358b14e896f80cc39e62087b88500c3ac3b" - integrity sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA== - dependencies: - jake "^10.8.5" - -electron-to-chromium@^1.4.820: - version "1.5.2" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.2.tgz#6126ad229ce45e781ec54ca40db0504787f23d19" - integrity sha512-kc4r3U3V3WLaaZqThjYz/Y6z8tJe+7K0bbjUVo3i+LWIypVdMx5nXCkwRe6SWbY6ILqLdc1rKcKmr3HoH7wjSQ== +electron-to-chromium@^1.4.668: + version "1.4.731" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.731.tgz#d3dc19f359045b750a1fb0bc42315a502d950187" + integrity sha512-+TqVfZjpRz2V/5SPpmJxq9qK620SC5SqCnxQIOi7i/U08ZDcTpKbT7Xjj9FU5CbXTMUb4fywbIr8C7cGv4hcjw== elliptic@6.5.4: version "6.5.4" @@ -4581,9 +4834,9 @@ elliptic@6.5.4: minimalistic-crypto-utils "^1.0.1" elliptic@^6.5.2, elliptic@^6.5.4, elliptic@^6.5.5: - version "6.5.6" - resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.6.tgz#ee5f7c3a00b98a2144ac84d67d01f04d438fa53e" - integrity sha512-mpzdtpeCLuS3BmE3pO3Cpp5bbjlOPY2Q0PgoF+Od1XZrHLYI28Xe3ossCmYCQt11FQKEYd9+PF8jymTvtWJSHQ== + version "6.5.5" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.5.tgz#c715e09f78b6923977610d4c2346d6ce22e6dded" + integrity sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw== dependencies: bn.js "^4.11.9" brorand "^1.1.0" @@ -4636,9 +4889,9 @@ 
end-of-stream@^1.0.0, end-of-stream@^1.1.0, end-of-stream@^1.4.1: once "^1.4.0" enhanced-resolve@^5.12.0: - version "5.17.1" - resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz#67bfbbcc2f81d511be77d686a90267ef7f898a15" - integrity sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg== + version "5.16.0" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.16.0.tgz#65ec88778083056cb32487faa9aef82ed0864787" + integrity sha512-O+QWCviPNSSLAD9Ucn8Awv+poAkqn3T1XY5/N7kR7rQO9yfSGWkYZDwpJ+iKF7B8rxaQKWngSqACpgzeapSyoA== dependencies: graceful-fs "^4.2.4" tapable "^2.2.0" @@ -4781,7 +5034,7 @@ es6-promisify@^6.0.0: resolved "https://registry.yarnpkg.com/es6-promisify/-/es6-promisify-6.1.1.tgz#46837651b7b06bf6fff893d03f29393668d01621" integrity sha512-HBL8I3mIki5C1Cc9QjKUenHtnG0A5/xA8Q/AllRcfiwl2CZFXGK7ddBiCoRwAix4i2KxcQfjtIVcrVbB3vbmwg== -escalade@^3.1.1, escalade@^3.1.2: +escalade@^3.1.1: version "3.1.2" resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.2.tgz#54076e9ab29ea5bf3d8f1ed62acffbb88272df27" integrity sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA== @@ -4871,12 +5124,12 @@ eslint-plugin-import@^2.29.0: tsconfig-paths "^3.15.0" eslint-plugin-prettier@^5.0.1: - version "5.2.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-5.2.1.tgz#d1c8f972d8f60e414c25465c163d16f209411f95" - integrity sha512-gH3iR3g4JfF+yYPaJYkN7jEl9QbweL/YfkoRlNnuIEHEz1vHVlCmWOS+eGGiRuzHQXdJFCOTxRgvju9b8VUmrw== + version "5.1.3" + resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-5.1.3.tgz#17cfade9e732cef32b5f5be53bd4e07afd8e67e1" + integrity sha512-C9GCVAs4Eq7ZC/XFQHITLiHJxQngdtraXaM+LoUFoFp/lHNl2Zn8f3WQbe9HvTBBQ9YnKFB0/2Ajdqwo5D1EAw== dependencies: prettier-linter-helpers "^1.0.0" - synckit "^0.9.1" + synckit "^0.8.6" eslint-scope@^5.1.1: version "5.1.1" @@ -5035,9 +5288,9 @@ esprima@^4.0.0: integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== esquery@^1.4.0, esquery@^1.4.2: - version "1.6.0" - resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.6.0.tgz#91419234f804d852a82dceec3e16cdc22cf9dae7" - integrity sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg== + version "1.5.0" + resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.5.0.tgz#6ce17738de8577694edd7361c57182ac8cb0db0b" + integrity sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg== dependencies: estraverse "^5.1.0" @@ -5088,9 +5341,9 @@ eth-gas-reporter@^0.2.25: sync-request "^6.0.0" ethereum-bloom-filters@^1.0.6: - version "1.2.0" - resolved "https://registry.yarnpkg.com/ethereum-bloom-filters/-/ethereum-bloom-filters-1.2.0.tgz#8294f074c1a6cbd32c39d2cc77ce86ff14797dab" - integrity sha512-28hyiE7HVsWubqhpVLVmZXFd4ITeHi+BUu05o9isf0GUpMtzBUi+8/gFrGaGYzvGAJQmJ3JKj77Mk9G98T84rA== + version "1.1.0" + resolved "https://registry.yarnpkg.com/ethereum-bloom-filters/-/ethereum-bloom-filters-1.1.0.tgz#b3fc1eb789509ee30db0bf99a2988ccacb8d0397" + integrity sha512-J1gDRkLpuGNvWYzWslBQR9cDV4nd4kfvVTE/Wy4Kkm4yb3EYRSlyi0eB/inTsSTTVyA0+HyzHgbr95Fn/Z1fSw== dependencies: "@noble/hashes" "^1.4.0" @@ -5126,14 +5379,14 @@ ethereum-cryptography@^1.0.3: "@scure/bip39" "1.1.1" ethereum-cryptography@^2.0.0, ethereum-cryptography@^2.1.2: - version "2.2.1" - resolved 
"https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-2.2.1.tgz#58f2810f8e020aecb97de8c8c76147600b0b8ccf" - integrity sha512-r/W8lkHSiTLxUxW8Rf3u4HGB0xQweG2RyETjywylKZSzLWoWAijRz8WCuOtJ6wah+avllXBqZuk29HCCvhEIRg== + version "2.1.3" + resolved "https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-2.1.3.tgz#1352270ed3b339fe25af5ceeadcf1b9c8e30768a" + integrity sha512-BlwbIL7/P45W8FGW2r7LGuvoEZ+7PWsniMvQ4p5s2xCyw9tmaDlpfsN9HjAucbF+t/qpVHwZUisgfK24TCW8aA== dependencies: - "@noble/curves" "1.4.2" - "@noble/hashes" "1.4.0" - "@scure/bip32" "1.4.0" - "@scure/bip39" "1.3.0" + "@noble/curves" "1.3.0" + "@noble/hashes" "1.3.3" + "@scure/bip32" "1.3.3" + "@scure/bip39" "1.2.2" ethereum-waffle@^4.0.10: version "4.0.10" @@ -5227,9 +5480,9 @@ ethers@^5.0.2, ethers@^5.7.0, ethers@^5.7.2, ethers@~5.7.0, ethers@~5.7.2: "@ethersproject/wordlists" "5.7.0" ethers@^6.7.1: - version "6.13.2" - resolved "https://registry.yarnpkg.com/ethers/-/ethers-6.13.2.tgz#4b67d4b49e69b59893931a032560999e5e4419fe" - integrity sha512-9VkriTTed+/27BGuY1s0hf441kqwHJ1wtN2edksEtiRvXx+soxRX3iSXTfFqq2+YwrOqbDoTHjIhQnjJRlzKmg== + version "6.12.1" + resolved "https://registry.yarnpkg.com/ethers/-/ethers-6.12.1.tgz#517ff6d66d4fd5433e38e903051da3e57c87ff37" + integrity sha512-j6wcVoZf06nqEcBbDWkKg8Fp895SS96dSnTCjiXT+8vt2o02raTn4Lo9ERUuIVU5bAjoPYeA+7ytQFexFmLuVw== dependencies: "@adraffy/ens-normalize" "1.10.1" "@noble/curves" "1.2.0" @@ -5237,7 +5490,7 @@ ethers@^6.7.1: "@types/node" "18.15.13" aes-js "4.0.0-beta.5" tslib "2.4.0" - ws "8.17.1" + ws "8.5.0" ethers@~5.5.0: version "5.5.4" @@ -5484,11 +5737,6 @@ fast-levenshtein@^2.0.6, fast-levenshtein@~2.0.6: resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== -fast-uri@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/fast-uri/-/fast-uri-3.0.1.tgz#cddd2eecfc83a71c1be2cc2ef2061331be8a7134" - integrity sha512-MWipKbbYiYI0UC7cl8m/i/IWTqfC8YXsqjzybjddLsFjStroQzsHXkc73JutMvBiXmOvapk+axIl79ig5t55Bw== - fastq@^1.6.0: version "1.17.1" resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47" @@ -5517,17 +5765,10 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -filelist@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/filelist/-/filelist-1.0.4.tgz#f78978a1e944775ff9e62e744424f215e58352b5" - integrity sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q== - dependencies: - minimatch "^5.0.1" - -fill-range@^7.1.1: - version "7.1.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" - integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== +fill-range@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" + integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== dependencies: to-regex-range "^5.0.1" @@ -5601,9 +5842,9 @@ for-each@^0.3.3: is-callable "^1.1.3" foreground-child@^3.1.0: - version "3.2.1" - resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.2.1.tgz#767004ccf3a5b30df39bed90718bab43fe0a59f7" - integrity 
sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA== + version "3.3.0" + resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.3.0.tgz#0ac8644c06e431439f8561db8ecf29a7b5519c77" + integrity sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg== dependencies: cross-spawn "^7.0.0" signal-exit "^4.0.1" @@ -5826,9 +6067,9 @@ get-symbol-description@^1.0.2: get-intrinsic "^1.2.4" get-tsconfig@^4.5.0: - version "4.7.6" - resolved "https://registry.yarnpkg.com/get-tsconfig/-/get-tsconfig-4.7.6.tgz#118fd5b7b9bae234cc7705a00cd771d7eb65d62a" - integrity sha512-ZAqrLlu18NbDdRaHq+AKXzAmqIUPswPWKUchfytdAjiRFnCe5ojG2bstg6mRiZabkKfCoL/e98pbBELIV/YCeA== + version "4.7.3" + resolved "https://registry.yarnpkg.com/get-tsconfig/-/get-tsconfig-4.7.3.tgz#0498163d98f7b58484dd4906999c0c9d5f103f83" + integrity sha512-ZvkrzoUA0PQZM6fy6+/Hce561s+faD1rsNwhnO5FelNjyy7EMGJ3Rz1AQ8GYDWjhRs/7dBLOEJvhK8MiEJOAFg== dependencies: resolve-pkg-maps "^1.0.0" @@ -5885,6 +6126,17 @@ glob@7.2.0: once "^1.3.0" path-is-absolute "^1.0.0" +glob@8.1.0, glob@^8.0.3: + version "8.1.0" + resolved "https://registry.yarnpkg.com/glob/-/glob-8.1.0.tgz#d388f656593ef708ee3e34640fdfb99a9fd1c33e" + integrity sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^5.0.1" + once "^1.3.0" + glob@^10.4.1: version "10.4.5" resolved "https://registry.yarnpkg.com/glob/-/glob-10.4.5.tgz#f4d9f0b90ffdbab09c9d77f5f29b4262517b0956" @@ -5920,17 +6172,6 @@ glob@^7.0.0, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6: once "^1.3.0" path-is-absolute "^1.0.0" -glob@^8.0.3, glob@^8.1.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/glob/-/glob-8.1.0.tgz#d388f656593ef708ee3e34640fdfb99a9fd1c33e" - integrity sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ== - dependencies: - fs.realpath "^1.0.0" - inflight "^1.0.4" - inherits "2" - minimatch "^5.0.1" - once "^1.3.0" - glob@~8.0.3: version "8.0.3" resolved "https://registry.yarnpkg.com/glob/-/glob-8.0.3.tgz#415c6eb2deed9e502c68fa44a272e6da6eeca42e" @@ -5971,12 +6212,11 @@ globals@^13.19.0, globals@^13.6.0, globals@^13.9.0: type-fest "^0.20.2" globalthis@^1.0.3: - version "1.0.4" - resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.4.tgz#7430ed3a975d97bfb59bcce41f5cabbafa651236" - integrity sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ== + version "1.0.3" + resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.3.tgz#5852882a52b80dc301b0660273e1ed082f0b6ccf" + integrity sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA== dependencies: - define-properties "^1.2.1" - gopd "^1.0.1" + define-properties "^1.1.3" globby@^10.0.1: version "10.0.2" @@ -6151,13 +6391,13 @@ hardhat@=2.22.2: ws "^7.4.6" hardhat@^2.14.0: - version "2.22.10" - resolved "https://registry.yarnpkg.com/hardhat/-/hardhat-2.22.10.tgz#826ab56e47af98406e6dd105ba6d2dbb148013d9" - integrity sha512-JRUDdiystjniAvBGFmJRsiIZSOP2/6s++8xRDe3TzLeQXlWWHsXBrd9wd3JWFyKXvgMqMeLL5Sz/oNxXKYw9vg== + version "2.22.12" + resolved "https://registry.yarnpkg.com/hardhat/-/hardhat-2.22.12.tgz#a6d0be011fc009c50c454da367ad28c29f58d446" + integrity sha512-yok65M+LsOeTBHQsjg//QreGCyrsaNmeLVzhTFqlOvZ4ZE5y69N0wRxH1b2BC9dGK8S8OPUJMNiL9X0RAvbm8w== dependencies: 
"@ethersproject/abi" "^5.1.2" "@metamask/eth-sig-util" "^4.0.0" - "@nomicfoundation/edr" "^0.5.2" + "@nomicfoundation/edr" "^0.6.1" "@nomicfoundation/ethereumjs-common" "4.0.4" "@nomicfoundation/ethereumjs-tx" "5.0.4" "@nomicfoundation/ethereumjs-util" "9.0.4" @@ -6170,7 +6410,7 @@ hardhat@^2.14.0: ansi-escapes "^4.3.0" boxen "^5.1.2" chalk "^2.4.2" - chokidar "^3.4.0" + chokidar "^4.0.0" ci-info "^2.0.0" debug "^4.1.1" enquirer "^2.3.0" @@ -6183,6 +6423,7 @@ hardhat@^2.14.0: glob "7.2.0" immutable "^4.0.0-rc.12" io-ts "1.10.4" + json-stream-stringify "^3.1.4" keccak "^3.0.2" lodash "^4.17.11" mnemonist "^0.38.0" @@ -6199,6 +6440,55 @@ hardhat@^2.14.0: uuid "^8.3.2" ws "^7.4.6" +hardhat@^2.22.5: + version "2.22.5" + resolved "https://registry.yarnpkg.com/hardhat/-/hardhat-2.22.5.tgz#7e1a4311fa9e34a1cfe337784eae06706f6469a5" + integrity sha512-9Zq+HonbXCSy6/a13GY1cgHglQRfh4qkzmj1tpPlhxJDwNVnhxlReV6K7hCWFKlOrV13EQwsdcD0rjcaQKWRZw== + dependencies: + "@ethersproject/abi" "^5.1.2" + "@metamask/eth-sig-util" "^4.0.0" + "@nomicfoundation/edr" "^0.4.0" + "@nomicfoundation/ethereumjs-common" "4.0.4" + "@nomicfoundation/ethereumjs-tx" "5.0.4" + "@nomicfoundation/ethereumjs-util" "9.0.4" + "@nomicfoundation/solidity-analyzer" "^0.1.0" + "@sentry/node" "^5.18.1" + "@types/bn.js" "^5.1.0" + "@types/lru-cache" "^5.1.0" + adm-zip "^0.4.16" + aggregate-error "^3.0.0" + ansi-escapes "^4.3.0" + boxen "^5.1.2" + chalk "^2.4.2" + chokidar "^3.4.0" + ci-info "^2.0.0" + debug "^4.1.1" + enquirer "^2.3.0" + env-paths "^2.2.0" + ethereum-cryptography "^1.0.3" + ethereumjs-abi "^0.6.8" + find-up "^2.1.0" + fp-ts "1.19.3" + fs-extra "^7.0.1" + glob "7.2.0" + immutable "^4.0.0-rc.12" + io-ts "1.10.4" + keccak "^3.0.2" + lodash "^4.17.11" + mnemonist "^0.38.0" + mocha "^10.0.0" + p-map "^4.0.0" + raw-body "^2.4.1" + resolve "1.17.0" + semver "^6.3.0" + solc "0.7.3" + source-map-support "^0.5.13" + stacktrace-parser "^0.1.10" + tsort "0.0.1" + undici "^5.14.0" + uuid "^8.3.2" + ws "^7.4.6" + has-bigints@^1.0.1, has-bigints@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa" @@ -6267,7 +6557,7 @@ hasown@^2.0.0, hasown@^2.0.1, hasown@^2.0.2: dependencies: function-bind "^1.1.2" -he@1.2.0, he@^1.2.0: +he@1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== @@ -6402,9 +6692,9 @@ immediate@~3.2.3: integrity sha512-RrGCXRm/fRVqMIhqXrGEX9rRADavPiDFSoMb/k64i9XMk8uH4r/Omi5Ctierj6XzNecwDbO4WuFbDD1zmpl3Tg== immutable@^4.0.0-rc.12: - version "4.3.7" - resolved "https://registry.yarnpkg.com/immutable/-/immutable-4.3.7.tgz#c70145fc90d89fb02021e65c84eb0226e4e5a381" - integrity sha512-1hqclzwYwjRDFLjcFxOM5AYkkG0rpFPpr1RLPMEuGczoS7YA8gLhy8SWXYRAA/XwfEHpfo3cw5JGioS32fnMRw== + version "4.3.5" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-4.3.5.tgz#f8b436e66d59f99760dc577f5c99a4fd2a5cc5a0" + integrity sha512-8eabxkth9gZatlwl5TBuJnCsoTADlL6ftEr7A4qgdaTsPyreilDSnUk57SO+jfKcNtxPa22U5KK6DSeAYhpBJw== import-fresh@^3.0.0, import-fresh@^3.2.1, import-fresh@^3.3.0: version "3.3.0" @@ -6415,9 +6705,9 @@ import-fresh@^3.0.0, import-fresh@^3.2.1, import-fresh@^3.3.0: resolve-from "^4.0.0" import-local@^3.0.2: - version "3.2.0" - resolved "https://registry.yarnpkg.com/import-local/-/import-local-3.2.0.tgz#c3d5c745798c02a6f8b897726aba5100186ee260" - integrity 
sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA== + version "3.1.0" + resolved "https://registry.yarnpkg.com/import-local/-/import-local-3.1.0.tgz#b4479df8a5fd44f6cdce24070675676063c95cb4" + integrity sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg== dependencies: pkg-dir "^4.2.0" resolve-cwd "^3.0.0" @@ -6546,11 +6836,11 @@ is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.2.7: integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA== is-core-module@^2.11.0, is-core-module@^2.13.0, is-core-module@^2.13.1: - version "2.15.0" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.15.0.tgz#71c72ec5442ace7e76b306e9d48db361f22699ea" - integrity sha512-Dd+Lb2/zvk9SKy1TGCt1wFJFo/MWBPMX5x7KcvLajWTGuomczdQX61PvY5yK6SVACwpoexWo81IfFyoKY2QnTA== + version "2.13.1" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384" + integrity sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw== dependencies: - hasown "^2.0.2" + hasown "^2.0.0" is-data-view@^1.0.1: version "1.0.1" @@ -6735,9 +7025,9 @@ istanbul-lib-instrument@^5.0.4: semver "^6.3.0" istanbul-lib-instrument@^6.0.0: - version "6.0.3" - resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz#fa15401df6c15874bcb2105f773325d78c666765" - integrity sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q== + version "6.0.2" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.2.tgz#91655936cf7380e4e473383081e38478b69993b1" + integrity sha512-1WUsZ9R1lA0HtBSohTkm39WTPlNKSJ5iFk7UwqXkBLoHQT+hfqPsfsTDVuZdKGaBwn7din9bS7SsnoAr943hvw== dependencies: "@babel/core" "^7.23.9" "@babel/parser" "^7.23.9" @@ -6780,16 +7070,6 @@ jackspeak@^3.1.2: optionalDependencies: "@pkgjs/parseargs" "^0.11.0" -jake@^10.8.5: - version "10.9.2" - resolved "https://registry.yarnpkg.com/jake/-/jake-10.9.2.tgz#6ae487e6a69afec3a5e167628996b59f35ae2b7f" - integrity sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA== - dependencies: - async "^3.2.3" - chalk "^4.0.2" - filelist "^1.0.4" - minimatch "^3.1.2" - jest-changed-files@^29.7.0: version "29.7.0" resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.7.0.tgz#1c06d07e77c78e1585d020424dedc10d6e17ac3a" @@ -7238,6 +7518,11 @@ json-stable-stringify-without-jsonify@^1.0.1: resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== +json-stream-stringify@^3.1.4: + version "3.1.5" + resolved "https://registry.yarnpkg.com/json-stream-stringify/-/json-stream-stringify-3.1.5.tgz#7184383b397a83ac5da33b62371217522e6ac2f6" + integrity sha512-wurRuTiw27mck9MWaUIGAunfwqhPDxnXQVN/+Rzi+IEQUUALU10AZs1nWkSdtjH7PAVuAUcqQjH11S/JHOWeaA== + json-stringify-safe@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" @@ -7663,7 +7948,7 @@ lodash@^4.17.11, lodash@^4.17.12, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17 resolved 
"https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== -log-symbols@4.1.0, log-symbols@^4.1.0: +log-symbols@4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.1.0.tgz#3fbdbb95b4683ac9fc785111e792e558d4abd503" integrity sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg== @@ -7700,6 +7985,13 @@ lru-cache@^5.1.1: dependencies: yallist "^3.0.2" +lru-cache@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" + integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== + dependencies: + yallist "^4.0.0" + lru_map@^0.3.3: version "0.3.3" resolved "https://registry.yarnpkg.com/lru_map/-/lru_map-0.3.3.tgz#b5c8351b9464cbd750335a79650a0ec0e56118dd" @@ -7885,11 +8177,11 @@ micro-ftch@^0.3.1: integrity sha512-/0LLxhzP0tfiR5hcQebtudP56gUurs2CLkGarnCiB/OqEyUFQ6U3paQi/tgLv0hBJYt2rnr9MNpxz4fiiugstg== micromatch@^4.0.4: - version "4.0.7" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.7.tgz#33e8190d9fe474a9895525f5618eee136d46c2e5" - integrity sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q== + version "4.0.5" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" + integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== dependencies: - braces "^3.0.3" + braces "^3.0.2" picomatch "^2.3.1" miller-rabin@^4.0.0: @@ -7956,6 +8248,13 @@ minimatch@4.2.1: dependencies: brace-expansion "^1.1.7" +minimatch@5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.0.1.tgz#fb9022f7528125187c92bd9e9b6366be1cf3415b" + integrity sha512-nLDxIFRyhDblz3qMuq+SoRZED4+miJ/G+tdDrjkkkRnjAsBexeGpgjLEQ0blJy7rHhR2b93rhQY4SvyWu9v03g== + dependencies: + brace-expansion "^2.0.1" + minimatch@9.0.3: version "9.0.3" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.3.tgz#a6e00c3de44c3a542bfaae70abfc22420a6da825" @@ -7963,7 +8262,7 @@ minimatch@9.0.3: dependencies: brace-expansion "^2.0.1" -minimatch@^5.0.1, minimatch@^5.1.6, minimatch@~5.1.2: +minimatch@^5.0.1, minimatch@~5.1.2: version "5.1.6" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.1.6.tgz#1cfcb8cf5522ea69952cd2af95ae09477f122a96" integrity sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g== @@ -7977,14 +8276,7 @@ minimatch@^7.4.3: dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.3: - version "9.0.4" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51" - integrity sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw== - dependencies: - brace-expansion "^2.0.1" - -minimatch@^9.0.4: +minimatch@^9.0.3, minimatch@^9.0.4: version "9.0.5" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5" integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== @@ -8003,12 +8295,7 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8, minimist@~1. 
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== -"minipass@^5.0.0 || ^6.0.2 || ^7.0.0": - version "7.1.1" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.1.tgz#f7f85aff59aa22f110b20e27692465cf3bf89481" - integrity sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA== - -minipass@^7.1.2: +"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.1.2: version "7.1.2" resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== @@ -8053,30 +8340,30 @@ mocha-steps@^1.3.0: integrity sha512-KZvpMJTqzLZw3mOb+EEuYi4YZS41C9iTnb7skVFRxHjUd1OYbl64tCMSmpdIRM9LnwIrSOaRfPtNpF5msgv6Eg== mocha@^10.0.0, mocha@^10.2.0: - version "10.7.0" - resolved "https://registry.yarnpkg.com/mocha/-/mocha-10.7.0.tgz#9e5cbed8fa9b37537a25bd1f7fb4f6fc45458b9a" - integrity sha512-v8/rBWr2VO5YkspYINnvu81inSz2y3ODJrhO175/Exzor1RcEZZkizgE2A+w/CAXXoESS8Kys5E62dOHGHzULA== + version "10.4.0" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-10.4.0.tgz#ed03db96ee9cfc6d20c56f8e2af07b961dbae261" + integrity sha512-eqhGB8JKapEYcC4ytX/xrzKforgEc3j1pGlAXVy3eRwrtAy5/nIfT1SvgGzfN0XZZxeLq0aQWkOUAmqIJiv+bA== dependencies: - ansi-colors "^4.1.3" - browser-stdout "^1.3.1" - chokidar "^3.5.3" - debug "^4.3.5" - diff "^5.2.0" - escape-string-regexp "^4.0.0" - find-up "^5.0.0" - glob "^8.1.0" - he "^1.2.0" - js-yaml "^4.1.0" - log-symbols "^4.1.0" - minimatch "^5.1.6" - ms "^2.1.3" - serialize-javascript "^6.0.2" - strip-json-comments "^3.1.1" - supports-color "^8.1.1" - workerpool "^6.5.1" - yargs "^16.2.0" - yargs-parser "^20.2.9" - yargs-unparser "^2.0.0" + ansi-colors "4.1.1" + browser-stdout "1.3.1" + chokidar "3.5.3" + debug "4.3.4" + diff "5.0.0" + escape-string-regexp "4.0.0" + find-up "5.0.0" + glob "8.1.0" + he "1.2.0" + js-yaml "4.1.0" + log-symbols "4.1.0" + minimatch "5.0.1" + ms "2.1.3" + serialize-javascript "6.0.0" + strip-json-comments "3.1.1" + supports-color "8.1.1" + workerpool "6.2.1" + yargs "16.2.0" + yargs-parser "20.2.4" + yargs-unparser "2.0.0" mocha@^9.0.2: version "9.2.2" @@ -8123,7 +8410,7 @@ ms@2.1.2: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@2.1.3, ms@^2.1.1, ms@^2.1.3: +ms@2.1.3, ms@^2.1.1: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== @@ -8133,10 +8420,10 @@ mute-stream@0.0.7: resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.7.tgz#3075ce93bc21b8fab43e1bc4da7e8115ed1e7bab" integrity sha512-r65nCZhrbXXb6dXOACihYApHw2Q6pV0M3V0PSxd74N0+D8nzAdEAITq2oAjA1jVnKI+tGvEBUpqiMh0+rW6zDQ== -nan@^2.18.0, nan@^2.19.0: - version "2.20.0" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.20.0.tgz#08c5ea813dd54ed16e5bd6505bf42af4f7838ca3" - integrity sha512-bk3gXBZDGILuuo/6sKtr0DQmSThYHLtNCdSdXk9YkxD/jK6X2vmCyyXBBxyqZ4XcnzTyYEAThfX3DCEnLf6igw== +nan@^2.17.0, nan@^2.18.0: + version "2.19.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.19.0.tgz#bb58122ad55a6c5bc973303908d5b16cfdd5a8c0" + 
integrity sha512-nO1xXxfh/RWNxfd/XPfbIfFk5vgLsAxUR9y5O0cHMJu/AW9U95JLXqthYHjEp+8gQ5p96K9jUp8nbVOxCdRbtw== nanoid@3.3.1: version "3.3.1" @@ -8173,7 +8460,7 @@ nice-try@^1.0.4: resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== -nise@^5.1.9: +nise@^5.1.5: version "5.1.9" resolved "https://registry.yarnpkg.com/nise/-/nise-5.1.9.tgz#0cb73b5e4499d738231a473cd89bd8afbb618139" integrity sha512-qOnoujW4SV6e40dYxJOb3uvuoPHtmLzIk4TFo+j0jPJoC+5Z9xja5qH5JZobEPsa8+YYphMrOSwnrshEhG2qww== @@ -8225,9 +8512,9 @@ node-gyp-build@4.4.0: integrity sha512-amJnQCcgtRVw9SvoebO3BKGESClrfXGCUTX9hSn1OuGQTQBOZmVd0Z0OlecpuRksKvbsUqALE8jls/ErClAPuQ== node-gyp-build@^4.2.0, node-gyp-build@^4.3.0: - version "4.8.1" - resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.8.1.tgz#976d3ad905e71b76086f4f0b0d3637fe79b6cda5" - integrity sha512-OSs33Z9yWr148JZcbZd5WiAXhh/n9z8TxQcdMhIOlpN9AhWpLfvVFO73+m77bBABQMaY9XSvIa+qk0jlI7Gcaw== + version "4.8.0" + resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.8.0.tgz#3fee9c1731df4581a3f9ead74664369ff00d26dd" + integrity sha512-u6fs2AEUljNho3EYTJNBfImO5QTo/J/1Etd+NVdCj7qWKUSN/bSLkZwhDv7I+w/MSC6qJ4cknepkAYykDdK8og== node-int64@^0.4.0: version "0.4.0" @@ -8235,9 +8522,9 @@ node-int64@^0.4.0: integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw== node-releases@^2.0.14: - version "2.0.18" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.18.tgz#f010e8d35e2fe8d6b2944f03f70213ecedc4ca3f" - integrity sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g== + version "2.0.14" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.14.tgz#2ffb053bceb8b2be8495ece1ab6ce600c4461b0b" + integrity sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw== nofilter@^3.1.0: version "3.1.0" @@ -8312,9 +8599,9 @@ object-assign@^4.1.0: integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== object-inspect@^1.13.1: - version "1.13.2" - resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff" - integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g== + version "1.13.1" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.1.tgz#b96c6109324ccfef6b12216a956ca4dc2ff94bc2" + integrity sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ== object-keys@^1.1.1: version "1.1.1" @@ -8403,16 +8690,16 @@ optionator@^0.8.1: word-wrap "~1.2.3" optionator@^0.9.1, optionator@^0.9.3: - version "0.9.4" - resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.4.tgz#7ea1c1a5d91d764fb282139c88fe11e182a3a734" - integrity sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g== + version "0.9.3" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.3.tgz#007397d44ed1872fdc6ed31360190f81814e2c64" + integrity sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg== dependencies: + "@aashutoshrathi/word-wrap" "^1.2.3" deep-is "^0.1.3" fast-levenshtein "^2.0.6" levn "^0.4.1" prelude-ls "^1.2.1" type-check "^0.4.0" - word-wrap 
"^1.2.5" ordinal@1.0.3, ordinal@^1.0.3: version "1.0.3" @@ -8496,9 +8783,9 @@ p-try@^2.0.0: integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== package-json-from-dist@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.0.tgz#e501cd3094b278495eb4258d4c9f6d5ac3019f00" - integrity sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw== + version "1.0.1" + resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz#4f1471a010827a86f94cfd9b0727e36d267de505" + integrity sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw== package-json@^8.1.0: version "8.1.1" @@ -8689,9 +8976,9 @@ pg-types@^4.0.1: postgres-range "^1.1.1" pg@^8.11.3: - version "8.12.0" - resolved "https://registry.yarnpkg.com/pg/-/pg-8.12.0.tgz#9341724db571022490b657908f65aee8db91df79" - integrity sha512-A+LHUSnwnxrnL/tZ+OLfqR1SxLN3c/pgDztZ47Rpbsd4jUytsTtwQo/TLPRzPJMp/1pbhYVhH9cuSZLAajNfjQ== + version "8.11.5" + resolved "https://registry.yarnpkg.com/pg/-/pg-8.11.5.tgz#e722b0a5f1ed92931c31758ebec3ddf878dd4128" + integrity sha512-jqgNHSKL5cbDjFlHyYsCXmQDrfIX/3RsNwYqpd4N0Kt8niLuNoRNH+aazv6cOd43gPh9Y4DjQCtb+X0MH0Hvnw== dependencies: pg-connection-string "^2.6.4" pg-pool "^3.6.2" @@ -8708,10 +8995,10 @@ pgpass@1.x: dependencies: split2 "^4.1.0" -picocolors@^1.0.0, picocolors@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.1.tgz#a8ad579b571952f0e5d25892de5445bcfe25aaa1" - integrity sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew== +picocolors@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" + integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3, picomatch@^2.3.1: version "2.3.1" @@ -8855,9 +9142,9 @@ prettier@^2.1.2, prettier@^2.3.1, prettier@^2.3.2, prettier@^2.8.3: integrity sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q== prettier@^3.0.3: - version "3.3.3" - resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.3.3.tgz#30c54fe0be0d8d12e6ae61dbb10109ea00d53105" - integrity sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew== + version "3.2.5" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.2.5.tgz#e52bc3090586e824964a8813b09aba6233b28368" + integrity sha512-3/GWa9aOC0YeD7LUfvOG2NiDyhOWRvt1k+rcKhOuYnMY24iiCphgneUfJDyFXd6rZCAnuLBv6UeAULtrhT/F4A== pretty-format@^29.0.0, pretty-format@^29.7.0: version "29.7.0" @@ -8921,9 +9208,9 @@ proto-list@~1.2.1: integrity sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA== protobufjs@^7.2.5: - version "7.3.2" - resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-7.3.2.tgz#60f3b7624968868f6f739430cfbc8c9370e26df4" - integrity sha512-RXyHaACeqXeqAKGLDl68rQKbmObRsTIn4TYVUUug1KfS47YWCo5MacGITEryugIgZqORCvJWEk4l449POg5Txg== + version "7.2.6" + resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-7.2.6.tgz#4a0ccd79eb292717aacf07530a07e0ed20278215" + integrity sha512-dgJaEDDL6x8ASUZ1YqWciTRrdOuYNzoOf27oHNfdyvKqHr5i0FV7FSLU+aIeFjyFgVxrpTOtQUi0BLLBymZaBw== dependencies: "@protobufjs/aspromise" "^1.1.2" 
"@protobufjs/base64" "^1.1.2" @@ -8984,10 +9271,10 @@ pure-rand@^6.0.0: resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.1.0.tgz#d173cf23258231976ccbdb05247c9787957604f2" integrity sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA== -qs@^6.12.3, qs@^6.4.0: - version "6.12.3" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.12.3.tgz#e43ce03c8521b9c7fd7f1f13e514e5ca37727754" - integrity sha512-AWJm14H1vVaO/iNZ4/hO+HyaTehuy9nRqVdkTqlJt0HWvBiBIEXFmb4C0DGeYo3Xes9rrEW+TxHsaigCbN5ICQ== +qs@^6.11.2, qs@^6.4.0: + version "6.12.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.12.0.tgz#edd40c3b823995946a8a0b1f208669c7a200db77" + integrity sha512-trVZiI6RMOkO476zLGaBIzszOdFPnCCXHPG9kn0yuS1uz6xdVxPfZdB3vUig9pxPFDM9BRAgz/YUIVQ1/vuiUg== dependencies: side-channel "^1.0.6" @@ -9052,9 +9339,9 @@ rc@1.2.8, rc@~1.2.7: strip-json-comments "~2.0.1" react-is@^18.0.0: - version "18.3.1" - resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.3.1.tgz#e83557dc12eae63a99e003a46388b1dcbb44db7e" - integrity sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg== + version "18.2.0" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.2.0.tgz#199431eeaaa2e09f86427efbb4f1473edb47609b" + integrity sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w== read-pkg@^3.0.0: version "3.0.0" @@ -9097,6 +9384,11 @@ readable-stream@~1.0.26-4: isarray "0.0.1" string_decoder "~0.10.x" +readdirp@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-4.0.1.tgz#b2fe35f8dca63183cd3b86883ecc8f720ea96ae6" + integrity sha512-GkMg9uOTpIWWKbSsgwb5fA4EavTR+SG/PMPoAY8hkhHfEEY0/vqljY+XHqtDf2cr2IJtoNRDbrrEpZUiZCkYRw== + readdirp@~3.6.0: version "3.6.0" resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" @@ -9453,10 +9745,17 @@ semver@^6.3.0, semver@^6.3.1: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.2.1, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.5.1, semver@^7.5.2, semver@^7.5.3, semver@^7.5.4, semver@^7.6.2: - version "7.6.3" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" - integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== +semver@^7.2.1, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.5.1, semver@^7.5.2, semver@^7.5.3, semver@^7.5.4: + version "7.6.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.0.tgz#1a46a4db4bffcccd97b743b5005c8325f23d4e2d" + integrity sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg== + dependencies: + lru-cache "^6.0.0" + +semver@^7.6.2: + version "7.6.2" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.2.tgz#1e3b34759f896e8f14d6134732ce798aeb0c6e13" + integrity sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w== serialize-javascript@6.0.0: version "6.0.0" @@ -9465,13 +9764,6 @@ serialize-javascript@6.0.0: dependencies: randombytes "^2.1.0" -serialize-javascript@^6.0.2: - version "6.0.2" - resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.2.tgz#defa1e055c83bf6d59ea805d8da862254eb6a6c2" - integrity 
sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g== - dependencies: - randombytes "^2.1.0" - set-function-length@^1.2.1: version "1.2.2" resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" @@ -9589,16 +9881,16 @@ sinon-chai@^3.7.0: integrity sha512-mf5NURdUaSdnatJx3uhoBOrY9dtL19fiOtAdT1Azxg3+lNJFiuN0uzaU3xX1LeAfL17kHQhTAJgpsfhbMJMY2g== sinon@^17.0.1: - version "17.0.2" - resolved "https://registry.yarnpkg.com/sinon/-/sinon-17.0.2.tgz#470894bcc2d24b01bad539722ea46da949892405" - integrity sha512-uihLiaB9FhzesElPDFZA7hDcNABzsVHwr3YfmM9sBllVwab3l0ltGlRV1XhpNfIacNDLGD1QRZNLs5nU5+hTuA== + version "17.0.1" + resolved "https://registry.yarnpkg.com/sinon/-/sinon-17.0.1.tgz#26b8ef719261bf8df43f925924cccc96748e407a" + integrity sha512-wmwE19Lie0MLT+ZYNpDymasPHUKTaZHUH/pKEubRXIzySv9Atnlw+BUMGCzWgV7b7wO+Hw6f1TEOr0IUnmU8/g== dependencies: - "@sinonjs/commons" "^3.0.1" + "@sinonjs/commons" "^3.0.0" "@sinonjs/fake-timers" "^11.2.2" "@sinonjs/samsam" "^8.0.0" - diff "^5.2.0" - nise "^5.1.9" - supports-color "^7" + diff "^5.1.0" + nise "^5.1.5" + supports-color "^7.2.0" sinon@^18.0.0: version "18.0.0" @@ -9828,9 +10120,9 @@ spdx-expression-parse@^3.0.0: spdx-license-ids "^3.0.0" spdx-license-ids@^3.0.0: - version "3.0.18" - resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.18.tgz#22aa922dcf2f2885a6494a261f2d8b75345d0326" - integrity sha512-xxRs31BqRYHwiMzudOrpSiHtZ8i/GeionCBDSilhYRj+9gIcI8wCZTlXZKu9vZIVqViP3dcp9qE5G6AlIaD+TQ== + version "3.0.17" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.17.tgz#887da8aa73218e51a1d917502d79863161a93f9c" + integrity sha512-sh8PWc/ftMqAAdFiBu6Fy6JUOYjqDJBJvIhpfDMyHrr0Rbp5liZqd4TjtQ/RgfLjKFZb+LMx5hpml5qOWy0qvg== split-ca@^1.0.0, split-ca@^1.0.1: version "1.0.1" @@ -9914,7 +10206,7 @@ string-length@^4.0.1: char-regex "^1.0.2" strip-ansi "^6.0.0" -"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: +"string-width-cjs@npm:string-width@^4.2.0": version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -9931,6 +10223,15 @@ string-width@^2.1.0, string-width@^2.1.1: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" +string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + string-width@^5.0.1, string-width@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" @@ -9997,7 +10298,7 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: +"strip-ansi-cjs@npm:strip-ansi@^6.0.1": version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity 
sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -10018,6 +10319,13 @@ strip-ansi@^5.1.0: dependencies: ansi-regex "^4.1.0" +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + strip-ansi@^7.0.1: version "7.1.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" @@ -10057,7 +10365,7 @@ strip-json-comments@~2.0.1: resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ== -supports-color@8.1.1, supports-color@^8.0.0, supports-color@^8.1.1: +supports-color@8.1.1, supports-color@^8.0.0: version "8.1.1" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== @@ -10078,7 +10386,7 @@ supports-color@^5.3.0: dependencies: has-flag "^3.0.0" -supports-color@^7, supports-color@^7.1.0: +supports-color@^7, supports-color@^7.1.0, supports-color@^7.2.0: version "7.2.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== @@ -10106,10 +10414,10 @@ sync-rpc@^1.2.1: dependencies: get-port "^3.1.0" -synckit@^0.9.1: - version "0.9.1" - resolved "https://registry.yarnpkg.com/synckit/-/synckit-0.9.1.tgz#febbfbb6649979450131f64735aa3f6c14575c88" - integrity sha512-7gr8p9TQP6RAHusBOSLs46F4564ZrjV8xFmw5zCmgmhGUcw2hxsShhJ6CEiHQMgPDwAQ1fWHPM0ypc4RMAig4A== +synckit@^0.8.6: + version "0.8.8" + resolved "https://registry.yarnpkg.com/synckit/-/synckit-0.8.8.tgz#fe7fe446518e3d3d49f5e429f443cf08b6edfcd7" + integrity sha512-HwOKAP7Wc5aRGYdKH+dw0PRRpbO841v2DENBtjnR5HFWoiNByAl7vrx3p0G/rCyYXQsrxqtX48TImFtPcIHSpQ== dependencies: "@pkgr/core" "^0.1.0" tslib "^2.6.2" @@ -10120,6 +10428,8 @@ synckit@^0.9.1: "@matterlabs/hardhat-zksync-deploy" "^0.7.0" "@matterlabs/hardhat-zksync-solc" "=1.1.4" "@matterlabs/hardhat-zksync-verify" "^1.4.3" + "@openzeppelin/contracts-upgradeable-v4" "npm:@openzeppelin/contracts-upgradeable@4.9.5" + "@openzeppelin/contracts-v4" "npm:@openzeppelin/contracts@4.9.5" commander "^9.4.1" eslint "^8.51.0" eslint-plugin-import "^2.29.0" @@ -10359,12 +10669,11 @@ ts-generator@^0.1.1: ts-essentials "^1.0.0" ts-jest@^29.0.1: - version "29.2.3" - resolved "https://registry.yarnpkg.com/ts-jest/-/ts-jest-29.2.3.tgz#3d226ac36b8b820151a38f164414f9f6b412131f" - integrity sha512-yCcfVdiBFngVz9/keHin9EnsrQtQtEu3nRykNy9RVp+FiPFFbPJ3Sg6Qg4+TkmH0vMP5qsTKgXSsk80HRwvdgQ== + version "29.1.2" + resolved "https://registry.yarnpkg.com/ts-jest/-/ts-jest-29.1.2.tgz#7613d8c81c43c8cb312c6904027257e814c40e09" + integrity sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g== dependencies: bs-logger "0.x" - ejs "^3.1.10" fast-json-stable-stringify "2.x" jest-util "^29.0.0" json5 "^2.2.3" @@ -10429,9 +10738,9 @@ tslib@^1.8.1, tslib@^1.9.0, tslib@^1.9.3: integrity 
sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== tslib@^2.6.2: - version "2.6.3" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.3.tgz#0438f810ad7a9edcde7a241c3d80db693c8cbfe0" - integrity sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ== + version "2.6.2" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" + integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== tsort@0.0.1: version "0.0.1" @@ -10481,12 +10790,12 @@ type-check@~0.3.2: dependencies: prelude-ls "~1.1.2" -type-detect@4.0.8: +type-detect@4.0.8, type-detect@^4.0.0, type-detect@^4.0.8: version "4.0.8" resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== -type-detect@^4.0.0, type-detect@^4.0.8, type-detect@^4.1.0: +type-detect@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.1.0.tgz#deb2453e8f08dcae7ae98c626b13dddb0155906c" integrity sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw== @@ -10590,9 +10899,9 @@ typescript@^4.3.5, typescript@^4.5.5, typescript@^4.6.4: integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g== typescript@^5.2.2: - version "5.5.4" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.5.4.tgz#d9852d6c82bad2d2eda4fd74a5762a8f5909e9ba" - integrity sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q== + version "5.4.4" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.4.4.tgz#eb2471e7b0a5f1377523700a21669dce30c2d952" + integrity sha512-dGE2Vv8cpVvw28v8HCPqyb08EzbBURxDpuhJvTrusShUfGnhHBafDsLdS1EhhxyL6BJQE+2cT3dDPAv+MQ6oLw== typical@^2.6.0, typical@^2.6.1: version "2.6.1" @@ -10615,9 +10924,9 @@ uc.micro@^1.0.1, uc.micro@^1.0.5: integrity sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA== uglify-js@^3.1.4: - version "3.19.1" - resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.19.1.tgz#2d5df6a0872c43da43187968308d7741d44b8056" - integrity sha512-y/2wiW+ceTYR2TSSptAhfnEtpLaQ4Ups5zrjB2d3kuVxHj16j/QJwPl5PvuGy9uARb39J0+iKxcRPvtpsx4A4A== + version "3.17.4" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.17.4.tgz#61678cf5fa3f5b7eb789bb345df29afb8257c22c" + integrity sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g== unbox-primitive@^1.0.2: version "1.0.2" @@ -10634,11 +10943,6 @@ undici-types@~5.26.4: resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== -undici-types@~6.11.1: - version "6.11.1" - resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.11.1.tgz#432ea6e8efd54a48569705a699e62d8f4981b197" - integrity sha512-mIDEX2ek50x0OlRgxryxsenE5XaQD4on5U2inY7RApK3SOJpofyw7uW2AyfMKkhAxXIceo2DeWGVGwyvng1GNQ== - undici@^5.14.0: version "5.28.4" resolved "https://registry.yarnpkg.com/undici/-/undici-5.28.4.tgz#6b280408edb6a1a604a9b20340f45b422e373068" @@ -10647,9 +10951,9 @@ undici@^5.14.0: "@fastify/busboy" "^2.0.0" undici@^6.18.2: - version 
"6.19.4" - resolved "https://registry.yarnpkg.com/undici/-/undici-6.19.4.tgz#5ec3b191699a1678ee0aa9ed14e443a682d0f7a8" - integrity sha512-i3uaEUwNdkRq2qtTRRJb13moW5HWqviu7Vl7oYRYz++uPtGHJj+x7TGjcEuwS5Mt2P4nA0U9dhIX3DdB6JGY0g== + version "6.19.2" + resolved "https://registry.yarnpkg.com/undici/-/undici-6.19.2.tgz#231bc5de78d0dafb6260cf454b294576c2f3cd31" + integrity sha512-JfjKqIauur3Q6biAtHJ564e3bWa8VvT+7cSiOJHFbX4Erv6CLGDpg8z+Fmg/1OI/47RA+GI2QZaF48SSaLvyBA== universalify@^0.1.0: version "0.1.2" @@ -10671,13 +10975,13 @@ untildify@^3.0.3: resolved "https://registry.yarnpkg.com/untildify/-/untildify-3.0.3.tgz#1e7b42b140bcfd922b22e70ca1265bfe3634c7c9" integrity sha512-iSk/J8efr8uPT/Z4eSUywnqyrQU7DSdMfdqK4iWEaUVVmcP5JcnpRqmVMwcwcnmI1ATFNgC5V90u09tBynNFKA== -update-browserslist-db@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz#7ca61c0d8650766090728046e416a8cde682859e" - integrity sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ== +update-browserslist-db@^1.0.13: + version "1.0.13" + resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz#3c5e4f5c083661bd38ef64b6328c26ed6c8248c4" + integrity sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg== dependencies: - escalade "^3.1.2" - picocolors "^1.0.1" + escalade "^3.1.1" + picocolors "^1.0.0" uri-js@^4.2.2: version "4.4.1" @@ -10687,12 +10991,12 @@ uri-js@^4.2.2: punycode "^2.1.0" url@^0.11.0: - version "0.11.4" - resolved "https://registry.yarnpkg.com/url/-/url-0.11.4.tgz#adca77b3562d56b72746e76b330b7f27b6721f3c" - integrity sha512-oCwdVC7mTuWiPyjLUz/COz5TLk6wgp0RCsN+wHZ2Ekneac9w8uuV0njcbbie2ME+Vs+d6duwmYuR3HgQXs1fOg== + version "0.11.3" + resolved "https://registry.yarnpkg.com/url/-/url-0.11.3.tgz#6f495f4b935de40ce4a0a52faee8954244f3d3ad" + integrity sha512-6hxOLGfZASQK/cijlZnZJTq8OXAkt/3YGfQX45vvMYXpZoo8NdWZcY73K108Jf759lS1Bv/8wXnHDTSz17dSRw== dependencies: punycode "^1.4.1" - qs "^6.12.3" + qs "^6.11.2" utf-8-validate@5.0.7: version "5.0.7" @@ -10739,9 +11043,9 @@ v8-compile-cache@^2.0.3: integrity sha512-ocyWc3bAHBB/guyqJQVI5o4BZkPhznPYUG2ea80Gond/BgNWpap8TOmLSeeQG7bnh2KMISxskdADG59j7zruhw== v8-to-istanbul@^9.0.1: - version "9.3.0" - resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz#b9572abfa62bd556c16d75fdebc1a411d5ff3175" - integrity sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA== + version "9.2.0" + resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.2.0.tgz#2ed7644a245cddd83d4e087b9b33b3e62dfd10ad" + integrity sha512-/EH/sDgxU2eGxajKdwLCDmQ4FWq+kpi3uCmBGpw1xJtnAxEjlD8j8PEiGWpCIMIs3ciNAgH0d3TTJiUkYzyZjA== dependencies: "@jridgewell/trace-mapping" "^0.3.12" "@types/istanbul-lib-coverage" "^2.0.1" @@ -10846,7 +11150,7 @@ widest-line@^3.1.0: dependencies: string-width "^4.0.0" -word-wrap@^1.2.5, word-wrap@~1.2.3: +word-wrap@~1.2.3: version "1.2.5" resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.5.tgz#d2c45c6dd4fbce621a66f136cbe328afd0410b34" integrity sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA== @@ -10869,12 +11173,21 @@ workerpool@6.2.0: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.0.tgz#827d93c9ba23ee2019c3ffaff5c27fccea289e8b" integrity sha512-Rsk5qQHJ9eowMH28Jwhe8HEbmdYDX4lwoMWshiCXugjtHqMD9ZbiqSDLxcsfdqsETPzVUtX5s1Z5kStiIM6l4A== 
-workerpool@^6.5.1: - version "6.5.1" - resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.5.1.tgz#060f73b39d0caf97c6db64da004cd01b4c099544" - integrity sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA== +workerpool@6.2.1: + version "6.2.1" + resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" + integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== + +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: +wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== @@ -10910,15 +11223,15 @@ ws@7.4.6: resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.6.tgz#5654ca8ecdeee47c33a9a4bf6d28e2be2980377c" integrity sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A== -ws@8.17.1: - version "8.17.1" - resolved "https://registry.yarnpkg.com/ws/-/ws-8.17.1.tgz#9293da530bb548febc95371d90f9c878727d919b" - integrity sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ== +ws@8.5.0: + version "8.5.0" + resolved "https://registry.yarnpkg.com/ws/-/ws-8.5.0.tgz#bfb4be96600757fe5382de12c670dab984a1ed4f" + integrity sha512-BWX0SWVgLPzYwF8lTzEy1egjhS4S4OEAHfsO8o65WOVsrnSRGaSiUaa9e0ggGlkMTtBlmOpEXiie9RUcBO86qg== ws@^7.4.6: - version "7.5.10" - resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.10.tgz#58b5c20dc281633f6c19113f39b349bd8bd558d9" - integrity sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ== + version "7.5.9" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.9.tgz#54fa7db29f4c7cec68b1ddd3a89de099942bb591" + integrity sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q== xhr2@0.1.3: version "0.1.3" @@ -10945,17 +11258,22 @@ yallist@^3.0.2: resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== +yallist@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" + integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== + yaml@^2.4.2: - version "2.5.0" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.5.0.tgz#c6165a721cf8000e91c36490a41d7be25176cf5d" - integrity sha512-2wWLbGbYDiSqqIKoPjar3MPgB94ErzCtrNE1FdqGuaO0pi2JGjmE8aW8TDZwzU7vuxcGRdL/4gPQwQ7hD5AMSw== + version "2.4.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.4.2.tgz#7a2b30f2243a5fc299e1f14ca58d475ed4bc5362" + integrity sha512-B3VqDZ+JAg1nZpaEmWtTXUlBneoGx6CPM9b0TENK6aoSu5t73dItudwdgmi6tHlIZZId4dZ9skcAQ2UbcyAeVA== yargs-parser@20.2.4: version "20.2.4" resolved 
"https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.4.tgz#b42890f14566796f85ae8e3a25290d205f154a54" integrity sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA== -yargs-parser@^20.2.2, yargs-parser@^20.2.9: +yargs-parser@^20.2.2: version "20.2.9" resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee" integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== @@ -10965,7 +11283,7 @@ yargs-parser@^21.0.1, yargs-parser@^21.1.1: resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== -yargs-unparser@2.0.0, yargs-unparser@^2.0.0: +yargs-unparser@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-2.0.0.tgz#f131f9226911ae5d9ad38c432fe809366c2325eb" integrity sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA== @@ -10975,7 +11293,7 @@ yargs-unparser@2.0.0, yargs-unparser@^2.0.0: flat "^5.0.2" is-plain-obj "^2.1.0" -yargs@16.2.0, yargs@^16.2.0: +yargs@16.2.0: version "16.2.0" resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== @@ -11012,9 +11330,13 @@ yocto-queue@^0.1.0: integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== yocto-queue@^1.0.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.1.1.tgz#fef65ce3ac9f8a32ceac5a634f74e17e5b232110" - integrity sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g== + version "1.0.0" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.0.0.tgz#7f816433fb2cbc511ec8bf7d263c3b58a1a3c251" + integrity sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g== + +"zksync-ethers-gw@https://github.com/zksync-sdk/zksync-ethers#kl/gateway-support": + version "6.12.1" + resolved "https://github.com/zksync-sdk/zksync-ethers#aa834387686ff8c04e41d1675b98f91d6c01847b" zksync-ethers@5.8.0-beta.5: version "5.8.0-beta.5" @@ -11034,3 +11356,7 @@ zksync-ethers@^6.9.0: version "6.9.0" resolved "https://registry.yarnpkg.com/zksync-ethers/-/zksync-ethers-6.9.0.tgz#efaff1d59e2cff837eeda84c4ba59fdca4972a91" integrity sha512-2CppwvLHtz689L7E9EhevbFtsqVukKC/lVicwdeUS2yqV46ET4iBR11rYdEfGW2oEo1h6yJuuwIBDFm2SybkIA== + +"zksync-ethers@git+https://github.com/zksync-sdk/zksync-ethers#ra/fix-l2-l1-bridging": + version "6.12.1" + resolved "git+https://github.com/zksync-sdk/zksync-ethers#d33ee6003e529adf79d9de4b19de9235da3a6da7" diff --git a/zk_toolbox/crates/common/src/term/spinner.rs b/zk_toolbox/crates/common/src/term/spinner.rs deleted file mode 100644 index b97ba075ac4..00000000000 --- a/zk_toolbox/crates/common/src/term/spinner.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::time::Instant; - -use cliclack::{spinner, ProgressBar}; - -use crate::config::global_config; - -/// Spinner is a helper struct to show a spinner while some operation is running. -pub struct Spinner { - msg: String, - pb: ProgressBar, - time: Instant, -} - -impl Spinner { - /// Create a new spinner with a message. 
- pub fn new(msg: &str) -> Self {
- let pb = spinner();
- pb.start(msg);
- if global_config().verbose {
- pb.stop(msg);
- }
- Spinner {
- msg: msg.to_owned(),
- pb,
- time: Instant::now(),
- }
- }
-
- /// Manually finish the spinner.
- pub fn finish(self) {
- self.pb.stop(format!(
- "{} done in {} secs",
- self.msg,
- self.time.elapsed().as_secs_f64()
- ));
- }
-
- /// Interrupt the spinner with a failed message.
- pub fn fail(self) {
- self.pb.error(format!(
- "{} failed in {} secs",
- self.msg,
- self.time.elapsed().as_secs_f64()
- ));
- }
-
- /// Freeze the spinner with current message.
- pub fn freeze(self) {
- self.pb.stop(self.msg);
- }
-}
diff --git a/zk_toolbox/crates/common/src/wallets.rs b/zk_toolbox/crates/common/src/wallets.rs
deleted file mode 100644
index ed5e11b3261..00000000000
--- a/zk_toolbox/crates/common/src/wallets.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-use ethers::{
- core::rand::Rng,
- signers::{coins_bip39::English, LocalWallet, MnemonicBuilder, Signer},
- types::{Address, H256},
-};
-use serde::{Deserialize, Serialize};
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct Wallet {
- pub address: Address,
- pub private_key: Option<H256>,
-}
-
-impl Wallet {
- pub fn random(rng: &mut impl Rng) -> Self {
- let private_key = H256::random_using(rng);
- let local_wallet = LocalWallet::from_bytes(private_key.as_bytes()).unwrap();
-
- Self {
- address: Address::from_slice(local_wallet.address().as_bytes()),
- private_key: Some(private_key),
- }
- }
-
- pub fn new_with_key(private_key: H256) -> Self {
- let local_wallet = LocalWallet::from_bytes(private_key.as_bytes()).unwrap();
- Self {
- address: Address::from_slice(local_wallet.address().as_bytes()),
- private_key: Some(private_key),
- }
- }
-
- pub fn from_mnemonic(mnemonic: &str, base_path: &str, index: u32) -> anyhow::Result<Self> {
- let wallet = MnemonicBuilder::<English>::default()
- .phrase(mnemonic)
- .derivation_path(&format!("{}/{}", base_path, index))?
- .build()?;
- let private_key = H256::from_slice(&wallet.signer().to_bytes());
- Ok(Self::new_with_key(private_key))
- }
-
- pub fn empty() -> Self {
- Self {
- address: Address::zero(),
- private_key: Some(H256::zero()),
- }
- }
-}
-
-#[test]
-fn test_load_localhost_wallets() {
- let wallet = Wallet::from_mnemonic(
- "stuff slice staff easily soup parent arm payment cotton trade scatter struggle",
- "m/44'/60'/0'/0",
- 1,
- )
- .unwrap();
- assert_eq!(
- wallet.address,
- Address::from_slice(
- &ethers::utils::hex::decode("0xa61464658AfeAf65CccaaFD3a512b69A83B77618").unwrap()
- )
- );
-}
diff --git a/zk_toolbox/crates/zk_inception/build.rs b/zk_toolbox/crates/zk_inception/build.rs
deleted file mode 100644
index 43c8d7a5aac..00000000000
--- a/zk_toolbox/crates/zk_inception/build.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-use std::path::PathBuf;
-
-use ethers::contract::Abigen;
-
-fn main() -> eyre::Result<()> {
- let outdir = PathBuf::from(std::env::var("OUT_DIR")?).canonicalize()?;
- Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json")?
- .generate()?
- .write_to_file(outdir.join("consensus_registry_abi.rs"))?; - Ok(()) -} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs deleted file mode 100644 index b2d92ebd104..00000000000 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs +++ /dev/null @@ -1,176 +0,0 @@ -use std::path::PathBuf; - -use anyhow::Context; -use common::{ - config::global_config, - db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, - logger, - server::{Server, ServerMode}, - spinner::Spinner, -}; -use config::{ - override_config, set_databases, set_file_artifacts, set_rocks_db_config, - traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, ContractsConfig, EcosystemConfig, FileArtifacts, GeneralConfig, GenesisConfig, - SecretsConfig, WalletsConfig, -}; -use types::ProverMode; -use xshell::Shell; -use zksync_basic_types::commitment::L1BatchCommitmentMode; - -use super::args::genesis::GenesisArgsFinal; -use crate::{ - commands::chain::args::genesis::GenesisArgs, - consts::{ - PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG, - PROVER_MIGRATIONS, SERVER_MIGRATIONS, - }, - messages::{ - MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, - MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_FAILED_TO_RUN_SERVER_ERR, - MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER, - MSG_INITIALIZING_PROVER_DATABASE, MSG_INITIALIZING_SERVER_DATABASE, - MSG_RECREATE_ROCKS_DB_ERRROR, MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS, - MSG_STARTING_GENESIS_SPINNER, - }, - utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, -}; - -pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { - let ecosystem_config = EcosystemConfig::from_file(shell)?; - let chain_config = ecosystem_config - .load_current_chain() - .context(MSG_CHAIN_NOT_INITIALIZED)?; - let args = args.fill_values_with_prompt(&chain_config); - - genesis(args, shell, &chain_config).await?; - logger::outro(MSG_GENESIS_COMPLETED); - - Ok(()) -} - -pub async fn genesis( - args: GenesisArgsFinal, - shell: &Shell, - config: &ChainConfig, -) -> anyhow::Result<()> { - shell.create_dir(&config.rocks_db_path)?; - - let link_to_code = config.link_to_code.clone(); - let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main) - .context(MSG_RECREATE_ROCKS_DB_ERRROR)?; - let mut general = config.get_general_config()?; - let file_artifacts = FileArtifacts::new(config.artifacts.clone()); - set_rocks_db_config(&mut general, rocks_db)?; - set_file_artifacts(&mut general, file_artifacts); - general.save_with_base_path(shell, &config.configs)?; - - if config.prover_version != ProverMode::NoProofs { - override_config( - shell, - link_to_code.join(PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG), - config, - )?; - } - - if config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium { - override_config( - shell, - link_to_code.join(PATH_TO_VALIDIUM_OVERRIDE_CONFIG), - config, - )?; - } - - let mut secrets = config.get_secrets_config()?; - set_databases(&mut secrets, &args.server_db, &args.prover_db)?; - secrets.save_with_base_path(shell, &config.configs)?; - - logger::note( - MSG_SELECTED_CONFIG, - logger::object_to_string(serde_json::json!({ - "chain_config": config, - "server_db_config": args.server_db, - "prover_db_config": args.prover_db, - })), - ); - logger::info(MSG_STARTING_GENESIS); - - let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); - 
initialize_databases( - shell, - &args.server_db, - &args.prover_db, - config.link_to_code.clone(), - args.dont_drop, - ) - .await?; - spinner.finish(); - - let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER); - run_server_genesis(config, shell)?; - spinner.finish(); - - Ok(()) -} - -async fn initialize_databases( - shell: &Shell, - server_db_config: &DatabaseConfig, - prover_db_config: &DatabaseConfig, - link_to_code: PathBuf, - dont_drop: bool, -) -> anyhow::Result<()> { - let path_to_server_migration = link_to_code.join(SERVER_MIGRATIONS); - - if global_config().verbose { - logger::debug(MSG_INITIALIZING_SERVER_DATABASE) - } - if !dont_drop { - drop_db_if_exists(server_db_config) - .await - .context(MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR)?; - init_db(server_db_config).await?; - } - migrate_db( - shell, - path_to_server_migration, - &server_db_config.full_url(), - ) - .await?; - - if global_config().verbose { - logger::debug(MSG_INITIALIZING_PROVER_DATABASE) - } - if !dont_drop { - drop_db_if_exists(prover_db_config) - .await - .context(MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR)?; - init_db(prover_db_config).await?; - } - let path_to_prover_migration = link_to_code.join(PROVER_MIGRATIONS); - migrate_db( - shell, - path_to_prover_migration, - &prover_db_config.full_url(), - ) - .await?; - - Ok(()) -} - -fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { - let server = Server::new(None, chain_config.link_to_code.clone(), false); - server - .run( - shell, - ServerMode::Genesis, - GenesisConfig::get_path_with_base_path(&chain_config.configs), - WalletsConfig::get_path_with_base_path(&chain_config.configs), - GeneralConfig::get_path_with_base_path(&chain_config.configs), - SecretsConfig::get_path_with_base_path(&chain_config.configs), - ContractsConfig::get_path_with_base_path(&chain_config.configs), - None, - vec![], - ) - .context(MSG_FAILED_TO_RUN_SERVER_ERR) -} diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml deleted file mode 100644 index d343e7af43e..00000000000 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "zk_supervisor" -version = "0.1.0" -edition.workspace = true -homepage.workspace = true -license.workspace = true -authors.workspace = true -exclude.workspace = true -repository.workspace = true -description.workspace = true -keywords.workspace = true - -[dependencies] -anyhow.workspace = true -clap.workspace = true -common.workspace = true -config.workspace = true -chrono.workspace = true -ethers.workspace = true -human-panic.workspace = true -strum.workspace = true -tokio.workspace = true -url.workspace = true -xshell.workspace = true -serde.workspace = true -serde_json.workspace = true -clap-markdown.workspace = true -futures.workspace = true -types.workspace = true -serde_yaml.workspace = true -zksync_basic_types.workspace = true -sqruff-lib = "0.19.0" diff --git a/zk_toolbox/crates/zk_supervisor/README.md b/zk_toolbox/crates/zk_supervisor/README.md deleted file mode 100644 index 865bd2f0d57..00000000000 --- a/zk_toolbox/crates/zk_supervisor/README.md +++ /dev/null @@ -1,386 +0,0 @@ -# Command-Line Help for `zk_supervisor` - -This document contains the help content for the `zk_supervisor` command-line program. 
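To make the reference below easier to scan, here are a few illustrative invocations assembled only from options documented in this file; the chain name `era` is a placeholder, not part of the generated help:

```bash
# Reset just the core database of a chain named "era" (placeholder name)
zk_supervisor --chain era database reset --core true

# Run the integration suite against an external node, with verbose logging
zk_supervisor -v test integration --external-node

# Check formatting without rewriting any files
zk_supervisor fmt --check
```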
- -**Command Overview:** - -- [`zk_supervisor`↴](#zk_supervisor) -- [`zk_supervisor database`↴](#zk_supervisor-database) -- [`zk_supervisor database check-sqlx-data`↴](#zk_supervisor-database-check-sqlx-data) -- [`zk_supervisor database drop`↴](#zk_supervisor-database-drop) -- [`zk_supervisor database migrate`↴](#zk_supervisor-database-migrate) -- [`zk_supervisor database new-migration`↴](#zk_supervisor-database-new-migration) -- [`zk_supervisor database prepare`↴](#zk_supervisor-database-prepare) -- [`zk_supervisor database reset`↴](#zk_supervisor-database-reset) -- [`zk_supervisor database setup`↴](#zk_supervisor-database-setup) -- [`zk_supervisor test`↴](#zk_supervisor-test) -- [`zk_supervisor test integration`↴](#zk_supervisor-test-integration) -- [`zk_supervisor test revert`↴](#zk_supervisor-test-revert) -- [`zk_supervisor test recovery`↴](#zk_supervisor-test-recovery) -- [`zk_supervisor test upgrade`↴](#zk_supervisor-test-upgrade) -- [`zk_supervisor test rust`↴](#zk_supervisor-test-rust) -- [`zk_supervisor test l1-contracts`↴](#zk_supervisor-test-l1-contracts) -- [`zk_supervisor test prover`↴](#zk_supervisor-test-prover) -- [`zk_supervisor clean`↴](#zk_supervisor-clean) -- [`zk_supervisor clean all`↴](#zk_supervisor-clean-all) -- [`zk_supervisor clean containers`↴](#zk_supervisor-clean-containers) -- [`zk_supervisor clean contracts-cache`↴](#zk_supervisor-clean-contracts-cache) -- [`zk_supervisor snapshot`↴](#zk_supervisor-snapshot) -- [`zk_supervisor snapshot create`↴](#zk_supervisor-snapshot-create) -- [`zk_supervisor lint`↴](#zk_supervisor-lint) -- [`zk_supervisor fmt`↴](#zk_supervisor-fmt) -- [`zk_supervisor fmt rustfmt`↴](#zk_supervisor-fmt-rustfmt) -- [`zk_supervisor fmt contract`↴](#zk_supervisor-fmt-contract) -- [`zk_supervisor fmt prettier`↴](#zk_supervisor-fmt-prettier) -- [`zk_supervisor prover info`↴](#zk_supervisor-prover-info) -- [`zk_supervisor prover insert-version`↴](#zk_supervisor-prover-insert-version) -- [`zk_supervisor prover insert-batch`↴](#zk_supervisor-prover-insert-batch) - -## `zk_supervisor` - -ZK Toolbox is a set of tools for working with zk stack. - -**Usage:** `zk_supervisor [OPTIONS] ` - -###### **Subcommands:** - -- `database` — Database related commands -- `test` — Run tests -- `clean` — Clean artifacts -- `snapshot` — Snapshots creator -- `lint` — Lint code -- `fmt` — Format code -- `prover-version` — Protocol version used by provers - -###### **Options:** - -- `-v`, `--verbose` — Verbose mode -- `--chain ` — Chain to use -- `--ignore-prerequisites` — Ignores prerequisites checks - -## `zk_supervisor database` - -Database related commands - -**Usage:** `zk_supervisor database ` - -###### **Subcommands:** - -- `check-sqlx-data` — Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked. -- `drop` — Drop databases. If no databases are selected, all databases will be dropped. -- `migrate` — Migrate databases. If no databases are selected, all databases will be migrated. -- `new-migration` — Create new migration -- `prepare` — Prepare sqlx-data.json. If no databases are selected, all databases will be prepared. -- `reset` — Reset databases. If no databases are selected, all databases will be reset. -- `setup` — Setup databases. If no databases are selected, all databases will be setup. - -## `zk_supervisor database check-sqlx-data` - -Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked. 
- -**Usage:** `zk_supervisor database check-sqlx-data [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database drop` - -Drop databases. If no databases are selected, all databases will be dropped. - -**Usage:** `zk_supervisor database drop [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database migrate` - -Migrate databases. If no databases are selected, all databases will be migrated. - -**Usage:** `zk_supervisor database migrate [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database new-migration` - -Create new migration - -**Usage:** `zk_supervisor database new-migration [OPTIONS]` - -###### **Options:** - -- `--database ` — Database to create new migration for - - Possible values: `prover`, `core` - -- `--name ` — Migration name - -## `zk_supervisor database prepare` - -Prepare sqlx-data.json. If no databases are selected, all databases will be prepared. - -**Usage:** `zk_supervisor database prepare [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database reset` - -Reset databases. If no databases are selected, all databases will be reset. - -**Usage:** `zk_supervisor database reset [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database setup` - -Setup databases. If no databases are selected, all databases will be setup. 
- -**Usage:** `zk_supervisor database setup [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor test` - -Run tests - -**Usage:** `zk_supervisor test ` - -###### **Subcommands:** - -- `integration` — Run integration tests -- `revert` — Run revert tests -- `recovery` — Run recovery tests -- `upgrade` — Run upgrade tests -- `rust` — Run unit-tests, accepts optional cargo test flags -- `l1-contracts` — Run L1 contracts tests -- `prover` — Run prover tests - -## `zk_supervisor test integration` - -Run integration tests - -**Usage:** `zk_supervisor test integration [OPTIONS]` - -###### **Options:** - -- `-e`, `--external-node` — Run tests for external node - -## `zk_supervisor test revert` - -Run revert tests - -**Usage:** `zk_supervisor test revert [OPTIONS]` - -###### **Options:** - -- `--enable-consensus` — Enable consensus -- `-e`, `--external-node` — Run tests for external node - -## `zk_supervisor test recovery` - -Run recovery tests - -**Usage:** `zk_supervisor test recovery [OPTIONS]` - -###### **Options:** - -- `-s`, `--snapshot` — Run recovery from a snapshot instead of genesis - -## `zk_supervisor test upgrade` - -Run upgrade tests - -**Usage:** `zk_supervisor test upgrade` - -## `zk_supervisor test rust` - -Run unit-tests, accepts optional cargo test flags - -**Usage:** `zk_supervisor test rust [OPTIONS]` - -###### **Options:** - -- `--options ` — Cargo test flags - -## `zk_supervisor test l1-contracts` - -Run L1 contracts tests - -**Usage:** `zk_supervisor test l1-contracts` - -## `zk_supervisor test prover` - -Run prover tests - -**Usage:** `zk_supervisor test prover` - -## `zk_supervisor clean` - -Clean artifacts - -**Usage:** `zk_supervisor clean ` - -###### **Subcommands:** - -- `all` — Remove containers and contracts cache -- `containers` — Remove containers and docker volumes -- `contracts-cache` — Remove contracts caches - -## `zk_supervisor clean all` - -Remove containers and contracts cache - -**Usage:** `zk_supervisor clean all` - -## `zk_supervisor clean containers` - -Remove containers and docker volumes - -**Usage:** `zk_supervisor clean containers` - -## `zk_supervisor clean contracts-cache` - -Remove contracts caches - -**Usage:** `zk_supervisor clean contracts-cache` - -## `zk_supervisor snapshot` - -Snapshots creator - -**Usage:** `zk_supervisor snapshot ` - -###### **Subcommands:** - -- `create` — - -## `zk_supervisor snapshot create` - -**Usage:** `zk_supervisor snapshot create` - -## `zk_supervisor lint` - -Lint code - -**Usage:** `zk_supervisor lint [OPTIONS]` - -###### **Options:** - -- `-c`, `--check` -- `-e`, `--extensions ` - - Possible values: `md`, `sol`, `js`, `ts`, `rs` - -## `zk_supervisor fmt` - -Format code - -**Usage:** `zk_supervisor fmt [OPTIONS] [COMMAND]` - -###### **Subcommands:** - -- `rustfmt` — -- `contract` — -- `prettier` — - -###### **Options:** - -- `-c`, `--check` - -## `zk_supervisor fmt rustfmt` - -**Usage:** `zk_supervisor fmt rustfmt` - -## `zk_supervisor fmt contract` - -**Usage:** `zk_supervisor fmt contract` - -## `zk_supervisor fmt prettier` - -**Usage:** `zk_supervisor fmt prettier [OPTIONS]` - -###### **Options:** - -- `-e`, `--extensions ` - - Possible values: `md`, `sol`, `js`, `ts`, `rs` - -## `zk_supervisor prover info` - -Prints prover protocol version, snark wrapper and prover database URL - -**Usage:** `zk_supervisor prover info` - -## `zk_supervisor prover 
insert-version` - -Inserts protocol version into prover database. - -**Usage:** `zk_supervisor prover insert-version [OPTIONS]` - -###### **Options:** - -- `--version ` — Protocol version in semantic format(`x.y.z`). Major version should be 0. -- `--snark-wrapper ` — Snark wrapper hash. -- `--default` - use default values for protocol version and snark wrapper hash (the ones found in zksync-era). - -## `zk_supervisor prover insert-batch` - -Inserts batch into prover database. - -**Usage:** `zk_supervisor prover insert-batch` - -###### **Options:** - -- `--number ` — Number of the batch to insert. -- `--version ` — Protocol version in semantic format(`x.y.z`). Major version should be 0. -- `--default` - use default value for protocol version (the one found in zksync-era). - -
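A rough sketch of how the two prover commands above compose; the protocol version, snark wrapper hash, and batch number are placeholder values, not defaults taken from the help text:

```bash
# Register a protocol version (placeholder values; major version stays 0 as noted above)
zk_supervisor prover insert-version --version 0.24.2 --snark-wrapper 0x1234abcd

# Queue batch 42 for that version; --default would instead use the version bundled with zksync-era
zk_supervisor prover insert-batch --number 42 --version 0.24.2
```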
- - This document was generated automatically by -clap-markdown. diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs deleted file mode 100644 index 242affd8a71..00000000000 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ /dev/null @@ -1,151 +0,0 @@ -use clap::{Parser, Subcommand}; -use commands::{ - config_writer::ConfigWriterArgs, contracts::ContractsArgs, database::DatabaseCommands, - lint::LintArgs, prover::ProverCommands, send_transactions::args::SendTransactionsArgs, - snapshot::SnapshotCommands, test::TestCommands, -}; -use common::{ - check_general_prerequisites, - config::{global_config, init_global_config, GlobalConfig}, - error::log_error, - init_prompt_theme, logger, - version::version_message, -}; -use config::EcosystemConfig; -use messages::{ - msg_global_chain_does_not_exist, MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, - MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, - MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, - MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, -}; -use xshell::Shell; - -use crate::commands::{clean::CleanCommands, fmt::FmtArgs}; - -mod commands; -mod consts; -mod dals; -mod defaults; -mod messages; - -#[derive(Parser, Debug)] -#[command( - version = version_message(env!("CARGO_PKG_VERSION")), - about -)] -struct Supervisor { - #[command(subcommand)] - command: SupervisorSubcommands, - #[clap(flatten)] - global: SupervisorGlobalArgs, -} - -#[derive(Subcommand, Debug)] -enum SupervisorSubcommands { - #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT, alias = "db")] - Database(DatabaseCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_TESTS_ABOUT, alias = "t")] - Test(TestCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_CLEAN)] - Clean(CleanCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT)] - Snapshot(SnapshotCommands), - #[command(about = MSG_SUBCOMMAND_LINT_ABOUT, alias = "l")] - Lint(LintArgs), - #[command(about = MSG_SUBCOMMAND_FMT_ABOUT)] - Fmt(FmtArgs), - #[command(hide = true)] - Markdown, - #[command(subcommand, about = MSG_PROVER_VERSION_ABOUT)] - Prover(ProverCommands), - #[command(about = MSG_CONTRACTS_ABOUT)] - Contracts(ContractsArgs), - #[command(about = MSG_CONFIG_WRITER_ABOUT, alias = "o")] - ConfigWriter(ConfigWriterArgs), - #[command(about = MSG_SEND_TXNS_ABOUT)] - SendTransactions(SendTransactionsArgs), -} - -#[derive(Parser, Debug)] -#[clap(next_help_heading = "Global options")] -struct SupervisorGlobalArgs { - /// Verbose mode - #[clap(short, long, global = true)] - verbose: bool, - /// Chain to use - #[clap(long, global = true)] - chain: Option, - /// Ignores prerequisites checks - #[clap(long, global = true)] - ignore_prerequisites: bool, -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - human_panic::setup_panic!(); - - // We must parse arguments before printing the intro, because some autogenerated - // Clap commands (like `--version` would look odd otherwise). 
- let args = Supervisor::parse(); - - init_prompt_theme(); - - logger::new_empty_line(); - logger::intro(); - - let shell = Shell::new().unwrap(); - init_global_config_inner(&shell, &args.global)?; - - if !global_config().ignore_prerequisites { - check_general_prerequisites(&shell); - } - - match run_subcommand(args, &shell).await { - Ok(_) => {} - Err(error) => { - log_error(error); - std::process::exit(1); - } - } - - Ok(()) -} - -async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { - match args.command { - SupervisorSubcommands::Database(command) => commands::database::run(shell, command).await?, - SupervisorSubcommands::Test(command) => commands::test::run(shell, command).await?, - SupervisorSubcommands::Clean(command) => commands::clean::run(shell, command)?, - SupervisorSubcommands::Snapshot(command) => commands::snapshot::run(shell, command).await?, - SupervisorSubcommands::Markdown => { - clap_markdown::print_help_markdown::(); - } - SupervisorSubcommands::Lint(args) => commands::lint::run(shell, args)?, - SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, - SupervisorSubcommands::Prover(command) => commands::prover::run(shell, command).await?, - SupervisorSubcommands::Contracts(args) => commands::contracts::run(shell, args)?, - SupervisorSubcommands::ConfigWriter(args) => commands::config_writer::run(shell, args)?, - SupervisorSubcommands::SendTransactions(args) => { - commands::send_transactions::run(shell, args).await? - } - } - Ok(()) -} - -fn init_global_config_inner(shell: &Shell, args: &SupervisorGlobalArgs) -> anyhow::Result<()> { - if let Some(name) = &args.chain { - if let Ok(config) = EcosystemConfig::from_file(shell) { - let chains = config.list_of_chains(); - if !chains.contains(name) { - anyhow::bail!(msg_global_chain_does_not_exist(name, &chains.join(", "))); - } - } - } - - init_global_config(GlobalConfig { - verbose: args.verbose, - chain_name: args.chain.clone(), - ignore_prerequisites: args.ignore_prerequisites, - }); - Ok(()) -} diff --git a/zk_toolbox/rust-toolchain b/zk_toolbox/rust-toolchain deleted file mode 100644 index dbd41264aa9..00000000000 --- a/zk_toolbox/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -1.81.0 diff --git a/zk_toolbox/zkup/README.md b/zk_toolbox/zkup/README.md deleted file mode 100644 index d6e3e634688..00000000000 --- a/zk_toolbox/zkup/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# zkup - zk_toolbox Installer - -`zkup` is a script designed to simplify the installation of -[zk_toolbox](https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox). It allows you to install the tool from a -local directory or directly from a GitHub repository. - -## Getting Started - -To install `zkup`, run the following command: - -```bash -curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zk_toolbox/zkup/install | bash -``` - -After installing `zkup`, you can use it to install `zk_toolbox` with: - -```bash -zkup -``` - -## Usage - -The `zkup` script provides various options for installing `zk_toolbox`: - -### Options - -- `-p, --path ` - Specify a local path to install `zk_toolbox` from. This option is ignored if `--repo` is provided. - -- `-r, --repo ` - GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". - -- `-b, --branch ` - Git branch to use when installing from a repository. Ignored if `--commit` or `--version` is provided. - -- `-c, --commit ` - Git commit hash to use when installing from a repository. 
Ignored if `--branch` or `--version` is provided. - -- `-v, --version ` - Git tag to use when installing from a repository. Ignored if `--branch` or `--commit` is provided. - -- `--inception` - Installs `zk_inception` from the repository. By default, `zkup` installs `zk_inception` and `zk_supervisor`. - -- `--supervisor` - Installs `zk_supervisor` from the repository. - -### Local Installation - -If you provide a local path using the `-p` or `--path` option, `zkup` will install `zk_toolbox` from that directory. -Note that repository-specific arguments (`--repo`, `--branch`, `--commit`, `--version`) will be ignored in this case to -preserve git state. - -### Repository Installation - -By default, `zkup` installs `zk_toolbox` from the "matter-labs/zksync-era" GitHub repository. You can specify a -different repository, branch, commit, or version using the respective options. If multiple arguments are provided, -`zkup` will prioritize them as follows: - -- `--version` -- `--commit` -- `--branch` - -### Examples - -**Install from a GitHub repository with a specific version:** - -```bash -zkup --repo matter-labs/zksync-era --version 0.1.1 -``` - -**Install from a local path, only installing `zk_inception`:** - -```bash -zkup --path /path/to/local/zk_toolbox --inception -``` diff --git a/zk_toolbox/zkup/install b/zk_toolbox/zkup/install deleted file mode 100755 index 4e24b03dec4..00000000000 --- a/zk_toolbox/zkup/install +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -BASE_DIR=${XDG_CONFIG_HOME:-$HOME} -ZKT_DIR=${ZKT_DIR:-"$BASE_DIR/.zkt"} -ZKT_BIN_DIR="$ZKT_DIR/bin" - -BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zk_toolbox/zkup/zkup" -BIN_PATH="$ZKT_BIN_DIR/zkup" - -mkdir -p "$ZKT_BIN_DIR" -curl -sSfL "$BIN_URL" -o "$BIN_PATH" -chmod +x "$BIN_PATH" - -if [[ ":$PATH:" == *":${ZKT_BIN_DIR}:"* ]]; then - echo "zkup: found ${ZKT_BIN_DIR} in PATH" - exit 0 -fi - -case $SHELL in -*/zsh) - PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" - ;; -*/bash) - PROFILE="$HOME/.bashrc" - ;; -*/fish) - PROFILE="$HOME/.config/fish/config.fish" - ;; -*/ash) - PROFILE="$HOME/.profile" - ;; -*) - echo "zkup: could not detect shell, manually add ${ZKT_BIN_DIR} to your PATH." - exit 1 - ;; -esac - -if [[ ! -f "$PROFILE" ]]; then - echo "zkup: Profile file $PROFILE does not exist, creating it." - touch "$PROFILE" -fi - -if [[ "$SHELL" == *"/fish"* ]]; then - echo -e "\n# Added by zkup\nfish_add_path -a $ZKT_BIN_DIR" >>"$PROFILE" - echo "zkup: Added $ZKT_BIN_DIR to PATH in $PROFILE using fish_add_path." -else - echo -e "\n# Added by zkup\nexport PATH=\"\$PATH:$ZKT_BIN_DIR\"" >>"$PROFILE" - echo "zkup: Added $ZKT_BIN_DIR to PATH in $PROFILE." -fi - -echo -echo "Added zkup to PATH." -echo "Run 'source $PROFILE' or start a new terminal session to use zkup." -echo "Then run 'zkup' to install ZK Toolbox." 
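Taken together, the README and install script above describe a typical first run. The sketch below assumes a bash profile and uses placeholder version and branch values; the installer prints the exact profile file it modified:

```bash
# Fetch the zkup launcher and let it append its bin directory to the shell profile
curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zk_toolbox/zkup/install | bash
source ~/.bashrc   # placeholder; use the profile path reported by the installer

# --version wins over --branch, so the branch below is ignored with a warning; only zk_supervisor is built
zkup --version 0.1.1 --branch main --supervisor
```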
diff --git a/zk_toolbox/zkup/zkup b/zk_toolbox/zkup/zkup deleted file mode 100755 index e6ca1748738..00000000000 --- a/zk_toolbox/zkup/zkup +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -BASE_DIR=${XDG_CONFIG_HOME:-$HOME} -ZKT_DIR=${ZKT_DIR:-"$BASE_DIR/.zkt"} -ZKT_BIN_DIR="$ZKT_DIR/bin" - -ZKUP_INSTALL_SUPERVISOR=0 -ZKUP_INSTALL_INCEPTION=0 -ZKUP_ALIAS=0 - -BINS=() - -main() { - parse_args "$@" - - zktoolbox_banner - - check_prerequisites - mkdir -p "$ZKT_BIN_DIR" - - set_bins - - if [ -n "$ZKUP_PATH" ]; then - install_local - else - install_from_repo - fi - - zktoolbox_banner - - for bin in "${BINS[@]}"; do - success "Installed $bin to $ZKT_BIN_DIR/$bin" - done - - if [ $ZKUP_ALIAS -eq 1 ]; then - create_alias - fi -} - -PREREQUISITES=(cargo git) - -check_prerequisites() { - say "Checking prerequisites" - - failed_prerequisites=() - for prerequisite in "${PREREQUISITES[@]}"; do - if ! check_prerequisite "$prerequisite"; then - failed_prerequisites+=("$prerequisite") - fi - done - if [ ${#failed_prerequisites[@]} -gt 0 ]; then - err "The following prerequisites are missing: ${failed_prerequisites[*]}" - exit 1 - fi -} - -check_prerequisite() { - command -v "$1" &>/dev/null -} - -parse_args() { - while [[ $# -gt 0 ]]; do - case $1 in - --) - shift - break - ;; - - -p | --path) - shift - ZKUP_PATH=$1 - ;; - -r | --repo) - shift - ZKUP_REPO=$1 - ;; - -b | --branch) - shift - ZKUP_BRANCH=$1 - ;; - -c | --commit) - shift - ZKUP_COMMIT=$1 - ;; - -v | --version) - shift - ZKUP_VERSION=$1 - ;; - --inception) ZKUP_INSTALL_INCEPTION=1 ;; - --supervisor) ZKUP_INSTALL_SUPERVISOR=1 ;; - -a | --alias) ZKUP_ALIAS=1 ;; - -h | --help) - usage - exit 0 - ;; - *) - err "Unknown argument: $1" - usage - exit 1 - ;; - esac - shift - done -} - -usage() { - cat < Specify a local path to install zk_toolbox from. Ignored if --repo is provided. - -r, --repo GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". - -b, --branch Git branch to use when installing from a repository. Ignored if --commit or --version is provided. - -c, --commit Git commit hash to use when installing from a repository. Ignored if --branch or --version is provided. - -v, --version Git tag to use when installing from a repository. Ignored if --branch or --commit is provided. - -a, --alias Create aliases zki and zks for zk_inception and zk_supervisor binaries. - --inception Installs the zk_inception binary. Default is to install both zk_inception and zk_supervisor binaries. - --supervisor Installs the zk_supervisor binary. Default is to install both zk_inception and zk_supervisor binaries. - -h, --help Show this help message and exit. - -Examples: - $(basename "$0") --repo matter-labs/zksync-era --version 0.1.1 - $(basename "$0") --path /path/to/local/zk_toolbox --inception -EOF -} - -set_bins() { - if [ $ZKUP_INSTALL_INCEPTION -eq 1 ]; then - BINS+=(zk_inception) - fi - - if [ $ZKUP_INSTALL_SUPERVISOR -eq 1 ]; then - BINS+=(zk_supervisor) - fi - - # Installs both binaries if not option is provided - if [ ${#BINS[@]} -eq 0 ]; then - BINS=(zk_inception zk_supervisor) - fi -} - -install_local() { - if [ ! 
-d "$ZKUP_PATH/zk_toolbox" ]; then - err "Path $ZKUP_PATH does not contain zk_toolbox" - exit 1 - fi - - if [ -n "$ZKUP_BRANCH" ] || [ -n "$ZKUP_COMMIT" ] || [ -n "$ZKUP_VERSION" ] || [ -n "$ZKUP_REPO" ]; then - warn "Ignoring --repo, --branch, --commit and --version arguments when installing from local path" - fi - - say "Installing zk_toolbox from $ZKUP_PATH" - ensure cd "$ZKUP_PATH"/zk_toolbox - - for bin in "${BINS[@]}"; do - say "Installing $bin" - ensure cargo install --root $ZKT_DIR --path ./crates/$bin --force - done -} - -install_from_repo() { - if [ -n "$ZKUP_PATH" ]; then - warn "Ignoring --path argument when installing from repository" - fi - - ZKUP_REPO=${ZKUP_REPO:-"matter-labs/zksync-era"} - - say "Installing zk_toolbox from $ZKUP_REPO" - - if [ -n "$ZKUP_VERSION" ]; then - if [ -n "$ZKUP_COMMIT" ] || [ -n "$ZKUP_BRANCH" ]; then - warn "Ignoring --commit and --branch arguments when installing by version" - fi - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --tag "zk_toolbox-v$ZKUP_VERSION" --locked "${BINS[@]}" --force - elif [ -n "$ZKUP_COMMIT" ]; then - if [ -n "$ZKUP_BRANCH" ]; then - warn "Ignoring --branch argument when installing by commit" - fi - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --rev "$ZKUP_COMMIT" --locked "${BINS[@]}" --force - elif [ -n "$ZKUP_BRANCH" ]; then - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --branch "$ZKUP_BRANCH" --locked "${BINS[@]}" --force - else - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --locked "${BINS[@]}" --force - fi -} - -create_alias() { - if [[ "${BINS[@]}" =~ "zk_inception" ]]; then - say "Creating alias 'zki' for zk_inception" - ensure ln -sf "$ZKT_BIN_DIR/zk_inception" "$ZKT_BIN_DIR/zki" - fi - - if [[ "${BINS[@]}" =~ "zk_supervisor" ]]; then - say "Creating alias 'zks' for zk_supervisor" - ensure ln -sf "$ZKT_BIN_DIR/zk_supervisor" "$ZKT_BIN_DIR/zks" - fi -} - -ensure() { - if ! 
"$@"; then - err "command failed: $*" - exit 1 - fi -} - -say() { - local action="${1%% *}" - local rest="${1#"$action" }" - - echo -e "\033[1;32m$action\033[0m $rest" -} - -success() { - echo -e "\033[1;32m$1\033[0m" -} - -warn() { - echo -e "\033[1;33mWARNING: $1\033[0m" -} - -err() { - echo -e "\033[1;31mERROR: $1\033[0m" >&2 -} - -zktoolbox_banner() { - printf ' - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -███████╗██╗ ██╗ ████████╗ ██████╗ ██████╗ ██╗ ██████╗ ██████╗ ██╗ ██╗ -╚══███╔╝██║ ██╔╝ ╚══██╔══╝██╔═══██╗██╔═══██╗██║ ██╔══██╗██╔═══██╗╚██╗██╔╝ - ███╔╝ █████╔╝ ██║ ██║ ██║██║ ██║██║ ██████╔╝██║ ██║ ╚███╔╝ - ███╔╝ ██╔═██╗ ██║ ██║ ██║██║ ██║██║ ██╔══██╗██║ ██║ ██╔██╗ -███████╗██║ ██╗ ██║ ╚██████╔╝╚██████╔╝███████╗██████╔╝╚██████╔╝██╔╝ ██╗ -╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝╚═════╝ ╚═════╝ ╚═╝ ╚═╝ - - - A Comprehensive Toolkit for Creating and Managing ZK Stack Chains - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -Repo : https://github.com/matter-labs/zksync-era/ -Docs : https://docs.zksync.io/ -Contribute : https://github.com/matter-labs/zksync-era/pulls - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -' -} - -main "$@" diff --git a/zk_toolbox/CHANGELOG.md b/zkstack_cli/CHANGELOG.md similarity index 100% rename from zk_toolbox/CHANGELOG.md rename to zkstack_cli/CHANGELOG.md diff --git a/zk_toolbox/Cargo.lock b/zkstack_cli/Cargo.lock similarity index 96% rename from zk_toolbox/Cargo.lock rename to zkstack_cli/Cargo.lock index 4d985bb8b09..1427939f4ef 100644 --- a/zk_toolbox/Cargo.lock +++ b/zkstack_cli/Cargo.lock @@ -14,9 +14,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -161,9 +161,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -172,9 +172,9 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", @@ -584,9 +584,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.23" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bbb537bb4a30b90362caddba8f360c0a56bc13d3a5570028e7197204cb54a17" +checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" dependencies = [ "jobserver", "libc", @@ -658,9 +658,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -677,9 +677,9 @@ 
dependencies = [ [[package]] name = "clap_builder" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", @@ -688,6 +688,15 @@ dependencies = [ "terminal_size", ] +[[package]] +name = "clap_complete" +version = "4.5.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9646e2e245bf62f45d39a0f3f36f1171ad1ea0d6967fd114bca72cb02a8fcdfb" +dependencies = [ + "clap", +] + [[package]] name = "clap_derive" version = "4.5.18" @@ -1182,6 +1191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", + "serde", ] [[package]] @@ -1991,9 +2001,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -2006,9 +2016,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -2016,15 +2026,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -2044,9 +2054,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-locks" @@ -2060,9 +2070,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -2071,15 +2081,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = 
"futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -2093,9 +2103,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -2144,9 +2154,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git_version_macro" @@ -2241,7 +2251,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2260,7 +2270,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2283,6 +2293,12 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "hashers" version = "1.0.1" @@ -2415,9 +2431,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -2511,7 +2527,7 @@ dependencies = [ "hyper 1.4.1", "hyper-util", "log", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -2674,12 +2690,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -2715,9 +2731,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is_terminal_polyfill" @@ -2798,9 +2814,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "0cb94a0ffd3f3ee755c20f7d8752f45cac88605a4dcf808abcff72873296ec7b" dependencies = [ "wasm-bindgen", ] @@ -2834,7 +2850,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "rustls-platform-verifier", 
"soketto", @@ -2887,7 +2903,7 @@ dependencies = [ "hyper-util", "jsonrpsee-core", "jsonrpsee-types", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-platform-verifier", "serde", "serde_json", @@ -3485,21 +3501,18 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.20.1" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" -dependencies = [ - "portable-atomic", -] +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "open-fastrlp" @@ -3606,7 +3619,7 @@ dependencies = [ "bytes", "http 1.1.0", "opentelemetry", - "reqwest 0.12.7", + "reqwest 0.12.8", ] [[package]] @@ -3623,7 +3636,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost 0.13.3", - "reqwest 0.12.7", + "reqwest 0.12.8", "thiserror", "tokio", "tonic", @@ -3831,7 +3844,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.5.0", + "indexmap 2.6.0", ] [[package]] @@ -3899,18 +3912,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", @@ -4048,9 +4061,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] @@ -4406,9 +4419,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ "base64 0.22.1", "bytes", @@ -4432,7 +4445,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "serde", "serde_json", "serde_urlencoded", @@ -4610,9 +4623,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "aws-lc-rs", 
"log", @@ -4631,7 +4644,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", "security-framework", @@ -4648,11 +4661,10 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] @@ -4673,7 +4685,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -4767,9 +4779,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] @@ -5102,7 +5114,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -5398,7 +5410,7 @@ dependencies = [ "hashbrown 0.14.5", "hashlink", "hex", - "indexmap 2.5.0", + "indexmap 2.6.0", "log", "memchr", "once_cell", @@ -5560,9 +5572,9 @@ dependencies = [ [[package]] name = "sqruff-lib" -version = "0.19.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd3d7d11b58d658bf0e33d6729a92a81790ffb757440828a7b01869a40314b5f" +checksum = "676775189e83a98fc603d59fc6d760a66895d511502a538081dac993fde1a09a" dependencies = [ "ahash", "anstyle", @@ -5573,7 +5585,7 @@ dependencies = [ "enum_dispatch", "fancy-regex", "getrandom", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.13.0", "lazy-regex", "nohash-hasher", @@ -5595,14 +5607,14 @@ dependencies = [ [[package]] name = "sqruff-lib-core" -version = "0.19.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b19ebfd19c2bb1fdf8ca626f451645d89b74fa696f3cc1286989e58436f791" +checksum = "48ec5ba65376ae9ba3e3dda153668dcb6452a7212ee7b4c9d48e053eb4f0f3fa" dependencies = [ "ahash", "enum_dispatch", "fancy-regex", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.13.0", "nohash-hasher", "pretty_assertions", @@ -5616,9 +5628,9 @@ dependencies = [ [[package]] name = "sqruff-lib-dialects" -version = "0.19.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60dc004661c65d9163edaa876e6bb2fbe7a0bcf7f00cb0e13428cd0b4ab4b27f" +checksum = "00fa1cd168dad593f8f6996d805acc1fd52c6d0ad0f6f5847a9cc22a6198cfc2" dependencies = [ "ahash", "itertools 0.13.0", @@ -5834,12 +5846,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" dependencies = [ "rustix", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] 
@@ -6022,7 +6034,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] @@ -6094,7 +6106,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.40", ] @@ -6105,7 +6117,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -6374,9 +6386,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -6401,9 +6413,9 @@ dependencies = [ [[package]] name = "unicode-properties" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] name = "unicode-width" @@ -6587,9 +6599,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "ef073ced962d62984fb38a36e5fdc1a2b23c9e0e1fa0689bb97afa4202ef6887" dependencies = [ "cfg-if", "once_cell", @@ -6598,9 +6610,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "c4bfab14ef75323f4eb75fa52ee0a3fb59611977fd3240da19b2cf36ff85030e" dependencies = [ "bumpalo", "log", @@ -6613,9 +6625,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "65471f79c1022ffa5291d33520cbbb53b7687b01c2f8e83b57d102eed7ed479d" dependencies = [ "cfg-if", "js-sys", @@ -6625,9 +6637,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "a7bec9830f60924d9ceb3ef99d55c155be8afa76954edffbb5936ff4509474e7" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6635,9 +6647,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "4c74f6e152a76a2ad448e223b0fc0b6b5747649c3d769cc6bf45737bf97d0ed6" dependencies = [ 
"proc-macro2", "quote", @@ -6648,15 +6660,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "a42f6c679374623f295a8623adfe63d9284091245c3504bde47c17a3ce2777d9" [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "44188d185b5bdcae1052d08bcbcf9091a5524038d4572cc4f4f2bb9d5554ddd9" dependencies = [ "js-sys", "wasm-bindgen", @@ -7101,24 +7113,46 @@ dependencies = [ ] [[package]] -name = "zk_inception" +name = "zkevm_opcode_defs" +version = "0.132.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" +dependencies = [ + "bitflags 2.6.0", + "blake2", + "ethereum-types", + "k256 0.11.6", + "lazy_static", + "sha2_ce", + "sha3_ce", +] + +[[package]] +name = "zkstack" version = "0.1.0" dependencies = [ "anyhow", + "chrono", "clap", "clap-markdown", + "clap_complete", "cliclack", "common", "config", + "dirs", "ethers", - "eyre", + "futures", "human-panic", "lazy_static", + "prost 0.12.6", + "rand", + "reqwest 0.12.8", "secrecy", "serde", "serde_json", "serde_yaml", "slugify-rs", + "sqruff-lib", "strum", "thiserror", "tokio", @@ -7130,51 +7164,15 @@ dependencies = [ "zksync_config", "zksync_consensus_crypto", "zksync_consensus_roles", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", + "zksync_protobuf_config", "zksync_system_constants", "zksync_types", "zksync_web3_decl", ] -[[package]] -name = "zk_supervisor" -version = "0.1.0" -dependencies = [ - "anyhow", - "chrono", - "clap", - "clap-markdown", - "common", - "config", - "ethers", - "futures", - "human-panic", - "serde", - "serde_json", - "serde_yaml", - "sqruff-lib", - "strum", - "tokio", - "types", - "url", - "xshell", - "zksync_basic_types", -] - -[[package]] -name = "zkevm_opcode_defs" -version = "0.132.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" -dependencies = [ - "bitflags 2.6.0", - "blake2", - "ethereum-types", - "k256 0.11.6", - "lazy_static", - "sha2_ce", - "sha3_ce", -] - [[package]] name = "zksync_basic_types" version = "0.1.0" @@ -7196,9 +7194,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -7221,7 +7219,11 @@ dependencies = [ "rand", "secrecy", "serde", + "strum", + "strum_macros", + "time", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -7230,9 +7232,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -7251,9 +7253,9 @@ dependencies = [ [[package]] name = 
"zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = "e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", @@ -7273,9 +7275,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand", @@ -7325,9 +7327,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -7346,9 +7348,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck", @@ -7372,6 +7374,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -7406,7 +7409,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -7414,7 +7416,6 @@ dependencies = [ "thiserror", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", @@ -7434,7 +7435,7 @@ dependencies = [ "hex", "num", "once_cell", - "reqwest 0.12.7", + "reqwest 0.12.8", "serde", "serde_json", "thiserror", @@ -7480,7 +7481,7 @@ dependencies = [ "jsonrpsee", "pin-project-lite", "rlp", - "rustls 0.23.13", + "rustls 0.23.14", "serde", "serde_json", "thiserror", diff --git a/zk_toolbox/Cargo.toml b/zkstack_cli/Cargo.toml similarity index 81% rename from zk_toolbox/Cargo.toml rename to zkstack_cli/Cargo.toml index 0c447f18f07..b89ef9e62b3 100644 --- a/zk_toolbox/Cargo.toml +++ b/zkstack_cli/Cargo.toml @@ -3,8 +3,7 @@ members = [ "crates/common", "crates/config", "crates/types", - "crates/zk_inception", - "crates/zk_supervisor", + "crates/zkstack", "crates/git_version_macro", ] resolver = "2" @@ -16,8 +15,8 @@ homepage = "https://zksync.io/" license = "MIT OR Apache-2.0" authors = ["The Matter Labs Team "] exclude = ["./github"] -repository = "https://github.com/matter-labs/zk_toolbox/" -description = "ZK Toolbox is a set of tools for working with zk stack." +repository = "https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli/" +description = "ZK Stack CLI is a set of tools for working with zk stack." 
keywords = ["zk", "cryptography", "blockchain", "ZKStack", "ZKsync"] @@ -33,25 +32,29 @@ zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } zksync_system_constants = { path = "../core/lib/constants" } -zksync_consensus_roles = "=0.3.0" -zksync_consensus_crypto = "=0.3.0" -zksync_protobuf = "=0.3.0" +zksync_consensus_roles = "=0.5.0" +zksync_consensus_crypto = "=0.5.0" +zksync_consensus_utils = "=0.5.0" +zksync_protobuf = "=0.5.0" +zksync_protobuf_build = "=0.5.0" zksync_types = { path = "../core/lib/types" } zksync_web3_decl = { path = "../core/lib/web3_decl" } # External dependencies anyhow = "1.0.82" clap = { version = "4.4", features = ["derive", "wrap_help", "string"] } +clap_complete = "4.5.33" +dirs = "5.0.1" slugify-rs = "0.0.3" cliclack = "0.2.5" console = "0.15.8" chrono = "0.4.38" -eyre = "0.6.12" ethers = "2.0" futures = "0.3.30" human-panic = "2.0" lazy_static = "1.4.0" once_cell = "1.19.0" +prost = "0.12.1" rand = "0.8.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/zk_toolbox/README.md b/zkstack_cli/README.md similarity index 84% rename from zk_toolbox/README.md rename to zkstack_cli/README.md index a3b44fa98b3..e8116508821 100644 --- a/zk_toolbox/README.md +++ b/zkstack_cli/README.md @@ -1,11 +1,7 @@ -# zk_toolbox +# ZK Stack CLI -Toolkit for creating and managing ZK Stack chains. - -## ZK Inception - -`ZK Inception` facilitates the creation and management of ZK Stacks. Commands are interactive but can also accept -arguments via the command line. +Toolkit for creating and managing ZK Stack chains. `ZK Stack CLI` facilitates the creation and management of ZK Stacks. +Commands are interactive but can also accept arguments via the command line. ### Dependencies @@ -14,19 +10,35 @@ dependencies on your machine. Ignore the Environment section for now. ### Installation -Install `zk_inception` from Git: +You can use `zkstackup` to install and manage `zkstack`: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +Then install the most recent version with: ```bash -cargo install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor --force +zkstackup ``` Or manually build from a local copy of the [ZKsync](https://github.com/matter-labs/zksync-era/) repository: ```bash -./bin/zkt +zkstackup --local +``` + +This command installs `zkstack` from the current repository. + +#### Manual installation + +Run from the repository root: + +```bash +cargo install --path zkstack_cli/crates/zkstack --force --locked ``` -This command installs `zk_inception` and `zk_supervisor` from the current repository. +And make sure that `.cargo/bin` is included into `PATH`. ### Foundry Integration @@ -51,13 +63,13 @@ BridgeHub, shared bridges, and state transition managers. 
To create a ZK Stack project, start by creating an ecosystem: ```bash -zk_inception ecosystem create +zkstack ecosystem create ``` If you choose not to start database & L1 containers after creating the ecosystem, you can later run: ```bash -zk_inception containers +zkstack containers ``` Execute subsequent commands from within the created ecosystem folder: @@ -71,14 +83,14 @@ cd path/to/ecosystem/name If the ecosystem has never been deployed before, initialize it: ```bash -zk_inception ecosystem init +zkstack ecosystem init ``` This initializes the first ZK chain, which becomes the default. Override with `--chain ` if needed. For default params, use: ```bash -zk_inception ecosystem init --dev +zkstack ecosystem init --dev ``` If the process gets stuck, resume it with `--resume`. This flag keeps track of already sent transactions and sends new @@ -98,7 +110,7 @@ To verify contracts, use the `--verify` flag. To change the default ZK chain: ```bash -zk_inception ecosystem change-default-chain +zkstack ecosystem change-default-chain ``` IMPORTANT: Currently, you cannot use an existing ecosystem to register a new chain. This feature will be added in the @@ -109,19 +121,19 @@ future. To setup [era-observability](https://github.com/matter-labs/era-observability): ```bash -zk_inception ecosystem setup-observability +zkstack ecosystem setup-observability ``` Or run: ```bash -zk_inception ecosystem init --observability +zkstack ecosystem init --observability ``` To start observability containers: ```bash -zk_inception containers --observability +zkstack containers --observability ``` ### ZK Chain @@ -131,7 +143,7 @@ zk_inception containers --observability The first ZK chain is generated upon ecosystem creation. Create additional chains and switch between them: ```bash -zk_inception chain create +zkstack chain create ``` #### Init @@ -139,7 +151,7 @@ zk_inception chain create Deploy contracts and initialize Zk Chain: ```bash -zk_inception chain init +zkstack chain init ``` This registers the chain in the BridgeHub and deploys all necessary contracts. Manual initialization steps: @@ -154,7 +166,7 @@ by a third party). To run the chain: ```bash -zk_inception server +zkstack server ``` You can specify the component you want to run using `--components` flag @@ -180,19 +192,13 @@ information. Initialize the prover: ```bash -zk_inception prover init -``` - -Generate setup keys: - -```bash -zk_inception prover generate-sk +zkstack prover init ``` Run the prover: ```bash -zk_inception prover run +zkstack prover run ``` Specify the prover component with `--component `. Components: @@ -208,13 +214,13 @@ For `witness-generator`, specify the round with `--round `. Rounds: Download required binaries (`solc`, `zksolc`, `vyper`, `zkvyper`): ```bash -zk_inception contract-verifier init +zkstack contract-verifier init ``` Run the contract verifier: ```bash -zk_inception contract-verifier run +zkstack contract-verifier run ``` ### External Node @@ -226,7 +232,7 @@ Commands for running an external node: Prepare configs: ```bash -zk_inception en configs +zkstack en configs ``` This ensures no port conflicts with the main node. @@ -236,7 +242,7 @@ This ensures no port conflicts with the main node. 
Prepare the databases: ```bash -zk_inception en init +zkstack en init ``` #### Run @@ -244,7 +250,7 @@ zk_inception en init Run the external node: ```bash -zk_inception en run +zkstack en run ``` ### Portal @@ -253,7 +259,7 @@ Once you have at least one chain initialized, you can run the [portal](https://g web-app to bridge tokens between L1 and L2 and more: ```bash -zk_inception portal +zkstack portal ``` This command will start the dockerized portal app using configuration from `apps/portal.config.json` file inside your @@ -269,7 +275,7 @@ contracts and more. First, each chain should be initialized: ```bash -zk_inception explorer init +zkstack explorer init ``` This command creates a database to store explorer data and generatesdocker compose file with explorer services @@ -278,7 +284,7 @@ This command creates a database to store explorer data and generatesdocker compo Next, for each chain you want to have an explorer, you need to start its backend services: ```bash -zk_inception explorer backend --chain +zkstack explorer backend --chain ``` This command uses previously created docker compose file to start the services (api, data fetcher, worker) required for @@ -287,7 +293,7 @@ the explorer. Finally, you can run the explorer app: ```bash -zk_inception explorer run +zkstack explorer run ``` This command will start the dockerized explorer app using configuration from `apps/explorer.config.json` file inside @@ -299,22 +305,22 @@ your ecosystem directory. You can edit this file to configure the app if needed. To update your node: ```bash -zk_inception update +zkstack update ``` This command pulls the latest changes, syncs the general config for all chains, and raises a warning if L1 upgrades are needed. -## ZK Supervisor +## Dev -Tools for developing ZKsync. +The subcommand `zkstack dev` offers tools for developing ZKsync. ### Database Commands for database manipulation: ```bash -zk_supervisor db +zkstack dev db ``` Possible commands: @@ -332,7 +338,7 @@ Possible commands: Clean artifacts: ```bash -zk_supervisor clean +zkstack dev clean ``` Possible commands: @@ -346,7 +352,7 @@ Possible commands: Run ZKsync tests: ```bash -zk_supervisor test +zkstack dev test ``` Possible commands: @@ -364,7 +370,7 @@ Possible commands: Create a snapshot of the current chain: ```bash -zks snapshot create +zkstack dev snapshot create ``` ### Contracts @@ -372,7 +378,7 @@ zks snapshot create Build contracts: ```bash -zks contracts +zkstack dev contracts ``` ### Format @@ -380,7 +386,7 @@ zks contracts Format code: ```bash -zks fmt +zkstack dev fmt ``` By default, this command runs all formatters. To run a specific fomatter use the following subcommands: @@ -394,7 +400,7 @@ By default, this command runs all formatters. To run a specific fomatter use the Lint code: ```bash -zks lint +zkstack dev lint ``` By default, this command runs the linter on all files. To target specific file types, use the `--target` option. 
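
> Editor's note (not part of the patch): the README above folds the old `zk_inception` and `zk_supervisor` entry points into a single `zkstack` binary whose developer tooling sits behind the `dev` subcommand. As rough orientation only, here is a minimal sketch of how such a nested command tree can be declared with clap's derive API (the workspace manifest in this patch already depends on `clap` 4 with the `derive` feature). The type and variant names below are hypothetical illustrations, not the actual `zkstack` definitions.

```rust
use clap::{Parser, Subcommand};

/// Hypothetical, simplified command tree mirroring what the README describes.
#[derive(Parser)]
#[command(name = "zkstack", about = "ZK Stack CLI (illustrative sketch)")]
struct ZkStackCli {
    #[command(subcommand)]
    command: Command,
}

#[derive(Subcommand)]
enum Command {
    /// `zkstack ecosystem create|init|...`
    Ecosystem,
    /// `zkstack chain create|init|...`
    Chain,
    /// `zkstack server`
    Server,
    /// Developer tooling that used to live in `zk_supervisor`: `zkstack dev ...`
    Dev {
        #[command(subcommand)]
        command: DevCommand,
    },
}

#[derive(Subcommand)]
enum DevCommand {
    /// `zkstack dev db`
    Db,
    /// `zkstack dev fmt`
    Fmt,
    /// `zkstack dev lint`
    Lint,
}

fn main() {
    // Parsing alone is enough to get `--help`, validation, and shell completions.
    let _cli = ZkStackCli::parse();
}
```
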
diff --git a/zk_toolbox/crates/common/Cargo.toml b/zkstack_cli/crates/common/Cargo.toml similarity index 100% rename from zk_toolbox/crates/common/Cargo.toml rename to zkstack_cli/crates/common/Cargo.toml diff --git a/zk_toolbox/crates/common/src/cmd.rs b/zkstack_cli/crates/common/src/cmd.rs similarity index 100% rename from zk_toolbox/crates/common/src/cmd.rs rename to zkstack_cli/crates/common/src/cmd.rs diff --git a/zk_toolbox/crates/common/src/config.rs b/zkstack_cli/crates/common/src/config.rs similarity index 100% rename from zk_toolbox/crates/common/src/config.rs rename to zkstack_cli/crates/common/src/config.rs diff --git a/zkstack_cli/crates/common/src/contracts.rs b/zkstack_cli/crates/common/src/contracts.rs new file mode 100644 index 00000000000..0f771bb9dad --- /dev/null +++ b/zkstack_cli/crates/common/src/contracts.rs @@ -0,0 +1,52 @@ +use std::path::PathBuf; + +use xshell::{cmd, Shell}; + +use crate::cmd::Cmd; + +pub fn build_test_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("etc/contracts-test-data")); + Cmd::new(cmd!(shell, "yarn install")).run()?; + Ok(Cmd::new(cmd!(shell, "yarn build")).run()?) +} + +pub fn build_l1_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/l1-contracts")); + Ok(Cmd::new(cmd!(shell, "yarn build")).run()?) +} + +pub fn build_l1_da_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/da-contracts")); + Ok(Cmd::new(cmd!(shell, "forge build")).run()?) +} + +pub fn build_l2_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/l2-contracts")); + // Ok(Cmd::new(cmd!( + // shell, + // "forge build --zksync --zk-enable-eravm-extensions" + // )) + // .run()?) + Cmd::new(cmd!(shell, "yarn build")).run()?; + Ok(()) +} + +pub fn build_system_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/system-contracts")); + // Do not update era-contract's lockfile to avoid dirty submodule + Cmd::new(cmd!(shell, "yarn install --frozen-lockfile")).run()?; + Cmd::new(cmd!(shell, "yarn build")).run()?; + Ok(()) + // Cmd::new(cmd!(shell, "yarn preprocess:system-contracts")).run()?; + // Cmd::new(cmd!( + // shell, + // "forge build --zksync --zk-enable-eravm-extensions" + // )) + // .run()?; + // Cmd::new(cmd!(shell, "yarn preprocess:bootloader")).run()?; + // Ok(Cmd::new(cmd!( + // shell, + // "forge build --zksync --zk-enable-eravm-extensions" + // )) + // .run()?) +} diff --git a/zk_toolbox/crates/common/src/db.rs b/zkstack_cli/crates/common/src/db.rs similarity index 100% rename from zk_toolbox/crates/common/src/db.rs rename to zkstack_cli/crates/common/src/db.rs diff --git a/zk_toolbox/crates/common/src/docker.rs b/zkstack_cli/crates/common/src/docker.rs similarity index 89% rename from zk_toolbox/crates/common/src/docker.rs rename to zkstack_cli/crates/common/src/docker.rs index a5731808814..71e2040ee31 100644 --- a/zk_toolbox/crates/common/src/docker.rs +++ b/zkstack_cli/crates/common/src/docker.rs @@ -14,7 +14,11 @@ pub fn up(shell: &Shell, docker_compose_file: &str, detach: bool) -> anyhow::Res } pub fn down(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { - Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run()?) 
+ Ok(Cmd::new(cmd!( + shell, + "docker compose -f {docker_compose_file} down -v" + )) + .run()?) } pub fn run(shell: &Shell, docker_image: &str, docker_args: Vec) -> anyhow::Result<()> { diff --git a/zk_toolbox/crates/common/src/ethereum.rs b/zkstack_cli/crates/common/src/ethereum.rs similarity index 96% rename from zk_toolbox/crates/common/src/ethereum.rs rename to zkstack_cli/crates/common/src/ethereum.rs index 33caaad9789..2100746fecf 100644 --- a/zk_toolbox/crates/common/src/ethereum.rs +++ b/zkstack_cli/crates/common/src/ethereum.rs @@ -6,18 +6,17 @@ use ethers::{ middleware::MiddlewareBuilder, prelude::{Http, LocalWallet, Provider, Signer, SignerMiddleware}, providers::Middleware, - types::{Address, TransactionRequest, H256}, + types::{Address, TransactionRequest}, }; use types::TokenInfo; use crate::{logger, wallets::Wallet}; pub fn create_ethers_client( - private_key: H256, + mut wallet: LocalWallet, l1_rpc: String, chain_id: Option, ) -> anyhow::Result, ethers::prelude::Wallet>> { - let mut wallet = LocalWallet::from_bytes(private_key.as_bytes())?; if let Some(chain_id) = chain_id { wallet = wallet.with_chain_id(chain_id); } diff --git a/zk_toolbox/crates/common/src/external_node.rs b/zkstack_cli/crates/common/src/external_node.rs similarity index 100% rename from zk_toolbox/crates/common/src/external_node.rs rename to zkstack_cli/crates/common/src/external_node.rs diff --git a/zk_toolbox/crates/common/src/files.rs b/zkstack_cli/crates/common/src/files.rs similarity index 100% rename from zk_toolbox/crates/common/src/files.rs rename to zkstack_cli/crates/common/src/files.rs diff --git a/zk_toolbox/crates/common/src/forge.rs b/zkstack_cli/crates/common/src/forge.rs similarity index 97% rename from zk_toolbox/crates/common/src/forge.rs rename to zkstack_cli/crates/common/src/forge.rs index 846685ab29a..a03795facfa 100644 --- a/zk_toolbox/crates/common/src/forge.rs +++ b/zkstack_cli/crates/common/src/forge.rs @@ -159,10 +159,12 @@ impl ForgeScript { } // Do not start the script if balance is not enough - pub fn private_key(&self) -> Option { + pub fn private_key(&self) -> Option { self.args.args.iter().find_map(|a| { if let ForgeScriptArg::PrivateKey { private_key } = a { - Some(H256::from_str(private_key).unwrap()) + let key = H256::from_str(private_key).unwrap(); + let key = LocalWallet::from_bytes(key.as_bytes()).unwrap(); + Some(key) } else { None } @@ -180,11 +182,7 @@ impl ForgeScript { } pub fn address(&self) -> Option
{ - self.private_key().and_then(|a| { - LocalWallet::from_bytes(a.as_bytes()) - .ok() - .map(|a| Address::from_slice(a.address().as_bytes())) - }) + self.private_key().map(|k| k.address()) } pub async fn get_the_balance(&self) -> anyhow::Result> { @@ -299,7 +297,7 @@ pub struct ForgeScriptArgs { pub zksync: bool, /// List of additional arguments that can be passed through the CLI. /// - /// e.g.: `zk_inception init -a --private-key=` + /// e.g.: `zkstack init -a --private-key=` #[clap(long, short)] #[arg(trailing_var_arg = true, allow_hyphen_values = true, hide = false)] additional_args: Vec, diff --git a/zk_toolbox/crates/common/src/git.rs b/zkstack_cli/crates/common/src/git.rs similarity index 100% rename from zk_toolbox/crates/common/src/git.rs rename to zkstack_cli/crates/common/src/git.rs diff --git a/zk_toolbox/crates/common/src/hardhat.rs b/zkstack_cli/crates/common/src/hardhat.rs similarity index 100% rename from zk_toolbox/crates/common/src/hardhat.rs rename to zkstack_cli/crates/common/src/hardhat.rs diff --git a/zk_toolbox/crates/common/src/lib.rs b/zkstack_cli/crates/common/src/lib.rs similarity index 91% rename from zk_toolbox/crates/common/src/lib.rs rename to zkstack_cli/crates/common/src/lib.rs index 6dc26bbba9f..91804bfe070 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zkstack_cli/crates/common/src/lib.rs @@ -4,6 +4,7 @@ mod term; pub mod cmd; pub mod config; +pub mod contracts; pub mod db; pub mod docker; pub mod ethereum; @@ -20,7 +21,7 @@ pub mod yaml; pub use prerequisites::{ check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITE, GPU_PREREQUISITES, - PROVER_CLI_PREREQUISITE, WGET_PREREQUISITE, + PROVER_CLI_PREREQUISITE, }; pub use prompt::{init_prompt_theme, Prompt, PromptConfirm, PromptSelect}; pub use term::{error, logger, spinner}; diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zkstack_cli/crates/common/src/prerequisites.rs similarity index 52% rename from zk_toolbox/crates/common/src/prerequisites.rs rename to zkstack_cli/crates/common/src/prerequisites.rs index 665096d8486..72d3c7d8041 100644 --- a/zk_toolbox/crates/common/src/prerequisites.rs +++ b/zkstack_cli/crates/common/src/prerequisites.rs @@ -2,79 +2,99 @@ use xshell::{cmd, Shell}; use crate::{cmd::Cmd, logger}; -const PREREQUISITES: [Prerequisite; 5] = [ - Prerequisite { - name: "git", - download_link: "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git", - }, - Prerequisite { - name: "docker", - download_link: "https://docs.docker.com/get-docker/", - }, - Prerequisite { - name: "forge", - download_link: "https://book.getfoundry.sh/getting-started/installation", - }, - Prerequisite { - name: "cargo", - download_link: "https://doc.rust-lang.org/cargo/getting-started/installation.html", - }, - Prerequisite { - name: "yarn", - download_link: "https://yarnpkg.com/getting-started/install", - }, -]; +fn prerequisites() -> [Prerequisite; 5] { + [ + Prerequisite { + name: "git", + download_link: "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git", + custom_validator: None, + }, + Prerequisite { + name: "docker", + download_link: "https://docs.docker.com/get-docker/", + custom_validator: None, + }, + Prerequisite { + name: "forge", + download_link: + "https://github.com/matter-labs/foundry-zksync?tab=readme-ov-file#quick-install", + custom_validator: Some(Box::new(|| { + let shell = Shell::new().unwrap(); + let Ok(result) = Cmd::new(cmd!(shell, "forge build --help")).run_with_output() + else { + return false; + }; + let Ok(stdout) = 
String::from_utf8(result.stdout) else { + return false; + }; + stdout.contains("ZKSync configuration") + })), + }, + Prerequisite { + name: "cargo", + download_link: "https://doc.rust-lang.org/cargo/getting-started/installation.html", + custom_validator: None, + }, + Prerequisite { + name: "yarn", + download_link: "https://yarnpkg.com/getting-started/install", + custom_validator: None, + }, + ] +} const DOCKER_COMPOSE_PREREQUISITE: Prerequisite = Prerequisite { name: "docker compose", download_link: "https://docs.docker.com/compose/install/", + custom_validator: None, }; pub const GPU_PREREQUISITES: [Prerequisite; 3] = [ Prerequisite { name: "cmake", download_link: "https://cmake.org/download/", + custom_validator: None, }, Prerequisite { name: "nvcc", download_link: "https://developer.nvidia.com/cuda-downloads", + custom_validator: None, }, // CUDA toolkit Prerequisite { name: "nvidia-smi", download_link: "https://developer.nvidia.com/cuda-downloads", + custom_validator: None, }, // CUDA GPU driver ]; -pub const WGET_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { - name: "wget", - download_link: "https://www.gnu.org/software/wget/", -}]; - pub const GCLOUD_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { name: "gcloud", download_link: "https://cloud.google.com/sdk/docs/install", + custom_validator: None, }]; pub const PROVER_CLI_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { name: "prover_cli", download_link: "https://github.com/matter-labs/zksync-era/tree/main/prover/crates/bin/prover_cli", + custom_validator: None, }]; pub struct Prerequisite { name: &'static str, download_link: &'static str, + custom_validator: Option bool>>, } pub fn check_general_prerequisites(shell: &Shell) { - check_prerequisites(shell, &PREREQUISITES, true); + check_prerequisites(shell, &prerequisites(), true); } pub fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_compose: bool) { let mut missing_prerequisites = vec![]; for prerequisite in prerequisites { - if !check_prerequisite(shell, prerequisite.name) { + if !check_prerequisite(shell, prerequisite) { missing_prerequisites.push(prerequisite); } } @@ -100,8 +120,15 @@ pub fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_ } } -fn check_prerequisite(shell: &Shell, name: &str) -> bool { - Cmd::new(cmd!(shell, "which {name}")).run().is_ok() +fn check_prerequisite(shell: &Shell, prerequisite: &Prerequisite) -> bool { + let name = prerequisite.name; + if Cmd::new(cmd!(shell, "which {name}")).run().is_err() { + return false; + } + let Some(custom) = &prerequisite.custom_validator else { + return true; + }; + custom() } fn check_docker_compose_prerequisite(shell: &Shell) -> bool { diff --git a/zk_toolbox/crates/common/src/prompt/confirm.rs b/zkstack_cli/crates/common/src/prompt/confirm.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/confirm.rs rename to zkstack_cli/crates/common/src/prompt/confirm.rs diff --git a/zk_toolbox/crates/common/src/prompt/input.rs b/zkstack_cli/crates/common/src/prompt/input.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/input.rs rename to zkstack_cli/crates/common/src/prompt/input.rs diff --git a/zk_toolbox/crates/common/src/prompt/mod.rs b/zkstack_cli/crates/common/src/prompt/mod.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/mod.rs rename to zkstack_cli/crates/common/src/prompt/mod.rs diff --git a/zk_toolbox/crates/common/src/prompt/select.rs b/zkstack_cli/crates/common/src/prompt/select.rs similarity 
index 100% rename from zk_toolbox/crates/common/src/prompt/select.rs rename to zkstack_cli/crates/common/src/prompt/select.rs diff --git a/zk_toolbox/crates/common/src/server.rs b/zkstack_cli/crates/common/src/server.rs similarity index 100% rename from zk_toolbox/crates/common/src/server.rs rename to zkstack_cli/crates/common/src/server.rs diff --git a/zk_toolbox/crates/common/src/term/error.rs b/zkstack_cli/crates/common/src/term/error.rs similarity index 100% rename from zk_toolbox/crates/common/src/term/error.rs rename to zkstack_cli/crates/common/src/term/error.rs diff --git a/zk_toolbox/crates/common/src/term/logger.rs b/zkstack_cli/crates/common/src/term/logger.rs similarity index 97% rename from zk_toolbox/crates/common/src/term/logger.rs rename to zkstack_cli/crates/common/src/term/logger.rs index 17e518d9ad9..035e81dc135 100644 --- a/zk_toolbox/crates/common/src/term/logger.rs +++ b/zkstack_cli/crates/common/src/term/logger.rs @@ -14,7 +14,7 @@ fn term_write(msg: impl Display) { } pub fn intro() { - cliclak_intro(style(" ZKsync toolbox ").on_cyan().black()).unwrap(); + cliclak_intro(style(" ZK Stack CLI ").on_cyan().black()).unwrap(); } pub fn outro(msg: impl Display) { diff --git a/zk_toolbox/crates/common/src/term/mod.rs b/zkstack_cli/crates/common/src/term/mod.rs similarity index 100% rename from zk_toolbox/crates/common/src/term/mod.rs rename to zkstack_cli/crates/common/src/term/mod.rs diff --git a/zkstack_cli/crates/common/src/term/spinner.rs b/zkstack_cli/crates/common/src/term/spinner.rs new file mode 100644 index 00000000000..3ec2631804a --- /dev/null +++ b/zkstack_cli/crates/common/src/term/spinner.rs @@ -0,0 +1,84 @@ +use std::{fmt::Display, io::IsTerminal, time::Instant}; + +use cliclack::{spinner, ProgressBar}; + +use crate::{config::global_config, logger}; + +/// Spinner is a helper struct to show a spinner while some operation is running. +pub struct Spinner { + msg: String, + output: SpinnerOutput, + time: Instant, +} + +impl Spinner { + /// Create a new spinner with a message. + pub fn new(msg: &str) -> Self { + let output = if std::io::stdout().is_terminal() { + let pb = spinner(); + pb.start(msg); + if global_config().verbose { + pb.stop(msg); + } + SpinnerOutput::Progress(pb) + } else { + logger::info(msg); + SpinnerOutput::Plain() + }; + Spinner { + msg: msg.to_owned(), + output, + time: Instant::now(), + } + } + + /// Manually finish the spinner. + pub fn finish(self) { + self.output.stop(format!( + "{} done in {} secs", + self.msg, + self.time.elapsed().as_secs_f64() + )); + } + + /// Interrupt the spinner with a failed message. + pub fn fail(self) { + self.output.error(format!( + "{} failed in {} secs", + self.msg, + self.time.elapsed().as_secs_f64() + )); + } + + /// Freeze the spinner with current message. + pub fn freeze(self) { + self.output.stop(self.msg); + } +} + +/// An abstraction that makes interactive progress bar optional in environments where virtual +/// terminal is not available. +/// +/// Uses plain `logger::{info,error}` as the fallback. +/// +/// See https://github.com/console-rs/indicatif/issues/530 for more details. 
+enum SpinnerOutput { + Progress(ProgressBar), + Plain(), +} + +impl SpinnerOutput { + fn error(&self, msg: impl Display) { + match self { + SpinnerOutput::Progress(pb) => pb.error(msg), + SpinnerOutput::Plain() => logger::error(msg), + } + } + + fn stop(self, msg: impl Display) { + match self { + SpinnerOutput::Progress(pb) => pb.stop(msg), + SpinnerOutput::Plain() => logger::info(msg), + } + } +} diff --git a/zk_toolbox/crates/common/src/version.rs b/zkstack_cli/crates/common/src/version.rs similarity index 100% rename from zk_toolbox/crates/common/src/version.rs rename to zkstack_cli/crates/common/src/version.rs diff --git a/zkstack_cli/crates/common/src/wallets.rs b/zkstack_cli/crates/common/src/wallets.rs new file mode 100644 index 00000000000..43a9864474c --- /dev/null +++ b/zkstack_cli/crates/common/src/wallets.rs @@ -0,0 +1,102 @@ +use ethers::{ + core::rand::{CryptoRng, Rng}, + signers::{coins_bip39::English, LocalWallet, MnemonicBuilder, Signer}, + types::{Address, H256}, +}; +use serde::{Deserialize, Serialize}; +use types::parse_h256; + +#[derive(Serialize, Deserialize)] +struct WalletSerde { + pub address: Address, + pub private_key: Option, +} + +#[derive(Debug, Clone)] +pub struct Wallet { + pub address: Address, + pub private_key: Option, +} + +impl<'de> Deserialize<'de> for Wallet { + fn deserialize>(d: D) -> Result { + let x = WalletSerde::deserialize(d)?; + Ok(match x.private_key { + None => Self { + address: x.address, + private_key: None, + }, + Some(k) => { + let k = LocalWallet::from_bytes(k.as_bytes()).map_err(serde::de::Error::custom)?; + if k.address() != x.address { + return Err(serde::de::Error::custom(format!( + "address does not match private key: got address {:#x}, want {:#x}", + x.address, + k.address(), + ))); + } + Self::new(k) + } + }) + } +} + +impl Serialize for Wallet { + fn serialize(&self, s: S) -> Result { + WalletSerde { + address: self.address, + private_key: self.private_key_h256(), + } + .serialize(s) + } +} + +impl Wallet { + pub fn private_key_h256(&self) -> Option { + self.private_key + .as_ref() + .map(|k| parse_h256(k.signer().to_bytes().as_slice()).unwrap()) + } + + pub fn random(rng: &mut (impl Rng + CryptoRng)) -> Self { + Self::new(LocalWallet::new(rng)) + } + + pub fn new(private_key: LocalWallet) -> Self { + Self { + address: private_key.address(), + private_key: Some(private_key), + } + } + + pub fn from_mnemonic(mnemonic: &str, base_path: &str, index: u32) -> anyhow::Result { + let wallet = MnemonicBuilder::::default() + .phrase(mnemonic) + .derivation_path(&format!("{}/{}", base_path, index))? 
+ .build()?; + Ok(Self::new(wallet)) + } + + pub fn empty() -> Self { + Self { + address: Address::zero(), + private_key: None, + } + } +} + +#[test] +fn test_load_localhost_wallets() { + let wallet = Wallet::from_mnemonic( + "stuff slice staff easily soup parent arm payment cotton trade scatter struggle", + "m/44'/60'/0'/0", + 1, + ) + .unwrap(); + assert_eq!( + wallet.address, + Address::from_slice( + ðers::utils::hex::decode("0xa61464658AfeAf65CccaaFD3a512b69A83B77618").unwrap() + ) + ); +} diff --git a/zk_toolbox/crates/common/src/withdraw.rs b/zkstack_cli/crates/common/src/withdraw.rs similarity index 100% rename from zk_toolbox/crates/common/src/withdraw.rs rename to zkstack_cli/crates/common/src/withdraw.rs diff --git a/zk_toolbox/crates/common/src/yaml.rs b/zkstack_cli/crates/common/src/yaml.rs similarity index 100% rename from zk_toolbox/crates/common/src/yaml.rs rename to zkstack_cli/crates/common/src/yaml.rs diff --git a/zk_toolbox/crates/config/Cargo.toml b/zkstack_cli/crates/config/Cargo.toml similarity index 100% rename from zk_toolbox/crates/config/Cargo.toml rename to zkstack_cli/crates/config/Cargo.toml diff --git a/zk_toolbox/crates/config/src/apps.rs b/zkstack_cli/crates/config/src/apps.rs similarity index 96% rename from zk_toolbox/crates/config/src/apps.rs rename to zkstack_cli/crates/config/src/apps.rs index 697b35b0851..3bd611bdc32 100644 --- a/zk_toolbox/crates/config/src/apps.rs +++ b/zkstack_cli/crates/config/src/apps.rs @@ -5,7 +5,7 @@ use xshell::Shell; use crate::{ consts::{APPS_CONFIG_FILE, DEFAULT_EXPLORER_PORT, DEFAULT_PORTAL_PORT, LOCAL_CONFIGS_PATH}, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkStackConfig}, }; /// Ecosystem level configuration for the apps (portal and explorer). @@ -20,7 +20,7 @@ pub struct AppEcosystemConfig { pub http_port: u16, } -impl ZkToolboxConfig for AppsEcosystemConfig {} +impl ZkStackConfig for AppsEcosystemConfig {} impl FileConfigWithDefaultName for AppsEcosystemConfig { const FILE_NAME: &'static str = APPS_CONFIG_FILE; } diff --git a/zk_toolbox/crates/config/src/chain.rs b/zkstack_cli/crates/config/src/chain.rs similarity index 94% rename from zk_toolbox/crates/config/src/chain.rs rename to zkstack_cli/crates/config/src/chain.rs index 7240260b380..e6b0d4f61e7 100644 --- a/zk_toolbox/crates/config/src/chain.rs +++ b/zkstack_cli/crates/config/src/chain.rs @@ -17,7 +17,7 @@ use crate::{ create_localhost_wallets, traits::{ FileConfigWithDefaultName, ReadConfig, ReadConfigWithBasePath, SaveConfig, - SaveConfigWithBasePath, ZkToolboxConfig, + SaveConfigWithBasePath, ZkStackConfig, }, ContractsConfig, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, GATEWAY_FILE, }; @@ -41,6 +41,8 @@ pub struct ChainConfigInternal { pub wallet_creation: WalletCreation, #[serde(skip_serializing_if = "Option::is_none")] pub legacy_bridge: Option, + #[serde(default)] // for backward compatibility + pub evm_emulator: bool, } /// Chain configuration file. 
This file is created in the chain @@ -62,6 +64,7 @@ pub struct ChainConfig { pub wallet_creation: WalletCreation, pub shell: OnceCell, pub legacy_bridge: Option, + pub evm_emulator: bool, } impl Serialize for ChainConfig { @@ -88,8 +91,8 @@ impl ChainConfig { pub fn get_wallets_config(&self) -> anyhow::Result { let path = self.configs.join(WALLETS_FILE); - if let Ok(wallets) = WalletsConfig::read(self.get_shell(), &path) { - return Ok(wallets); + if self.get_shell().path_exists(&path) { + return WalletsConfig::read(self.get_shell(), &path); } if self.wallet_creation == WalletCreation::Localhost { let wallets = create_localhost_wallets(self.get_shell(), &self.link_to_code, self.id)?; @@ -170,6 +173,7 @@ impl ChainConfig { base_token: self.base_token.clone(), wallet_creation: self.wallet_creation, legacy_bridge: self.legacy_bridge, + evm_emulator: self.evm_emulator, } } } @@ -178,4 +182,4 @@ impl FileConfigWithDefaultName for ChainConfigInternal { const FILE_NAME: &'static str = CONFIG_NAME; } -impl ZkToolboxConfig for ChainConfigInternal {} +impl ZkStackConfig for ChainConfigInternal {} diff --git a/zk_toolbox/crates/config/src/consensus_config.rs b/zkstack_cli/crates/config/src/consensus_config.rs similarity index 100% rename from zk_toolbox/crates/config/src/consensus_config.rs rename to zkstack_cli/crates/config/src/consensus_config.rs diff --git a/zk_toolbox/crates/config/src/consensus_secrets.rs b/zkstack_cli/crates/config/src/consensus_secrets.rs similarity index 67% rename from zk_toolbox/crates/config/src/consensus_secrets.rs rename to zkstack_cli/crates/config/src/consensus_secrets.rs index 0e5c4592d2f..da551a45279 100644 --- a/zk_toolbox/crates/config/src/consensus_secrets.rs +++ b/zkstack_cli/crates/config/src/consensus_secrets.rs @@ -2,13 +2,13 @@ use std::path::Path; use xshell::Shell; use zksync_config::configs::consensus::ConsensusSecrets; -use zksync_protobuf_config::decode_yaml_repr; +use zksync_protobuf_config::read_yaml_repr; use crate::traits::ReadConfig; impl ReadConfig for ConsensusSecrets { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/consts.rs b/zkstack_cli/crates/config/src/consts.rs similarity index 97% rename from zk_toolbox/crates/config/src/consts.rs rename to zkstack_cli/crates/config/src/consts.rs index 4323e3166e9..c4895b333c7 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zkstack_cli/crates/config/src/consts.rs @@ -66,8 +66,6 @@ pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001; pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002; /// Default port for the explorer data fetcher service pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040; -/// Default port for consensus service -pub const DEFAULT_CONSENSUS_PORT: u16 = 3054; pub const EXPLORER_API_DOCKER_IMAGE: &str = "matterlabs/block-explorer-api"; pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher"; diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zkstack_cli/crates/config/src/contracts.rs similarity index 95% rename from zk_toolbox/crates/config/src/contracts.rs rename to zkstack_cli/crates/config/src/contracts.rs index 3210e4f1926..0213636c437 100644 --- a/zk_toolbox/crates/config/src/contracts.rs +++ b/zkstack_cli/crates/config/src/contracts.rs @@ -12,14 +12,14 @@ use crate::{ }, register_chain::output::RegisterChainOutput, }, - traits::{FileConfigWithDefaultName, 
ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Deserialize, Serialize, Clone, Default)] pub struct ContractsConfig { pub create2_factory_addr: Address, pub create2_factory_salt: H256, - pub ecosystem_contracts: ToolboxEcosystemContracts, + pub ecosystem_contracts: EcosystemContracts, pub bridges: BridgesContracts, pub l1: L1Contracts, pub l2: L2Contracts, @@ -113,7 +113,8 @@ impl ContractsConfig { self.l1.access_control_restriction_addr = register_chain_output.access_control_restriction_addr; self.l1.chain_proxy_admin_addr = register_chain_output.chain_proxy_admin_addr; - self.l2.legacy_shared_bridge_addr = register_chain_output.l2_legacy_shared_bridge_addr; + self.l2.legacy_shared_bridge_addr = + Some(register_chain_output.l2_legacy_shared_bridge_addr); self.user_facing_diamond_proxy = register_chain_output.diamond_proxy_addr; } @@ -159,10 +160,10 @@ impl FileConfigWithDefaultName for ContractsConfig { const FILE_NAME: &'static str = CONTRACTS_FILE; } -impl ZkToolboxConfig for ContractsConfig {} +impl ZkStackConfig for ContractsConfig {} #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] -pub struct ToolboxEcosystemContracts { +pub struct EcosystemContracts { pub bridgehub_proxy_addr: Address, pub state_transition_proxy_addr: Address, pub transparent_proxy_admin_addr: Address, @@ -173,7 +174,7 @@ pub struct ToolboxEcosystemContracts { pub native_token_vault_addr: Address, } -impl ZkToolboxConfig for ToolboxEcosystemContracts {} +impl ZkStackConfig for EcosystemContracts {} #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct BridgesContracts { @@ -213,7 +214,7 @@ pub struct L2Contracts { pub default_l2_upgrader: Address, pub da_validator_addr: Address, pub l2_native_token_vault_proxy_addr: Address, - pub legacy_shared_bridge_addr: Address, pub consensus_registry: Option
<Address>, pub multicall3: Option<Address>
, + pub legacy_shared_bridge_addr: Option<Address>
, } diff --git a/zk_toolbox/crates/config/src/docker_compose.rs b/zkstack_cli/crates/config/src/docker_compose.rs similarity index 94% rename from zk_toolbox/crates/config/src/docker_compose.rs rename to zkstack_cli/crates/config/src/docker_compose.rs index 05c6e73eaea..2208c5a8654 100644 --- a/zk_toolbox/crates/config/src/docker_compose.rs +++ b/zkstack_cli/crates/config/src/docker_compose.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Default, Serialize, Deserialize, Clone)] pub struct DockerComposeConfig { @@ -34,7 +34,7 @@ pub struct DockerComposeService { pub other: serde_json::Value, } -impl ZkToolboxConfig for DockerComposeConfig {} +impl ZkStackConfig for DockerComposeConfig {} impl DockerComposeConfig { pub fn add_service(&mut self, name: &str, service: DockerComposeService) { diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zkstack_cli/crates/config/src/ecosystem.rs similarity index 93% rename from zk_toolbox/crates/config/src/ecosystem.rs rename to zkstack_cli/crates/config/src/ecosystem.rs index 5369ccb3c86..5fe85b175de 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zkstack_cli/crates/config/src/ecosystem.rs @@ -21,7 +21,7 @@ use crate::{ input::{Erc20DeploymentConfig, InitialDeploymentConfig}, output::{ERC20Tokens, Erc20Token}, }, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkStackConfig}, ChainConfig, ChainConfigInternal, ContractsConfig, WalletsConfig, }; @@ -94,9 +94,9 @@ impl FileConfigWithDefaultName for EcosystemConfig { const FILE_NAME: &'static str = CONFIG_NAME; } -impl ZkToolboxConfig for EcosystemConfigInternal {} +impl ZkStackConfig for EcosystemConfigInternal {} -impl ZkToolboxConfig for EcosystemConfig {} +impl ZkStackConfig for EcosystemConfig {} impl EcosystemConfig { fn get_shell(&self) -> &Shell { @@ -146,20 +146,20 @@ impl EcosystemConfig { .unwrap_or(self.default_chain.as_ref()) } - pub fn load_chain(&self, name: Option) -> Option { + pub fn load_chain(&self, name: Option) -> anyhow::Result { let name = name.unwrap_or(self.default_chain.clone()); self.load_chain_inner(&name) } - pub fn load_current_chain(&self) -> Option { + pub fn load_current_chain(&self) -> anyhow::Result { self.load_chain_inner(self.current_chain()) } - fn load_chain_inner(&self, name: &str) -> Option { + fn load_chain_inner(&self, name: &str) -> anyhow::Result { let path = self.chains.join(name).join(CONFIG_NAME); - let config = ChainConfigInternal::read(self.get_shell(), path.clone()).ok()?; + let config = ChainConfigInternal::read(self.get_shell(), path.clone())?; - Some(ChainConfig { + Ok(ChainConfig { id: config.id, name: config.name, chain_id: config.chain_id, @@ -178,6 +178,7 @@ impl EcosystemConfig { .artifacts_path .unwrap_or_else(|| self.get_chain_artifacts_path(name)), legacy_bridge: config.legacy_bridge, + evm_emulator: config.evm_emulator, }) } @@ -196,8 +197,8 @@ impl EcosystemConfig { pub fn get_wallets(&self) -> anyhow::Result { let path = self.config.join(WALLETS_FILE); - if let Ok(wallets) = WalletsConfig::read(self.get_shell(), &path) { - return Ok(wallets); + if self.get_shell().path_exists(&path) { + return WalletsConfig::read(self.get_shell(), &path); } if self.wallet_creation == WalletCreation::Localhost { // Use 0 id for ecosystem wallets @@ -232,7 +233,11 @@ impl EcosystemConfig { } pub fn 
get_default_configs_path(&self) -> PathBuf { - self.link_to_code.join(CONFIGS_PATH) + Self::default_configs_path(&self.link_to_code) + } + + pub fn default_configs_path(link_to_code: &Path) -> PathBuf { + link_to_code.join(CONFIGS_PATH) } /// Path to the predefined ecosystem configs diff --git a/zk_toolbox/crates/config/src/explorer.rs b/zkstack_cli/crates/config/src/explorer.rs similarity index 98% rename from zk_toolbox/crates/config/src/explorer.rs rename to zkstack_cli/crates/config/src/explorer.rs index ee7a59e5105..7ce9b986a1e 100644 --- a/zk_toolbox/crates/config/src/explorer.rs +++ b/zkstack_cli/crates/config/src/explorer.rs @@ -8,7 +8,7 @@ use crate::{ EXPLORER_CONFIG_FILE, EXPLORER_JS_CONFIG_FILE, LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, }, - traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{ReadConfig, SaveConfig, ZkStackConfig}, }; /// Explorer JSON configuration file. This file contains configuration for the explorer app. @@ -144,4 +144,4 @@ impl Default for ExplorerConfig { } } -impl ZkToolboxConfig for ExplorerConfig {} +impl ZkStackConfig for ExplorerConfig {} diff --git a/zk_toolbox/crates/config/src/explorer_compose.rs b/zkstack_cli/crates/config/src/explorer_compose.rs similarity index 98% rename from zk_toolbox/crates/config/src/explorer_compose.rs rename to zkstack_cli/crates/config/src/explorer_compose.rs index ca9abc1e3e2..13dd665d2e3 100644 --- a/zk_toolbox/crates/config/src/explorer_compose.rs +++ b/zkstack_cli/crates/config/src/explorer_compose.rs @@ -16,7 +16,7 @@ use crate::{ EXPLORER_WORKER_DOCKER_IMAGE, LOCAL_CHAINS_PATH, LOCAL_CONFIGS_PATH, }, docker_compose::{DockerComposeConfig, DockerComposeService}, - traits::ZkToolboxConfig, + traits::ZkStackConfig, EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL, }; @@ -72,7 +72,7 @@ pub struct ExplorerBackendComposeConfig { pub docker_compose: DockerComposeConfig, } -impl ZkToolboxConfig for ExplorerBackendComposeConfig {} +impl ZkStackConfig for ExplorerBackendComposeConfig {} impl ExplorerBackendComposeConfig { const API_NAME: &'static str = "api"; diff --git a/zk_toolbox/crates/config/src/external_node.rs b/zkstack_cli/crates/config/src/external_node.rs similarity index 82% rename from zk_toolbox/crates/config/src/external_node.rs rename to zkstack_cli/crates/config/src/external_node.rs index a07ff5dc140..7d884d3e234 100644 --- a/zk_toolbox/crates/config/src/external_node.rs +++ b/zkstack_cli/crates/config/src/external_node.rs @@ -2,7 +2,7 @@ use std::path::Path; use xshell::Shell; pub use zksync_config::configs::en_config::ENConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::EN_CONFIG_FILE, @@ -23,6 +23,6 @@ impl SaveConfig for ENConfig { impl ReadConfig for ENConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/file_config.rs b/zkstack_cli/crates/config/src/file_config.rs similarity index 100% rename from zk_toolbox/crates/config/src/file_config.rs rename to zkstack_cli/crates/config/src/file_config.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs b/zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs similarity index 71% rename from zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs rename to 
zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs index 636cffc49f8..4f73483b393 100644 --- a/zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs @@ -1,9 +1,9 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; -impl ZkToolboxConfig for AcceptOwnershipInput {} +impl ZkStackConfig for AcceptOwnershipInput {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct AcceptOwnershipInput { diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs similarity index 96% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs index 21c67cde5f2..17b2bac38a3 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -10,7 +10,7 @@ use zksync_basic_types::L2ChainId; use crate::{ consts::INITIAL_DEPLOYMENT_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, ContractsConfig, GenesisConfig, WalletsConfig, ERC20_DEPLOYMENT_FILE, }; @@ -61,7 +61,7 @@ impl FileConfigWithDefaultName for InitialDeploymentConfig { const FILE_NAME: &'static str = INITIAL_DEPLOYMENT_FILE; } -impl ZkToolboxConfig for InitialDeploymentConfig {} +impl ZkStackConfig for InitialDeploymentConfig {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct Erc20DeploymentConfig { @@ -72,7 +72,7 @@ impl FileConfigWithDefaultName for Erc20DeploymentConfig { const FILE_NAME: &'static str = ERC20_DEPLOYMENT_FILE; } -impl ZkToolboxConfig for Erc20DeploymentConfig {} +impl ZkStackConfig for Erc20DeploymentConfig {} impl Default for Erc20DeploymentConfig { fn default() -> Self { @@ -115,7 +115,7 @@ pub struct DeployL1Config { pub tokens: TokensDeployL1Config, } -impl ZkToolboxConfig for DeployL1Config {} +impl ZkStackConfig for DeployL1Config {} impl DeployL1Config { pub fn new( @@ -146,6 +146,7 @@ impl DeployL1Config { .diamond_init_minimal_l2_gas_price, bootloader_hash: genesis_config.bootloader_hash.unwrap(), default_aa_hash: genesis_config.default_aa_hash.unwrap(), + evm_emulator_hash: genesis_config.evm_emulator_hash, diamond_init_priority_tx_max_pubdata: initial_deployment_config .diamond_init_priority_tx_max_pubdata, diamond_init_pubdata_pricing_mode: initial_deployment_config @@ -194,6 +195,7 @@ pub struct ContractsDeployL1Config { pub diamond_init_minimal_l2_gas_price: u64, pub bootloader_hash: H256, pub default_aa_hash: H256, + pub evm_emulator_hash: Option, } #[derive(Debug, Deserialize, Serialize, Clone)] @@ -210,7 +212,7 @@ pub struct DeployErc20Config { pub additional_addresses_for_minting: Vec
, } -impl ZkToolboxConfig for DeployErc20Config {} +impl ZkStackConfig for DeployErc20Config {} impl DeployErc20Config { pub fn new( diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/mod.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs similarity index 95% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs index afda8d30988..31f0ae2ddaa 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{ consts::ERC20_CONFIGS_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Deserialize, Serialize, Clone)] @@ -37,7 +37,7 @@ pub struct DeployL1DeployedAddressesOutput { pub native_token_vault_addr: Address, } -impl ZkToolboxConfig for DeployL1Output {} +impl ZkStackConfig for DeployL1Output {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct DeployL1ContractsConfigOutput { @@ -99,4 +99,4 @@ impl FileConfigWithDefaultName for ERC20Tokens { const FILE_NAME: &'static str = ERC20_CONFIGS_FILE; } -impl ZkToolboxConfig for ERC20Tokens {} +impl ZkStackConfig for ERC20Tokens {} diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs similarity index 98% rename from zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs index 88db31b7563..bcc747d797c 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs @@ -5,7 +5,7 @@ use zksync_basic_types::{H256, U256}; use zksync_config::GenesisConfig; use crate::{ - forge_interface::deploy_ecosystem::input::InitialDeploymentConfig, traits::ZkToolboxConfig, + forge_interface::deploy_ecosystem::input::InitialDeploymentConfig, traits::ZkStackConfig, ChainConfig, ContractsConfig, EcosystemConfig, }; @@ -49,7 +49,7 @@ pub struct DeployGatewayCTMInput { force_deployments_data: String, } -impl ZkToolboxConfig for DeployGatewayCTMInput {} +impl ZkStackConfig for DeployGatewayCTMInput {} impl DeployGatewayCTMInput { pub fn new( diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs similarity index 92% rename from zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs rename to 
zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs index 9cbec63f0b9..33661fb6ebe 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs @@ -1,7 +1,7 @@ use ethers::abi::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DeployGatewayCTMOutput { @@ -12,7 +12,7 @@ pub struct DeployGatewayCTMOutput { pub diamond_cut_data: String, } -impl ZkToolboxConfig for DeployGatewayCTMOutput {} +impl ZkStackConfig for DeployGatewayCTMOutput {} #[derive(Debug, Serialize, Deserialize, Clone)] pub struct StateTransitionDeployedAddresses { diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs similarity index 92% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs index 68b637c2d52..87014baa755 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs @@ -3,9 +3,9 @@ use serde::{Deserialize, Serialize}; use types::L1BatchCommitmentMode; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig}; +use crate::{traits::ZkStackConfig, ChainConfig, ContractsConfig}; -impl ZkToolboxConfig for DeployL2ContractsInput {} +impl ZkStackConfig for DeployL2ContractsInput {} /// Fields corresponding to `contracts/l1-contracts/deploy-script-config-template/config-deploy-l2-config.toml` /// which are read by `contracts/l1-contracts/deploy-scripts/DeployL2Contracts.sol`. 
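
> Editor's note (not part of the patch): the hunks above and below mechanically rename the `ZkToolboxConfig` marker trait to `ZkStackConfig` across the forge-interface input and output types. For orientation, here is a minimal, self-contained sketch of that pattern: a serde config struct marked with the trait. The `ZkStackConfig` stand-in below only assumes the serde bounds implied by these diffs; the real trait lives in the crate's `traits` module (not shown in this patch) and may differ, and `MyScriptInput` is a made-up example type.

```rust
use ethers::types::Address;
use serde::{Deserialize, Serialize};

/// Stand-in for the real marker trait from `crates/config/src/traits.rs`
/// (not part of this patch); the actual trait may carry different bounds.
trait ZkStackConfig: Serialize + for<'de> Deserialize<'de> {}

/// Made-up forge-script input, shaped like the other `forge_interface` configs.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct MyScriptInput {
    chain_id: u64,
    bridgehub_proxy_addr: Address,
}

// Marking the type is all the other modules do; the crate's generic save/read
// helpers then handle writing it to and reading it from the config files.
impl ZkStackConfig for MyScriptInput {}

fn main() -> anyhow::Result<()> {
    let input = MyScriptInput {
        chain_id: 270,
        bridgehub_proxy_addr: Address::zero(),
    };
    // Round-trip through JSON just to show the serde bounds are satisfied.
    let json = serde_json::to_string_pretty(&input)?;
    let _back: MyScriptInput = serde_json::from_str(&json)?;
    Ok(())
}
```
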
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs similarity index 72% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs index 623eb9d4d65..508e349f5ed 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs @@ -1,12 +1,12 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; -impl ZkToolboxConfig for InitializeBridgeOutput {} -impl ZkToolboxConfig for DefaultL2UpgradeOutput {} -impl ZkToolboxConfig for ConsensusRegistryOutput {} -impl ZkToolboxConfig for Multicall3Output {} +impl ZkStackConfig for InitializeBridgeOutput {} +impl ZkStackConfig for DefaultL2UpgradeOutput {} +impl ZkStackConfig for ConsensusRegistryOutput {} +impl ZkStackConfig for Multicall3Output {} #[derive(Debug, Clone, Serialize, Deserialize)] pub struct InitializeBridgeOutput { diff --git a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/input.rs b/zkstack_cli/crates/config/src/forge_interface/gateway_preparation/input.rs similarity index 95% rename from zk_toolbox/crates/config/src/forge_interface/gateway_preparation/input.rs rename to zkstack_cli/crates/config/src/forge_interface/gateway_preparation/input.rs index 3689c64f0ff..a958915fd9b 100644 --- a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/gateway_preparation/input.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use zksync_basic_types::{web3::Bytes, Address}; use zksync_config::configs::GatewayConfig; -use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig}; +use crate::{traits::ZkStackConfig, ChainConfig, ContractsConfig}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GatewayPreparationConfig { @@ -20,7 +20,8 @@ pub struct GatewayPreparationConfig { pub access_control_restriction: Address, pub l1_nullifier_proxy_addr: Address, } -impl ZkToolboxConfig for GatewayPreparationConfig {} + +impl ZkStackConfig for GatewayPreparationConfig {} impl GatewayPreparationConfig { pub fn new( diff --git a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/mod.rs b/zkstack_cli/crates/config/src/forge_interface/gateway_preparation/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/gateway_preparation/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/gateway_preparation/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/output.rs b/zkstack_cli/crates/config/src/forge_interface/gateway_preparation/output.rs similarity index 78% rename from zk_toolbox/crates/config/src/forge_interface/gateway_preparation/output.rs rename to zkstack_cli/crates/config/src/forge_interface/gateway_preparation/output.rs index 72373eebc53..7160a0af4c8 100644 --- 
a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/gateway_preparation/output.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use zksync_basic_types::{Address, H256}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GatewayPreparationOutput { @@ -10,4 +10,4 @@ pub struct GatewayPreparationOutput { pub gateway_transaction_filterer_proxy: Address, } -impl ZkToolboxConfig for GatewayPreparationOutput {} +impl ZkStackConfig for GatewayPreparationOutput {} diff --git a/zk_toolbox/crates/config/src/forge_interface/mod.rs b/zkstack_cli/crates/config/src/forge_interface/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs b/zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs similarity index 83% rename from zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs index 9631fe74318..2af7502e0b7 100644 --- a/zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs @@ -2,7 +2,7 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig}; +use crate::{traits::ZkStackConfig, ChainConfig}; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DeployPaymasterInput { @@ -22,11 +22,11 @@ impl DeployPaymasterInput { } } -impl ZkToolboxConfig for DeployPaymasterInput {} +impl ZkStackConfig for DeployPaymasterInput {} #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DeployPaymasterOutput { pub paymaster: Address, } -impl ZkToolboxConfig for DeployPaymasterOutput {} +impl ZkStackConfig for DeployPaymasterOutput {} diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs similarity index 97% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs index bb6d61c6f8d..8689bb496c6 100644 --- a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use types::L1BatchCommitmentMode; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig}; +use crate::{traits::ZkStackConfig, ChainConfig, ContractsConfig}; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct RegisterChainL1Config { @@ -59,7 +59,7 @@ pub struct ChainL1Config { pub governance_min_delay: u64, } -impl ZkToolboxConfig for RegisterChainL1Config {} +impl ZkStackConfig for RegisterChainL1Config {} impl RegisterChainL1Config { pub fn new(chain_config: &ChainConfig, contracts: &ContractsConfig) -> anyhow::Result { diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/mod.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/mod.rs diff --git 
a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs similarity index 82% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs index 2281e8fc2d5..9d399ce3c25 100644 --- a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs @@ -1,7 +1,7 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct RegisterChainOutput { @@ -13,4 +13,4 @@ pub struct RegisterChainOutput { pub chain_proxy_admin_addr: Address, } -impl ZkToolboxConfig for RegisterChainOutput {} +impl ZkStackConfig for RegisterChainOutput {} diff --git a/zk_toolbox/crates/config/src/forge_interface/script_params.rs b/zkstack_cli/crates/config/src/forge_interface/script_params.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/script_params.rs rename to zkstack_cli/crates/config/src/forge_interface/script_params.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs b/zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs similarity index 86% rename from zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs index e8189c521fb..201cf86b734 100644 --- a/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use zksync_basic_types::{Address, L2ChainId, H256}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SetupLegacyBridgeInput { @@ -17,4 +17,4 @@ pub struct SetupLegacyBridgeInput { pub create2factory_addr: Address, } -impl ZkToolboxConfig for SetupLegacyBridgeInput {} +impl ZkStackConfig for SetupLegacyBridgeInput {} diff --git a/zk_toolbox/crates/config/src/gateway.rs b/zkstack_cli/crates/config/src/gateway.rs similarity index 92% rename from zk_toolbox/crates/config/src/gateway.rs rename to zkstack_cli/crates/config/src/gateway.rs index 37410b9a5e6..67b5ad327cc 100644 --- a/zk_toolbox/crates/config/src/gateway.rs +++ b/zkstack_cli/crates/config/src/gateway.rs @@ -3,7 +3,7 @@ use zksync_config::configs::{gateway::GatewayChainConfig, GatewayConfig}; use crate::{ forge_interface::deploy_gateway_ctm::output::DeployGatewayCTMOutput, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, GATEWAY_CHAIN_FILE, GATEWAY_FILE, }; @@ -11,7 +11,7 @@ impl FileConfigWithDefaultName for GatewayConfig { const FILE_NAME: &'static str = GATEWAY_FILE; } -impl ZkToolboxConfig for GatewayConfig {} +impl ZkStackConfig for GatewayConfig {} impl From for GatewayConfig { fn from(output: DeployGatewayCTMOutput) -> Self { @@ -43,4 +43,4 @@ impl FileConfigWithDefaultName for GatewayChainConfig { const FILE_NAME: &'static str = GATEWAY_CHAIN_FILE; } -impl ZkToolboxConfig for GatewayChainConfig {} +impl ZkStackConfig for GatewayChainConfig {} diff --git a/zk_toolbox/crates/config/src/general.rs b/zkstack_cli/crates/config/src/general.rs similarity index 96% rename from 
zk_toolbox/crates/config/src/general.rs rename to zkstack_cli/crates/config/src/general.rs index a8e7407edd0..0079105b66c 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zkstack_cli/crates/config/src/general.rs @@ -6,7 +6,7 @@ use url::Url; use xshell::Shell; use zksync_config::configs::object_store::ObjectStoreMode; pub use zksync_config::configs::GeneralConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::GENERAL_FILE, @@ -137,7 +137,7 @@ impl SaveConfig for GeneralConfig { impl ReadConfig for GeneralConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/genesis.rs b/zkstack_cli/crates/config/src/genesis.rs similarity index 64% rename from zk_toolbox/crates/config/src/genesis.rs rename to zkstack_cli/crates/config/src/genesis.rs index a6469893fed..2d9ac7fcdc6 100644 --- a/zk_toolbox/crates/config/src/genesis.rs +++ b/zkstack_cli/crates/config/src/genesis.rs @@ -1,9 +1,10 @@ use std::path::Path; +use anyhow::Context as _; use xshell::Shell; use zksync_basic_types::L1ChainId; pub use zksync_config::GenesisConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::GENESIS_FILE, @@ -11,11 +12,23 @@ use crate::{ ChainConfig, }; -pub fn update_from_chain_config(genesis: &mut GenesisConfig, config: &ChainConfig) { +pub fn update_from_chain_config( + genesis: &mut GenesisConfig, + config: &ChainConfig, +) -> anyhow::Result<()> { genesis.l2_chain_id = config.chain_id; // TODO(EVM-676): for now, the settlement layer is always the same as the L1 network genesis.l1_chain_id = L1ChainId(config.l1_network.chain_id()); genesis.l1_batch_commit_data_generator_mode = config.l1_batch_commit_data_generator_mode; + genesis.evm_emulator_hash = if config.evm_emulator { + Some(genesis.evm_emulator_hash.context( + "impossible to initialize a chain with EVM emulator: the template genesis config \ + does not contain EVM emulator hash", + )?) 
+ } else { + None + }; + Ok(()) } impl FileConfigWithDefaultName for GenesisConfig { @@ -32,6 +45,6 @@ impl SaveConfig for GenesisConfig { impl ReadConfig for GenesisConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/lib.rs b/zkstack_cli/crates/config/src/lib.rs similarity index 90% rename from zk_toolbox/crates/config/src/lib.rs rename to zkstack_cli/crates/config/src/lib.rs index 53ac423b823..4d4fb8da61d 100644 --- a/zk_toolbox/crates/config/src/lib.rs +++ b/zkstack_cli/crates/config/src/lib.rs @@ -10,7 +10,7 @@ pub use manipulations::*; pub use secrets::*; pub use wallet_creation::*; pub use wallets::*; -pub use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +pub use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; mod apps; mod chain; diff --git a/zk_toolbox/crates/config/src/manipulations.rs b/zkstack_cli/crates/config/src/manipulations.rs similarity index 100% rename from zk_toolbox/crates/config/src/manipulations.rs rename to zkstack_cli/crates/config/src/manipulations.rs diff --git a/zk_toolbox/crates/config/src/portal.rs b/zkstack_cli/crates/config/src/portal.rs similarity index 98% rename from zk_toolbox/crates/config/src/portal.rs rename to zkstack_cli/crates/config/src/portal.rs index c787c6cc702..2b6f0ffd515 100644 --- a/zk_toolbox/crates/config/src/portal.rs +++ b/zkstack_cli/crates/config/src/portal.rs @@ -9,7 +9,7 @@ use crate::{ LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, PORTAL_CONFIG_FILE, PORTAL_JS_CONFIG_FILE, }, - traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{ReadConfig, SaveConfig, ZkStackConfig}, }; /// Portal JSON configuration file. This file contains configuration for the portal app. 
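The `genesis.rs` hunk above makes `update_from_chain_config` fallible: it now returns `anyhow::Result<()>` and fails when a chain enables the EVM emulator but the template genesis config carries no `evm_emulator_hash`. A hedged call-site sketch of what existing callers have to do after this change; the surrounding function name and the exact import paths are assumptions based on the `config` crate layout shown in these hunks, not code from the patch:

```rust
use anyhow::Context as _;
use config::{update_from_chain_config, ChainConfig, GenesisConfig};

// Hypothetical caller: the helper used to be infallible, so call sites must
// now propagate the error instead of ignoring it.
fn prepare_chain_genesis(
    genesis: &mut GenesisConfig,
    chain: &ChainConfig,
) -> anyhow::Result<()> {
    // Errors if `chain.evm_emulator` is set but the template genesis config
    // has no `evm_emulator_hash` to copy over.
    update_from_chain_config(genesis, chain)
        .context("updating genesis config from chain config")?;
    Ok(())
}
```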
@@ -172,4 +172,4 @@ impl Default for PortalConfig { } } -impl ZkToolboxConfig for PortalConfig {} +impl ZkStackConfig for PortalConfig {} diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zkstack_cli/crates/config/src/secrets.rs similarity index 80% rename from zk_toolbox/crates/config/src/secrets.rs rename to zkstack_cli/crates/config/src/secrets.rs index f0a39148b03..cf0a9927c56 100644 --- a/zk_toolbox/crates/config/src/secrets.rs +++ b/zkstack_cli/crates/config/src/secrets.rs @@ -5,24 +5,22 @@ use common::db::DatabaseConfig; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; pub use zksync_config::configs::Secrets as SecretsConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::SECRETS_FILE, traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig}, }; -pub fn set_databases( +pub fn set_server_database( secrets: &mut SecretsConfig, server_db_config: &DatabaseConfig, - prover_db_config: &DatabaseConfig, ) -> anyhow::Result<()> { let database = secrets .database .as_mut() - .context("Databases must be presented")?; + .context("Server database must be presented")?; database.server_url = Some(SensitiveUrl::from(server_db_config.full_url())); - database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); Ok(()) } @@ -33,7 +31,7 @@ pub fn set_prover_database( let database = secrets .database .as_mut() - .context("Databases must be presented")?; + .context("Prover database must be presented")?; database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); Ok(()) } @@ -61,6 +59,6 @@ impl SaveConfig for SecretsConfig { impl ReadConfig for SecretsConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/traits.rs b/zkstack_cli/crates/config/src/traits.rs similarity index 95% rename from zk_toolbox/crates/config/src/traits.rs rename to zkstack_cli/crates/config/src/traits.rs index bb0722762e3..a4a4ad22c61 100644 --- a/zk_toolbox/crates/config/src/traits.rs +++ b/zkstack_cli/crates/config/src/traits.rs @@ -8,8 +8,8 @@ use serde::{de::DeserializeOwned, Serialize}; use url::Url; use xshell::Shell; -// Configs that we use only inside zk toolbox, we don't have protobuf implementation for them. -pub trait ZkToolboxConfig {} +// Configs that we use only inside ZK Stack CLI, we don't have protobuf implementation for them. 
+pub trait ZkStackConfig {} pub trait FileConfigWithDefaultName { const FILE_NAME: &'static str; @@ -19,7 +19,7 @@ pub trait FileConfigWithDefaultName { } } -impl SaveConfig for T { +impl SaveConfig for T { fn save(&self, shell: &Shell, path: impl AsRef) -> anyhow::Result<()> { save_with_comment(shell, path, self, "") } @@ -49,7 +49,7 @@ pub trait ReadConfig: Sized { impl ReadConfig for T where - T: DeserializeOwned + Clone + ZkToolboxConfig, + T: DeserializeOwned + Clone + ZkStackConfig, { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let error_context = || format!("Failed to parse config file {:?}.", path.as_ref()); diff --git a/zk_toolbox/crates/config/src/wallet_creation.rs b/zkstack_cli/crates/config/src/wallet_creation.rs similarity index 100% rename from zk_toolbox/crates/config/src/wallet_creation.rs rename to zkstack_cli/crates/config/src/wallet_creation.rs diff --git a/zk_toolbox/crates/config/src/wallets.rs b/zkstack_cli/crates/config/src/wallets.rs similarity index 75% rename from zk_toolbox/crates/config/src/wallets.rs rename to zkstack_cli/crates/config/src/wallets.rs index 9c87453954e..735848f6e34 100644 --- a/zk_toolbox/crates/config/src/wallets.rs +++ b/zkstack_cli/crates/config/src/wallets.rs @@ -1,11 +1,10 @@ use common::wallets::Wallet; -use ethers::types::H256; -use rand::Rng; +use rand::{CryptoRng, Rng}; use serde::{Deserialize, Serialize}; use crate::{ consts::WALLETS_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Clone, Serialize, Deserialize)] @@ -20,7 +19,7 @@ pub struct WalletsConfig { impl WalletsConfig { /// Generate random wallets - pub fn random(rng: &mut impl Rng) -> Self { + pub fn random(rng: &mut (impl CryptoRng + Rng)) -> Self { Self { deployer: Some(Wallet::random(rng)), operator: Wallet::random(rng), @@ -42,13 +41,6 @@ impl WalletsConfig { token_multiplier_setter: Some(Wallet::empty()), } } - pub fn deployer_private_key(&self) -> Option { - self.deployer.as_ref().and_then(|wallet| wallet.private_key) - } - - pub fn governor_private_key(&self) -> Option { - self.governor.private_key - } } impl FileConfigWithDefaultName for WalletsConfig { @@ -63,6 +55,6 @@ pub(crate) struct EthMnemonicConfig { pub(crate) base_path: String, } -impl ZkToolboxConfig for EthMnemonicConfig {} +impl ZkStackConfig for EthMnemonicConfig {} -impl ZkToolboxConfig for WalletsConfig {} +impl ZkStackConfig for WalletsConfig {} diff --git a/zk_toolbox/crates/git_version_macro/Cargo.toml b/zkstack_cli/crates/git_version_macro/Cargo.toml similarity index 100% rename from zk_toolbox/crates/git_version_macro/Cargo.toml rename to zkstack_cli/crates/git_version_macro/Cargo.toml diff --git a/zk_toolbox/crates/git_version_macro/src/lib.rs b/zkstack_cli/crates/git_version_macro/src/lib.rs similarity index 100% rename from zk_toolbox/crates/git_version_macro/src/lib.rs rename to zkstack_cli/crates/git_version_macro/src/lib.rs diff --git a/zk_toolbox/crates/types/Cargo.toml b/zkstack_cli/crates/types/Cargo.toml similarity index 100% rename from zk_toolbox/crates/types/Cargo.toml rename to zkstack_cli/crates/types/Cargo.toml diff --git a/zk_toolbox/crates/types/src/base_token.rs b/zkstack_cli/crates/types/src/base_token.rs similarity index 100% rename from zk_toolbox/crates/types/src/base_token.rs rename to zkstack_cli/crates/types/src/base_token.rs diff --git a/zk_toolbox/crates/types/src/l1_network.rs b/zkstack_cli/crates/types/src/l1_network.rs similarity index 100% rename from 
zk_toolbox/crates/types/src/l1_network.rs rename to zkstack_cli/crates/types/src/l1_network.rs diff --git a/zk_toolbox/crates/types/src/lib.rs b/zkstack_cli/crates/types/src/lib.rs similarity index 71% rename from zk_toolbox/crates/types/src/lib.rs rename to zkstack_cli/crates/types/src/lib.rs index 8b647057105..075e39345bc 100644 --- a/zk_toolbox/crates/types/src/lib.rs +++ b/zkstack_cli/crates/types/src/lib.rs @@ -10,5 +10,5 @@ pub use prover_mode::*; pub use token_info::*; pub use wallet_creation::*; pub use zksync_basic_types::{ - commitment::L1BatchCommitmentMode, protocol_version::ProtocolSemanticVersion, + commitment::L1BatchCommitmentMode, parse_h256, protocol_version::ProtocolSemanticVersion, }; diff --git a/zk_toolbox/crates/types/src/prover_mode.rs b/zkstack_cli/crates/types/src/prover_mode.rs similarity index 100% rename from zk_toolbox/crates/types/src/prover_mode.rs rename to zkstack_cli/crates/types/src/prover_mode.rs diff --git a/zk_toolbox/crates/types/src/token_info.rs b/zkstack_cli/crates/types/src/token_info.rs similarity index 100% rename from zk_toolbox/crates/types/src/token_info.rs rename to zkstack_cli/crates/types/src/token_info.rs diff --git a/zk_toolbox/crates/types/src/wallet_creation.rs b/zkstack_cli/crates/types/src/wallet_creation.rs similarity index 100% rename from zk_toolbox/crates/types/src/wallet_creation.rs rename to zkstack_cli/crates/types/src/wallet_creation.rs diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zkstack_cli/crates/zkstack/Cargo.toml similarity index 69% rename from zk_toolbox/crates/zk_inception/Cargo.toml rename to zkstack_cli/crates/zkstack/Cargo.toml index c95b2256f58..0a66036854e 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zkstack_cli/crates/zkstack/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "zk_inception" +name = "zkstack" version = "0.1.0" edition.workspace = true homepage.workspace = true @@ -12,34 +12,51 @@ keywords.workspace = true [dependencies] anyhow.workspace = true +chrono.workspace = true clap.workspace = true +clap_complete.workspace = true +clap-markdown.workspace = true cliclack.workspace = true +common.workspace = true config.workspace = true +dirs.workspace = true +ethers.workspace = true +futures.workspace = true human-panic.workspace = true lazy_static.workspace = true -serde_yaml.workspace = true +secrecy.workspace = true serde.workspace = true serde_json.workspace = true -xshell.workspace = true -ethers.workspace = true -common.workspace = true -tokio.workspace = true -types.workspace = true +serde_yaml.workspace = true +slugify-rs.workspace = true strum.workspace = true +sqruff-lib = "0.19.0" +thiserror.workspace = true +tokio.workspace = true toml.workspace = true +types.workspace = true url.workspace = true -thiserror.workspace = true -zksync_config.workspace = true -zksync_system_constants.workspace = true -slugify-rs.workspace = true +xshell.workspace = true zksync_basic_types.workspace = true -clap-markdown.workspace = true +zksync_config.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_crypto.workspace = true +zksync_protobuf.workspace = true +zksync_protobuf_config.workspace = true zksync_types.workspace = true zksync_web3_decl.workspace = true -secrecy.workspace = true +zksync_system_constants.workspace = true +prost.workspace = true +reqwest = "0.12.8" + +[dev-dependencies] +rand.workspace = true +zksync_consensus_utils.workspace = true [build-dependencies] -eyre.workspace = true +anyhow.workspace = true +clap_complete.workspace = true +dirs.workspace 
= true ethers.workspace = true +xshell.workspace = true +zksync_protobuf_build.workspace = true diff --git a/zk_toolbox/crates/zk_inception/README.md b/zkstack_cli/crates/zkstack/README.md similarity index 89% rename from zk_toolbox/crates/zk_inception/README.md rename to zkstack_cli/crates/zkstack/README.md index 904b1421e3a..f352d96fec4 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zkstack_cli/crates/zkstack/README.md @@ -17,8 +17,12 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception chain initialize-bridges`↴](#zk_inception-chain-initialize-bridges) - [`zk_inception chain deploy-l2-contracts`↴](#zk_inception-chain-deploy-l2-contracts) - [`zk_inception chain upgrader`↴](#zk_inception-chain-upgrader) +- [`zk_inception chain deploy-consensus-registry`↴](#zk_inception-chain-deploy-consensus-registry) +- [`zk_inception chain deploy-multicall3`↴](#zk_inception-chain-deploy-multicall3) - [`zk_inception chain deploy-paymaster`↴](#zk_inception-chain-deploy-paymaster) - [`zk_inception chain update-token-multiplier-setter`↴](#zk_inception-chain-update-token-multiplier-setter) +- [`zk_inception consensus set-attester-committee`↴](#zk_inception-consensus-set-attester-committee) +- [`zk_inception consensus get-attester-committee`↴](#zk_inception-consensus-get-attester-committee) - [`zk_inception prover`↴](#zk_inception-prover) - [`zk_inception prover init`↴](#zk_inception-prover-init) - [`zk_inception prover setup-keys`↴](#zk_inception-prover-setup-keys) @@ -38,7 +42,7 @@ This document contains the help content for the `zk_inception` command-line prog ## `zk_inception` -ZK Toolbox is a set of tools for working with zk stack. +ZK Stack CLI is a set of tools for working with zk stack. **Usage:** `zk_inception [OPTIONS] ` @@ -364,6 +368,18 @@ Deploy Default Upgrader e.g.: `zk_inception init -a --private-key=` +## `zk_inception chain deploy-consensus-registry` + +Deploy Consensus Registry smart contract + +**Usage:** `zk_inception chain deploy-consensus-registry` + +## `zk_inception chain deploy-multicall3` + +Deploy Multicall3 smart contract + +**Usage:** `zk_inception chain deploy-multicall3` + ## `zk_inception chain deploy-paymaster` Deploy paymaster smart contract @@ -414,6 +430,47 @@ Update Token Multiplier Setter address on L1 e.g.: `zk_inception init -a --private-key=` +## `zk_inception consensus` + +Consensus related commands + +**Usage:** `zk_inception consensus ` + +###### **Subcommands:** + +- `set-attester-committee` — Set attester committee +- `get-attester-committee` — Get attester committee + +## `zk_inception consensus set-attester-committee` + +Set attester committee in the consensus registry smart contract. Requires `consensus_registry` and `multicall3` +contracts to be deployed. + +**Usage:** `zk_inception consensus set-attester-committee [OPTIONS]` + +###### **Options:** + +- `--from-genesis` — Set attester committee to `consensus.genesis_spec.attesters` in general.yaml Mutually exclusive + with `--from-file`. +- `--from-file ` — Set attester committee to committee specified in yaml file at `PATH`. + Mutually exclusive with `--from-genesis`. File format is specified in + `zk_inception/src/commands/consensus/proto/mod.proto`. 
Example: + + ```yaml + attesters: + - key: attester:public:secp256k1:0339d4b0cdd9896d3929631a4e5e9a5b4919f52592bec571d70bb0e50a3a824714 + weight: 1 + - key: attester:public:secp256k1:024897d8c10d7a57d108cfe2a724d7824c657f219ef5d9f7674810a6746c19fa7b + weight: 1 + ``` + +## `zk_inception consensus get-attester-committee` + +Requires `consensus_registry` and `multicall3` contracts to be deployed. Fetches attester committee from the consensus +registry contract and prints it. + +**Usage:** `zk_inception consensus get-attester-committee` + ## `zk_inception prover` Prover related commands @@ -423,7 +480,6 @@ Prover related commands ###### **Subcommands:** - `init` — Initialize prover -- `generate-sk` — Generate setup keys - `run` — Run prover - `init-bellman-cuda` — Initialize bellman-cuda @@ -452,7 +508,11 @@ Initialize prover - `--public-location ` - `--public-project-id ` - `--bellman-cuda-dir ` -- `--download-key ` +- `--bellman-cuda` + + Possible values: `true`, `false` + +- `--setup-compressor-key ` Possible values: `true`, `false` @@ -508,6 +568,10 @@ Run prover Possible values: `true`, `false` +- `--tag' - Tag of the docker image to run. + + Default value is `latest2.0` but you can specify your prefered one. + - `--round ` Possible values: `all-rounds`, `basic-circuits`, `leaf-aggregation`, `node-aggregation`, `recursion-tip`, `scheduler` diff --git a/zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json b/zkstack_cli/crates/zkstack/abi/ConsensusRegistry.json similarity index 100% rename from zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json rename to zkstack_cli/crates/zkstack/abi/ConsensusRegistry.json diff --git a/zkstack_cli/crates/zkstack/build.rs b/zkstack_cli/crates/zkstack/build.rs new file mode 100644 index 00000000000..e52e952bf73 --- /dev/null +++ b/zkstack_cli/crates/zkstack/build.rs @@ -0,0 +1,149 @@ +use std::path::{Path, PathBuf}; + +use anyhow::{anyhow, Context}; +use ethers::contract::Abigen; +use xshell::{cmd, Shell}; + +const COMPLETION_DIR: &str = "completion"; + +fn main() -> anyhow::Result<()> { + let outdir = PathBuf::from(std::env::var("OUT_DIR")?).canonicalize()?; + Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json") + .map_err(|_| anyhow!("Failed ABI deserialization"))? + .generate() + .map_err(|_| anyhow!("Failed ABI generation"))? + .write_to_file(outdir.join("consensus_registry_abi.rs")) + .context("Failed to write ABI to file")?; + + if let Err(e) = build_dependencies() { + println!("cargo:error=It was not possible to install projects dependencies"); + println!("cargo:error={}", e); + } + + if let Err(e) = configure_shell_autocompletion() { + println!("cargo:warning=It was not possible to install autocomplete scripts. Please generate them manually with `zkstack autocomplete`"); + println!("cargo:error={}", e); + }; + + zksync_protobuf_build::Config { + input_root: "src/commands/consensus/proto".into(), + proto_root: "zksync/toolbox/consensus".into(), + dependencies: vec!["::zksync_protobuf_config::proto".parse().unwrap()], + protobuf_crate: "::zksync_protobuf".parse().unwrap(), + is_public: false, + } + .generate() + .unwrap(); + Ok(()) +} + +fn configure_shell_autocompletion() -> anyhow::Result<()> { + // Array of supported shells + let shells = [ + clap_complete::Shell::Bash, + clap_complete::Shell::Fish, + clap_complete::Shell::Zsh, + ]; + + for shell in shells { + std::fs::create_dir_all(&shell.autocomplete_folder()?) 
+ .context("it was impossible to create the configuration directory")?; + + let src = Path::new(COMPLETION_DIR).join(shell.autocomplete_file_name()?); + let dst = shell + .autocomplete_folder()? + .join(shell.autocomplete_file_name()?); + + std::fs::copy(src, dst)?; + + shell + .configure_autocomplete() + .context("failed to run extra configuration requirements")?; + } + + Ok(()) +} + +pub trait ShellAutocomplete { + fn autocomplete_folder(&self) -> anyhow::Result; + fn autocomplete_file_name(&self) -> anyhow::Result; + /// Extra steps required for shells enable command autocomplete. + fn configure_autocomplete(&self) -> anyhow::Result<()>; +} + +impl ShellAutocomplete for clap_complete::Shell { + fn autocomplete_folder(&self) -> anyhow::Result { + let home_dir = dirs::home_dir().context("missing home folder")?; + + match self { + clap_complete::Shell::Bash => Ok(home_dir.join(".bash_completion.d")), + clap_complete::Shell::Fish => Ok(home_dir.join(".config/fish/completions")), + clap_complete::Shell::Zsh => Ok(home_dir.join(".zsh/completion")), + _ => anyhow::bail!("unsupported shell"), + } + } + + fn autocomplete_file_name(&self) -> anyhow::Result { + let crate_name = env!("CARGO_PKG_NAME"); + + match self { + clap_complete::Shell::Bash => Ok(format!("{}.sh", crate_name)), + clap_complete::Shell::Fish => Ok(format!("{}.fish", crate_name)), + clap_complete::Shell::Zsh => Ok(format!("_{}.zsh", crate_name)), + _ => anyhow::bail!("unsupported shell"), + } + } + + fn configure_autocomplete(&self) -> anyhow::Result<()> { + match self { + clap_complete::Shell::Bash | clap_complete::Shell::Zsh => { + let shell = &self.to_string().to_lowercase(); + let completion_file = self + .autocomplete_folder()? + .join(self.autocomplete_file_name()?); + + // Source the completion file inside .{shell}rc + let shell_rc = dirs::home_dir() + .context("missing home directory")? 
+ .join(format!(".{}rc", shell)); + + if shell_rc.exists() { + let shell_rc_content = std::fs::read_to_string(&shell_rc) + .context(format!("could not read .{}rc", shell))?; + + if !shell_rc_content.contains("# zkstack completion") { + std::fs::write( + shell_rc, + format!( + "{}\n# zkstack completion\nsource \"{}\"\n", + shell_rc_content, + completion_file.to_str().unwrap() + ), + ) + .context(format!("could not write .{}rc", shell))?; + } + } else { + println!( + "cargo:warning=Please add the following line to your .{}rc:", + shell + ); + println!("cargo:warning=source {}", completion_file.to_str().unwrap()); + } + } + _ => (), + } + + Ok(()) + } +} + +fn build_dependencies() -> anyhow::Result<()> { + let shell = Shell::new()?; + let code_dir = Path::new("../"); + + let _dir_guard = shell.push_dir(code_dir); + + cmd!(shell, "yarn install") + .run() + .context("Failed to install dependencies") +} diff --git a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh new file mode 100644 index 00000000000..3ea3980e68f --- /dev/null +++ b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh @@ -0,0 +1,5151 @@ +#compdef zkstack + +autoload -U is-at-least + +_zkstack() { + typeset -A opt_args + typeset -a _arguments_options + local ret=1 + + if is-at-least 5.2; then + _arguments_options=(-s -S -C) + else + _arguments_options=(-s -C) + fi + + local context curcontext="$curcontext" state line + _arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +'-V[Print version]' \ +'--version[Print version]' \ +":: :_zkstack_commands" \ +"*::: :->zkstack" \ +&& ret=0 + case $state in + (zkstack) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-command-$line[1]:" + case $line[1] in + (autocomplete) +_arguments "${_arguments_options[@]}" : \ +'--generate=[The shell to generate the autocomplete script for]:GENERATOR:(bash elvish fish powershell zsh)' \ +'-o+[The out directory to write the autocomplete script to]:OUT:_files' \ +'--out=[The out directory to write the autocomplete script to]:OUT:_files' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(ecosystem) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__ecosystem_commands" \ +"*::: :->ecosystem" \ +&& ret=0 + + case $state in + (ecosystem) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-ecosystem-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--ecosystem-name=[]:ECOSYSTEM_NAME: ' \ +'--l1-network=[L1 Network]:L1_NETWORK:(localhost sepolia holesky mainnet)' \ +'--link-to-code=[Code link]:LINK_TO_CODE:_files -/' \ +'--chain-name=[]:CHAIN_NAME: ' \ +'--chain-id=[Chain ID]:CHAIN_ID: ' \ +'--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ +'--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" +random\:"Generate random wallets" +empty\:"Generate placeholder 
wallets" +in-file\:"Specify file with wallets"))' \ +'--wallet-path=[Wallet path]:WALLET_PATH:_files' \ +'--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS: ' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ +'--set-as-default=[Set as default chain]' \ +'--evm-emulator=[Enable EVM emulator]' \ +'--start-containers=[Start reth and postgres containers after creation]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--legacy-bridge[]' \ +'--skip-submodules-checkout[Skip submodules checkout]' \ +'--skip-contract-compilation-override[Skip contract compilation override]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +'--sender=[Address of the transaction sender]:SENDER: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'-o+[Output directory for the generated files]:OUT:_files' \ +'--out=[Output directory for the generated files]:OUT:_files' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--deploy-erc20=[Deploy ERC20 contracts]' \ +'--deploy-ecosystem=[Deploy ecosystem contracts]' \ +'--ecosystem-contracts-path=[Path to ecosystem contracts]:ECOSYSTEM_CONTRACTS_PATH:_files' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--deploy-paymaster=[Deploy Paymaster contract]' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'-o+[Enable Grafana]' \ +'--observability=[Enable Grafana]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-d[]' \ +'--dont-drop[]' \ +'--ecosystem-only[Initialize ecosystem only and skip chain initialization (chain can be initialized later with \`chain init\` subcommand)]' \ +'--dev[Use defaults for all options and flags. 
Suitable for local development]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'--skip-submodules-checkout[Skip submodules checkout]' \ +'--skip-contract-compilation-override[Skip contract compilation override]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +'::name:' \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__ecosystem__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-ecosystem-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(chain) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__chain_commands" \ +"*::: :->chain" \ +&& ret=0 + + case $state in + (chain) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--chain-name=[]:CHAIN_NAME: ' \ +'--chain-id=[Chain ID]:CHAIN_ID: ' \ +'--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ +'--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" +random\:"Generate random wallets" +empty\:"Generate placeholder wallets" +in-file\:"Specify file with wallets"))' \ +'--wallet-path=[Wallet path]:WALLET_PATH:_files' \ +'--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS: ' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ +'--set-as-default=[Set as default chain]' \ +'--evm-emulator=[Enable EVM emulator]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--legacy-bridge[]' \ +'--skip-submodules-checkout[Skip submodules checkout]' \ +'--skip-contract-compilation-override[Skip contract compilation override]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with 
'\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +'-o+[Output directory for the generated files]:OUT:_files' \ +'--out=[Output directory for the generated files]:OUT:_files' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--deploy-paymaster=[]' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-d[]' \ +'--dont-drop[]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'--dev[Use defaults for all options and flags. 
Suitable for local development]' \ +'--skip-submodules-checkout[Skip submodules checkout]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +":: :_zkstack__chain__init_commands" \ +"*::: :->init" \ +&& ret=0 + + case $state in + (init) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-init-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__init__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-init-help-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(genesis) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__chain__genesis_commands" \ +"*::: :->genesis" \ +&& ret=0 + + case $state in + (genesis) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-genesis-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__genesis__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-genesis-help-command-$line[1]:" + case $line[1] in + (init-database) +_arguments 
"${_arguments_options[@]}" : \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(register-chain) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-l2-contracts) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(accept-chain-ownership) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(initialize-bridges) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; 
+(deploy-consensus-registry) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-multicall3) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-upgrader) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-paymaster) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(update-token-multiplier-setter) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a 
custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(convert-to-gateway) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(migrate-to-gateway) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--gateway-chain-name=[]:GATEWAY_CHAIN_NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(migrate-from-gateway) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--gateway-chain-name=[]:GATEWAY_CHAIN_NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; 
+(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__help__init_commands" \ +"*::: :->init" \ +&& ret=0 + + case $state in + (init) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-help-init-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(genesis) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__help__genesis_commands" \ +"*::: :->genesis" \ +&& ret=0 + + case $state in + (genesis) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-help-genesis-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(register-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-l2-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(accept-chain-ownership) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(initialize-bridges) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-consensus-registry) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-multicall3) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-upgrader) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-paymaster) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(update-token-multiplier-setter) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(convert-to-gateway) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate-to-gateway) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate-from-gateway) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(dev) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev_commands" \ +"*::: :->dev" \ +&& ret=0 + + case $state in + (dev) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-command-$line[1]:" + case $line[1] in + (database) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__database_commands" \ +"*::: :->database" \ +&& ret=0 + + case $state in + (database) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-database-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. 
If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +'--database=[Database to create new migration for]:DATABASE:(prover core)' \ +'--name=[Migration name]:NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. 
If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__database__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-database-help-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(test) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__test_commands" \ +"*::: :->test" \ +&& ret=0 + + case $state in + (test) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-test-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +'-t+[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN: ' \ +'--test-pattern=[Run just the tests matching a pattern. 
Same as the -t flag on jest.]:TEST_PATTERN: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-e[Run tests for external node]' \ +'--external-node[Run tests for external node]' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'--no-kill[The test will not kill all the nodes during execution]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'--enable-consensus[Enable consensus]' \ +'-e[Run tests for external node]' \ +'--external-node[Run tests for external node]' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'--no-kill[The test will not kill all the nodes during execution]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-s[Run recovery from a snapshot instead of genesis]' \ +'--snapshot[Run recovery from a snapshot instead of genesis]' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'--no-kill[The test will not kill all the nodes during execution]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +'--options=[Cargo test flags]:OPTIONS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ 
+'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__test__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-test-help-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(clean) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__clean_commands" \ +"*::: :->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__clean__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-clean-help-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(snapshot) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ 
+'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__snapshot__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-snapshot-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +'*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ +'*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-c[]' \ +'--check[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-c[]' \ +'--check[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +'*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ +'*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__fmt__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-fmt-help-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(prover) +_arguments 
"${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +'--number=[]:NUMBER: ' \ +'--version=[]:VERSION: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--default[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +'--version=[]:VERSION: ' \ +'--snark-wrapper=[]:SNARK_WRAPPER: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--default[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__prover__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-prover-help-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(contracts) +_arguments "${_arguments_options[@]}" : \ +'--l1-contracts=[Build L1 contracts]' \ +'--l1-da-contracts=[Build L1 DA contracts]' \ +'--l2-contracts=[Build L2 contracts]' \ +'--system-contracts=[Build system contracts]' \ +'--test-contracts=[Build test contracts]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +'-p+[Path to the config file to override]:PATH: ' \ +'--path=[Path to the config file to override]:PATH: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +'--file=[]:FILE:_files' \ +'--private-key=[]:PRIVATE_KEY: ' \ +'--l1-rpc-url=[]:L1_RPC_URL: ' \ +'--confirmations=[]:CONFIRMATIONS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(status) +_arguments "${_arguments_options[@]}" : \ +'-u+[URL of the health check endpoint]:URL: ' \ +'--url=[URL of the health check endpoint]:URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ 
+'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__status_commands" \ +"*::: :->status" \ +&& ret=0 + + case $state in + (status) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-status-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__status__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-status-help-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(generate-genesis) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-command-$line[1]:" + case $line[1] in + (database) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__database_commands" \ +"*::: :->database" \ +&& ret=0 + + case $state in + (database) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-database-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(test) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__test_commands" \ +"*::: :->test" \ +&& ret=0 + + case $state in + (test) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-test-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(clean) +_arguments 
"${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__clean_commands" \ +"*::: :->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(snapshot) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(status) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__status_commands" \ +"*::: :->status" \ +&& ret=0 + + case $state in + (status) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-status-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(generate-genesis) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-prover-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +'--proof-store-dir=[]:PROOF_STORE_DIR: ' \ +'--bucket-base-url=[]:BUCKET_BASE_URL: ' \ +'--credentials-file=[]:CREDENTIALS_FILE: ' \ +'--bucket-name=[]:BUCKET_NAME: 
' \ +'--location=[]:LOCATION: ' \ +'--project-id=[]:PROJECT_ID: ' \ +'--shall-save-to-public-bucket=[]:SHALL_SAVE_TO_PUBLIC_BUCKET:(true false)' \ +'--public-store-dir=[]:PUBLIC_STORE_DIR: ' \ +'--public-bucket-base-url=[]:PUBLIC_BUCKET_BASE_URL: ' \ +'--public-credentials-file=[]:PUBLIC_CREDENTIALS_FILE: ' \ +'--public-bucket-name=[]:PUBLIC_BUCKET_NAME: ' \ +'--public-location=[]:PUBLIC_LOCATION: ' \ +'--public-project-id=[]:PUBLIC_PROJECT_ID: ' \ +'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR: ' \ +'--bellman-cuda=[]' \ +'--setup-compressor-key=[]' \ +'--path=[]:PATH: ' \ +'--region=[]:REGION:(us europe asia)' \ +'--mode=[]:MODE:(download generate)' \ +'--setup-keys=[]' \ +'--setup-database=[]:SETUP_DATABASE:(true false)' \ +'--prover-db-url=[Prover database url without database name]:PROVER_DB_URL: ' \ +'--prover-db-name=[Prover database name]:PROVER_DB_NAME: ' \ +'-u+[Use default database urls and names]:USE_DEFAULT:(true false)' \ +'--use-default=[Use default database urls and names]:USE_DEFAULT:(true false)' \ +'-d+[]:DONT_DROP:(true false)' \ +'--dont-drop=[]:DONT_DROP:(true false)' \ +'--cloud-type=[]:CLOUD_TYPE:(gcp local)' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--dev[]' \ +'(--bellman-cuda-dir)--clone[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(setup-keys) +_arguments "${_arguments_options[@]}" : \ +'--region=[]:REGION:(us europe asia)' \ +'--mode=[]:MODE:(download generate)' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'--component=[]:COMPONENT:(gateway witness-generator witness-vector-generator prover circuit-prover compressor prover-job-monitor)' \ +'--round=[]:ROUND:(all-rounds basic-circuits leaf-aggregation node-aggregation recursion-tip scheduler)' \ +'--threads=[]:THREADS: ' \ +'--max-allocation=[Memory allocation limit in bytes (for prover component)]:MAX_ALLOCATION: ' \ +'--witness-vector-generator-count=[]:WITNESS_VECTOR_GENERATOR_COUNT: ' \ +'--max-allocation=[]:MAX_ALLOCATION: ' \ +'--docker=[]:DOCKER:(true false)' \ +'--tag=[]:TAG: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(init-bellman-cuda) +_arguments "${_arguments_options[@]}" : \ +'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'(--bellman-cuda-dir)--clone[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(compressor-keys) +_arguments "${_arguments_options[@]}" : \ +'--path=[]:PATH: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__prover__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-prover-help-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; 
+(setup-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init-bellman-cuda) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(compressor-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(server) +_arguments "${_arguments_options[@]}" : \ +'*--components=[Components of server to run]:COMPONENTS: ' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--genesis[Run server in genesis mode]' \ +'--build[Build server but don'\''t run it]' \ +'--uring[Enables uring support for RocksDB]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(external-node) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__external-node_commands" \ +"*::: :->external-node" \ +&& ret=0 + + case $state in + (external-node) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-external-node-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +'--db-url=[]:DB_URL: ' \ +'--db-name=[]:DB_NAME: ' \ +'--l1-rpc-url=[]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-u[Use default database urls and names]' \ +'--use-default[Use default database urls and names]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'*--components=[Components of server to run]:COMPONENTS: ' \ +'--enable-consensus=[Enable consensus]' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--reinit[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__external-node__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-external-node-help-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +'-o+[Enable Grafana]' \ +'--observability=[Enable Grafana]' \ 
+'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(contract-verifier) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__contract-verifier_commands" \ +"*::: :->contract-verifier" \ +&& ret=0 + + case $state in + (contract-verifier) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-contract-verifier-command-$line[1]:" + case $line[1] in + (run) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--zksolc-version=[Version of zksolc to install]:ZKSOLC_VERSION: ' \ +'--zkvyper-version=[Version of zkvyper to install]:ZKVYPER_VERSION: ' \ +'--solc-version=[Version of solc to install]:SOLC_VERSION: ' \ +'--era-vm-solc-version=[Version of era vm solc to install]:ERA_VM_SOLC_VERSION: ' \ +'--vyper-version=[Version of vyper to install]:VYPER_VERSION: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--only[Install only provided compilers]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__contract-verifier__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-contract-verifier-help-command-$line[1]:" + case $line[1] in + (run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(portal) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(explorer) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__explorer_commands" \ +"*::: :->explorer" \ +&& ret=0 + + case $state in + (explorer) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-explorer-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run-backend) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ 
+'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__explorer__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-explorer-help-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run-backend) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(consensus) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__consensus_commands" \ +"*::: :->consensus" \ +&& ret=0 + + case $state in + (consensus) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-consensus-command-$line[1]:" + case $line[1] in + (set-attester-committee) +_arguments "${_arguments_options[@]}" : \ +'--from-file=[Sets the attester committee in the consensus registry contract to the committee in the yaml file. File format is defined in \`commands/consensus/proto/mod.proto\`]:FROM_FILE:_files' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--from-genesis[Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(get-attester-committee) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__consensus__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-consensus-help-command-$line[1]:" + case $line[1] in + (set-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(get-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(update) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-c[Update only the config files]' \ +'--only-config[Update only the config files]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(markdown) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + 
curcontext="${curcontext%:*:*}:zkstack-help-command-$line[1]:" + case $line[1] in + (autocomplete) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(ecosystem) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__ecosystem_commands" \ +"*::: :->ecosystem" \ +&& ret=0 + + case $state in + (ecosystem) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-ecosystem-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(chain) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__chain_commands" \ +"*::: :->chain" \ +&& ret=0 + + case $state in + (chain) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-chain-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__chain__init_commands" \ +"*::: :->init" \ +&& ret=0 + + case $state in + (init) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-chain-init-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(genesis) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__chain__genesis_commands" \ +"*::: :->genesis" \ +&& ret=0 + + case $state in + (genesis) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-chain-genesis-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(register-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-l2-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(accept-chain-ownership) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(initialize-bridges) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-consensus-registry) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-multicall3) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-upgrader) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-paymaster) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(update-token-multiplier-setter) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(convert-to-gateway) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate-to-gateway) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate-from-gateway) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(dev) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev_commands" \ +"*::: :->dev" \ +&& ret=0 + + case $state in + (dev) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-command-$line[1]:" + case $line[1] in + (database) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__database_commands" \ 
+"*::: :->database" \ +&& ret=0 + + case $state in + (database) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-database-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(test) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__test_commands" \ +"*::: :->test" \ +&& ret=0 + + case $state in + (test) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-test-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(clean) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__clean_commands" \ +"*::: :->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(snapshot) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + 
curcontext="${curcontext%:*:*}:zkstack-help-dev-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(status) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__status_commands" \ +"*::: :->status" \ +&& ret=0 + + case $state in + (status) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-status-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(generate-genesis) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-prover-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init-bellman-cuda) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(compressor-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(external-node) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__external-node_commands" \ +"*::: :->external-node" \ +&& ret=0 + + case $state in + (external-node) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-external-node-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract-verifier) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__contract-verifier_commands" \ +"*::: :->contract-verifier" \ +&& ret=0 + + case $state in + (contract-verifier) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-contract-verifier-command-$line[1]:" + case $line[1] in + (run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(portal) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(explorer) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__explorer_commands" \ +"*::: :->explorer" \ +&& ret=0 + + case $state in + (explorer) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-explorer-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run-backend) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(consensus) +_arguments 
"${_arguments_options[@]}" : \ +":: :_zkstack__help__consensus_commands" \ +"*::: :->consensus" \ +&& ret=0 + + case $state in + (consensus) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-consensus-command-$line[1]:" + case $line[1] in + (set-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(get-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(update) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(markdown) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +} + +(( $+functions[_zkstack_commands] )) || +_zkstack_commands() { + local commands; commands=( +'autocomplete:Create shell autocompletion files' \ +'ecosystem:Ecosystem related commands' \ +'chain:Chain related commands' \ +'dev:Supervisor related commands' \ +'prover:Prover related commands' \ +'server:Run server' \ +'external-node:External Node related commands' \ +'containers:Run containers for local development' \ +'contract-verifier:Run contract verifier' \ +'portal:Run dapp-portal' \ +'explorer:Run block-explorer' \ +'consensus:Consensus utilities' \ +'update:Update ZKsync' \ +'markdown:Print markdown help' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack commands' commands "$@" +} +(( $+functions[_zkstack__autocomplete_commands] )) || +_zkstack__autocomplete_commands() { + local commands; commands=() + _describe -t commands 'zkstack autocomplete commands' commands "$@" +} +(( $+functions[_zkstack__chain_commands] )) || +_zkstack__chain_commands() { + local commands; commands=( +'create:Create a new chain, setting the necessary configurations for later initialization' \ +'build-transactions:Create unsigned transactions for chain deployment' \ +'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \ +'genesis:Run server genesis' \ +'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \ +'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \ +'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \ +'initialize-bridges:Initialize bridges on L2' \ +'deploy-consensus-registry:Deploy L2 consensus registry' \ +'deploy-multicall3:Deploy L2 multicall3' \ +'deploy-upgrader:Deploy Default Upgrader' \ +'deploy-paymaster:Deploy paymaster smart contract' \ +'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \ +'convert-to-gateway:Prepare chain to be an eligible gateway' \ +'migrate-to-gateway:Migrate chain to gateway' \ +'migrate-from-gateway:Migrate chain from gateway' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__accept-chain-ownership_commands] )) || +_zkstack__chain__accept-chain-ownership_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain accept-chain-ownership commands' commands "$@" +} +(( $+functions[_zkstack__chain__build-transactions_commands] )) || +_zkstack__chain__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__chain__convert-to-gateway_commands] )) || +_zkstack__chain__convert-to-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain convert-to-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__create_commands] )) || +_zkstack__chain__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain create commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-consensus-registry_commands] )) || +_zkstack__chain__deploy-consensus-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-consensus-registry commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-l2-contracts_commands] )) || +_zkstack__chain__deploy-l2-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-l2-contracts commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-multicall3_commands] )) || +_zkstack__chain__deploy-multicall3_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-multicall3 commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-paymaster_commands] )) || +_zkstack__chain__deploy-paymaster_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-paymaster commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-upgrader_commands] )) || +_zkstack__chain__deploy-upgrader_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-upgrader commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis_commands] )) || +_zkstack__chain__genesis_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain genesis commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help_commands] )) || +_zkstack__chain__genesis__help_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain genesis help commands' commands "$@" +} +(( 
$+functions[_zkstack__chain__genesis__help__help_commands] )) || +_zkstack__chain__genesis__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help help commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help__init-database_commands] )) || +_zkstack__chain__genesis__help__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help init-database commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help__server_commands] )) || +_zkstack__chain__genesis__help__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help server commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__init-database_commands] )) || +_zkstack__chain__genesis__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis init-database commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__server_commands] )) || +_zkstack__chain__genesis__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis server commands' commands "$@" +} +(( $+functions[_zkstack__chain__help_commands] )) || +_zkstack__chain__help_commands() { + local commands; commands=( +'create:Create a new chain, setting the necessary configurations for later initialization' \ +'build-transactions:Create unsigned transactions for chain deployment' \ +'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \ +'genesis:Run server genesis' \ +'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \ +'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \ +'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \ +'initialize-bridges:Initialize bridges on L2' \ +'deploy-consensus-registry:Deploy L2 consensus registry' \ +'deploy-multicall3:Deploy L2 multicall3' \ +'deploy-upgrader:Deploy Default Upgrader' \ +'deploy-paymaster:Deploy paymaster smart contract' \ +'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \ +'convert-to-gateway:Prepare chain to be an eligible gateway' \ +'migrate-to-gateway:Migrate chain to gateway' \ +'migrate-from-gateway:Migrate chain from gateway' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain help commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__accept-chain-ownership_commands] )) || +_zkstack__chain__help__accept-chain-ownership_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help accept-chain-ownership commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__build-transactions_commands] )) || +_zkstack__chain__help__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__convert-to-gateway_commands] )) || +_zkstack__chain__help__convert-to-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help convert-to-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__create_commands] )) || +_zkstack__chain__help__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help create commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-consensus-registry_commands] )) || +_zkstack__chain__help__deploy-consensus-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-consensus-registry commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-l2-contracts_commands] )) || +_zkstack__chain__help__deploy-l2-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-l2-contracts commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-multicall3_commands] )) || +_zkstack__chain__help__deploy-multicall3_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-multicall3 commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-paymaster_commands] )) || +_zkstack__chain__help__deploy-paymaster_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-paymaster commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-upgrader_commands] )) || +_zkstack__chain__help__deploy-upgrader_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-upgrader commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__genesis_commands] )) || +_zkstack__chain__help__genesis_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ + ) + _describe -t commands 'zkstack chain help genesis commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__genesis__init-database_commands] )) || +_zkstack__chain__help__genesis__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help genesis init-database commands' commands "$@" +} +(( 
$+functions[_zkstack__chain__help__genesis__server_commands] )) || +_zkstack__chain__help__genesis__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help genesis server commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__help_commands] )) || +_zkstack__chain__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help help commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__init_commands] )) || +_zkstack__chain__help__init_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ + ) + _describe -t commands 'zkstack chain help init commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__init__configs_commands] )) || +_zkstack__chain__help__init__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help init configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__initialize-bridges_commands] )) || +_zkstack__chain__help__initialize-bridges_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help initialize-bridges commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__migrate-from-gateway_commands] )) || +_zkstack__chain__help__migrate-from-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help migrate-from-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__migrate-to-gateway_commands] )) || +_zkstack__chain__help__migrate-to-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help migrate-to-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__register-chain_commands] )) || +_zkstack__chain__help__register-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help register-chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__update-token-multiplier-setter_commands] )) || +_zkstack__chain__help__update-token-multiplier-setter_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help update-token-multiplier-setter commands' commands "$@" +} +(( $+functions[_zkstack__chain__init_commands] )) || +_zkstack__chain__init_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain init commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__configs_commands] )) || +_zkstack__chain__init__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help_commands] )) || +_zkstack__chain__init__help_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain init help commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help__configs_commands] )) || +_zkstack__chain__init__help__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init help configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help__help_commands] )) || +_zkstack__chain__init__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init help help commands' commands "$@" +} +(( 
$+functions[_zkstack__chain__initialize-bridges_commands] )) || +_zkstack__chain__initialize-bridges_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain initialize-bridges commands' commands "$@" +} +(( $+functions[_zkstack__chain__migrate-from-gateway_commands] )) || +_zkstack__chain__migrate-from-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain migrate-from-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__migrate-to-gateway_commands] )) || +_zkstack__chain__migrate-to-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain migrate-to-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__register-chain_commands] )) || +_zkstack__chain__register-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain register-chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__update-token-multiplier-setter_commands] )) || +_zkstack__chain__update-token-multiplier-setter_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain update-token-multiplier-setter commands' commands "$@" +} +(( $+functions[_zkstack__consensus_commands] )) || +_zkstack__consensus_commands() { + local commands; commands=( +'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \ +'get-attester-committee:Fetches the attester committee from the consensus registry contract' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack consensus commands' commands "$@" +} +(( $+functions[_zkstack__consensus__get-attester-committee_commands] )) || +_zkstack__consensus__get-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus get-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help_commands] )) || +_zkstack__consensus__help_commands() { + local commands; commands=( +'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \ +'get-attester-committee:Fetches the attester committee from the consensus registry contract' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack consensus help commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__get-attester-committee_commands] )) || +_zkstack__consensus__help__get-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help get-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__help_commands] )) || +_zkstack__consensus__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help help commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__set-attester-committee_commands] )) || +_zkstack__consensus__help__set-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help set-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__set-attester-committee_commands] )) || +_zkstack__consensus__set-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus set-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__containers_commands] )) || 
+_zkstack__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack containers commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier_commands] )) || +_zkstack__contract-verifier_commands() { + local commands; commands=( +'run:Run contract verifier' \ +'init:Download required binaries for contract verifier' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack contract-verifier commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help_commands] )) || +_zkstack__contract-verifier__help_commands() { + local commands; commands=( +'run:Run contract verifier' \ +'init:Download required binaries for contract verifier' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack contract-verifier help commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__help_commands] )) || +_zkstack__contract-verifier__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help help commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__init_commands] )) || +_zkstack__contract-verifier__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help init commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__run_commands] )) || +_zkstack__contract-verifier__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help run commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__init_commands] )) || +_zkstack__contract-verifier__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier init commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__run_commands] )) || +_zkstack__contract-verifier__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier run commands' commands "$@" +} +(( $+functions[_zkstack__dev_commands] )) || +_zkstack__dev_commands() { + local commands; commands=( +'database:Database related commands' \ +'test:Run tests' \ +'clean:Clean artifacts' \ +'snapshot:Snapshots creator' \ +'lint:Lint code' \ +'fmt:Format code' \ +'prover:Protocol version used by provers' \ +'contracts:Build contracts' \ +'config-writer:Overwrite general config' \ +'send-transactions:Send transactions from file' \ +'status:Get status of the server' \ +'generate-genesis:Generate new genesis file based on current contracts' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean_commands] )) || +_zkstack__dev__clean_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev clean commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__all_commands] )) || +_zkstack__dev__clean__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean all commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__containers_commands] )) || +_zkstack__dev__clean__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean 
containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__contracts-cache_commands] )) || +_zkstack__dev__clean__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help_commands] )) || +_zkstack__dev__clean__help_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev clean help commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__all_commands] )) || +_zkstack__dev__clean__help__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help all commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__containers_commands] )) || +_zkstack__dev__clean__help__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__contracts-cache_commands] )) || +_zkstack__dev__clean__help__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__help_commands] )) || +_zkstack__dev__clean__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__config-writer_commands] )) || +_zkstack__dev__config-writer_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev config-writer commands' commands "$@" +} +(( $+functions[_zkstack__dev__contracts_commands] )) || +_zkstack__dev__contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__database_commands] )) || +_zkstack__dev__database_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' 
\ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev database commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__check-sqlx-data_commands] )) || +_zkstack__dev__database__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__drop_commands] )) || +_zkstack__dev__database__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help_commands] )) || +_zkstack__dev__database__help_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev database help commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__check-sqlx-data_commands] )) || +_zkstack__dev__database__help__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__drop_commands] )) || +_zkstack__dev__database__help__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__help_commands] )) || +_zkstack__dev__database__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__migrate_commands] )) || +_zkstack__dev__database__help__migrate_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help migrate commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__new-migration_commands] )) || +_zkstack__dev__database__help__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help new-migration commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__prepare_commands] )) || +_zkstack__dev__database__help__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help prepare commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__reset_commands] )) || +_zkstack__dev__database__help__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help reset commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__setup_commands] )) || +_zkstack__dev__database__help__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help setup commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__migrate_commands] )) || +_zkstack__dev__database__migrate_commands() { + 
local commands; commands=() + _describe -t commands 'zkstack dev database migrate commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__new-migration_commands] )) || +_zkstack__dev__database__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database new-migration commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__prepare_commands] )) || +_zkstack__dev__database__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database prepare commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__reset_commands] )) || +_zkstack__dev__database__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database reset commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__setup_commands] )) || +_zkstack__dev__database__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database setup commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt_commands] )) || +_zkstack__dev__fmt_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev fmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__contract_commands] )) || +_zkstack__dev__fmt__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt contract commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help_commands] )) || +_zkstack__dev__fmt__help_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev fmt help commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__contract_commands] )) || +_zkstack__dev__fmt__help__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help contract commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__help_commands] )) || +_zkstack__dev__fmt__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__prettier_commands] )) || +_zkstack__dev__fmt__help__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help prettier commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__rustfmt_commands] )) || +_zkstack__dev__fmt__help__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__prettier_commands] )) || +_zkstack__dev__fmt__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt prettier commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__rustfmt_commands] )) || +_zkstack__dev__fmt__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__generate-genesis_commands] )) || +_zkstack__dev__generate-genesis_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev generate-genesis commands' commands "$@" +} +(( $+functions[_zkstack__dev__help_commands] )) || +_zkstack__dev__help_commands() { + local commands; commands=( +'database:Database related commands' \ +'test:Run tests' \ +'clean:Clean 
artifacts' \ +'snapshot:Snapshots creator' \ +'lint:Lint code' \ +'fmt:Format code' \ +'prover:Protocol version used by provers' \ +'contracts:Build contracts' \ +'config-writer:Overwrite general config' \ +'send-transactions:Send transactions from file' \ +'status:Get status of the server' \ +'generate-genesis:Generate new genesis file based on current contracts' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev help commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean_commands] )) || +_zkstack__dev__help__clean_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ + ) + _describe -t commands 'zkstack dev help clean commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean__all_commands] )) || +_zkstack__dev__help__clean__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help clean all commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean__containers_commands] )) || +_zkstack__dev__help__clean__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help clean containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean__contracts-cache_commands] )) || +_zkstack__dev__help__clean__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help clean contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__config-writer_commands] )) || +_zkstack__dev__help__config-writer_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help config-writer commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__contracts_commands] )) || +_zkstack__dev__help__contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database_commands] )) || +_zkstack__dev__help__database_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' 
\ + ) + _describe -t commands 'zkstack dev help database commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__check-sqlx-data_commands] )) || +_zkstack__dev__help__database__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__drop_commands] )) || +_zkstack__dev__help__database__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__migrate_commands] )) || +_zkstack__dev__help__database__migrate_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database migrate commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__new-migration_commands] )) || +_zkstack__dev__help__database__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database new-migration commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__prepare_commands] )) || +_zkstack__dev__help__database__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database prepare commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__reset_commands] )) || +_zkstack__dev__help__database__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database reset commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__setup_commands] )) || +_zkstack__dev__help__database__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database setup commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt_commands] )) || +_zkstack__dev__help__fmt_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ + ) + _describe -t commands 'zkstack dev help fmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt__contract_commands] )) || +_zkstack__dev__help__fmt__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help fmt contract commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt__prettier_commands] )) || +_zkstack__dev__help__fmt__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help fmt prettier commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt__rustfmt_commands] )) || +_zkstack__dev__help__fmt__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help fmt rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__generate-genesis_commands] )) || +_zkstack__dev__help__generate-genesis_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help generate-genesis commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__help_commands] )) || +_zkstack__dev__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__lint_commands] )) || +_zkstack__dev__help__lint_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help lint commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover_commands] )) || +_zkstack__dev__help__prover_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ + ) + _describe -t 
commands 'zkstack dev help prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover__info_commands] )) || +_zkstack__dev__help__prover__info_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help prover info commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover__insert-batch_commands] )) || +_zkstack__dev__help__prover__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help prover insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover__insert-version_commands] )) || +_zkstack__dev__help__prover__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help prover insert-version commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__send-transactions_commands] )) || +_zkstack__dev__help__send-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help send-transactions commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__snapshot_commands] )) || +_zkstack__dev__help__snapshot_commands() { + local commands; commands=( +'create:' \ + ) + _describe -t commands 'zkstack dev help snapshot commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__snapshot__create_commands] )) || +_zkstack__dev__help__snapshot__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help snapshot create commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__status_commands] )) || +_zkstack__dev__help__status_commands() { + local commands; commands=( +'ports:Show used ports' \ + ) + _describe -t commands 'zkstack dev help status commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__status__ports_commands] )) || +_zkstack__dev__help__status__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help status ports commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test_commands] )) || +_zkstack__dev__help__test_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ + ) + _describe -t commands 'zkstack dev help test commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__build_commands] )) || +_zkstack__dev__help__test__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test build commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__fees_commands] )) || +_zkstack__dev__help__test__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test fees commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__integration_commands] )) || +_zkstack__dev__help__test__integration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test integration commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__l1-contracts_commands] )) || +_zkstack__dev__help__test__l1-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test l1-contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__loadtest_commands] )) 
|| +_zkstack__dev__help__test__loadtest_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test loadtest commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__prover_commands] )) || +_zkstack__dev__help__test__prover_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__recovery_commands] )) || +_zkstack__dev__help__test__recovery_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test recovery commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__revert_commands] )) || +_zkstack__dev__help__test__revert_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test revert commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__rust_commands] )) || +_zkstack__dev__help__test__rust_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test rust commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__upgrade_commands] )) || +_zkstack__dev__help__test__upgrade_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test upgrade commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__wallet_commands] )) || +_zkstack__dev__help__test__wallet_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test wallet commands' commands "$@" +} +(( $+functions[_zkstack__dev__lint_commands] )) || +_zkstack__dev__lint_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev lint commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover_commands] )) || +_zkstack__dev__prover_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help_commands] )) || +_zkstack__dev__prover__help_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev prover help commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__help_commands] )) || +_zkstack__dev__prover__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__info_commands] )) || +_zkstack__dev__prover__help__info_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help info commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__insert-batch_commands] )) || +_zkstack__dev__prover__help__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__insert-version_commands] )) || +_zkstack__dev__prover__help__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help insert-version commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__info_commands] )) || +_zkstack__dev__prover__info_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover info commands' commands "$@" +} +(( 
$+functions[_zkstack__dev__prover__insert-batch_commands] )) || +_zkstack__dev__prover__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__insert-version_commands] )) || +_zkstack__dev__prover__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover insert-version commands' commands "$@" +} +(( $+functions[_zkstack__dev__send-transactions_commands] )) || +_zkstack__dev__send-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev send-transactions commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot_commands] )) || +_zkstack__dev__snapshot_commands() { + local commands; commands=( +'create:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev snapshot commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__create_commands] )) || +_zkstack__dev__snapshot__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev snapshot create commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__help_commands] )) || +_zkstack__dev__snapshot__help_commands() { + local commands; commands=( +'create:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev snapshot help commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__help__create_commands] )) || +_zkstack__dev__snapshot__help__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev snapshot help create commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__help__help_commands] )) || +_zkstack__dev__snapshot__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev snapshot help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__status_commands] )) || +_zkstack__dev__status_commands() { + local commands; commands=( +'ports:Show used ports' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev status commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__help_commands] )) || +_zkstack__dev__status__help_commands() { + local commands; commands=( +'ports:Show used ports' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev status help commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__help__help_commands] )) || +_zkstack__dev__status__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev status help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__help__ports_commands] )) || +_zkstack__dev__status__help__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev status help ports commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__ports_commands] )) || +_zkstack__dev__status__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev status ports commands' commands "$@" +} +(( $+functions[_zkstack__dev__test_commands] )) || +_zkstack__dev__test_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, 
accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev test commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__build_commands] )) || +_zkstack__dev__test__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test build commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__fees_commands] )) || +_zkstack__dev__test__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test fees commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help_commands] )) || +_zkstack__dev__test__help_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev test help commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__build_commands] )) || +_zkstack__dev__test__help__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help build commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__fees_commands] )) || +_zkstack__dev__test__help__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help fees commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__help_commands] )) || +_zkstack__dev__test__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__integration_commands] )) || +_zkstack__dev__test__help__integration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help integration commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__l1-contracts_commands] )) || +_zkstack__dev__test__help__l1-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help l1-contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__loadtest_commands] )) || +_zkstack__dev__test__help__loadtest_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help loadtest commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__prover_commands] )) || +_zkstack__dev__test__help__prover_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__recovery_commands] )) || +_zkstack__dev__test__help__recovery_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help recovery commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__revert_commands] )) || +_zkstack__dev__test__help__revert_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help revert commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__rust_commands] )) || 
+_zkstack__dev__test__help__rust_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help rust commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__upgrade_commands] )) || +_zkstack__dev__test__help__upgrade_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help upgrade commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__wallet_commands] )) || +_zkstack__dev__test__help__wallet_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help wallet commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__integration_commands] )) || +_zkstack__dev__test__integration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test integration commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__l1-contracts_commands] )) || +_zkstack__dev__test__l1-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test l1-contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__loadtest_commands] )) || +_zkstack__dev__test__loadtest_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test loadtest commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__prover_commands] )) || +_zkstack__dev__test__prover_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__recovery_commands] )) || +_zkstack__dev__test__recovery_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test recovery commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__revert_commands] )) || +_zkstack__dev__test__revert_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test revert commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__rust_commands] )) || +_zkstack__dev__test__rust_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test rust commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__upgrade_commands] )) || +_zkstack__dev__test__upgrade_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test upgrade commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__wallet_commands] )) || +_zkstack__dev__test__wallet_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test wallet commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem_commands] )) || +_zkstack__ecosystem_commands() { + local commands; commands=( +'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \ +'build-transactions:Create transactions to build ecosystem contracts' \ +'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \ +'change-default-chain:Change the default chain' \ +'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack ecosystem commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__build-transactions_commands] )) || +_zkstack__ecosystem__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__change-default-chain_commands] 
)) || +_zkstack__ecosystem__change-default-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem change-default-chain commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__create_commands] )) || +_zkstack__ecosystem__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem create commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help_commands] )) || +_zkstack__ecosystem__help_commands() { + local commands; commands=( +'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \ +'build-transactions:Create transactions to build ecosystem contracts' \ +'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \ +'change-default-chain:Change the default chain' \ +'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack ecosystem help commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__build-transactions_commands] )) || +_zkstack__ecosystem__help__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__change-default-chain_commands] )) || +_zkstack__ecosystem__help__change-default-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help change-default-chain commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__create_commands] )) || +_zkstack__ecosystem__help__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help create commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__help_commands] )) || +_zkstack__ecosystem__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help help commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__init_commands] )) || +_zkstack__ecosystem__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help init commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__help__setup-observability_commands] )) || +_zkstack__ecosystem__help__setup-observability_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem help setup-observability commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__init_commands] )) || +_zkstack__ecosystem__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem init commands' commands "$@" +} +(( $+functions[_zkstack__ecosystem__setup-observability_commands] )) || +_zkstack__ecosystem__setup-observability_commands() { + local commands; commands=() + _describe -t commands 'zkstack ecosystem setup-observability commands' commands "$@" +} +(( $+functions[_zkstack__explorer_commands] )) || +_zkstack__explorer_commands() { + local commands; commands=( +'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' \ +'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. 
Uses default chain, unless --chain is passed' \ +'run:Run explorer app' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack explorer commands' commands "$@" +} +(( $+functions[_zkstack__explorer__help_commands] )) || +_zkstack__explorer__help_commands() { + local commands; commands=( +'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' \ +'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' \ +'run:Run explorer app' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack explorer help commands' commands "$@" +} +(( $+functions[_zkstack__explorer__help__help_commands] )) || +_zkstack__explorer__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer help help commands' commands "$@" +} +(( $+functions[_zkstack__explorer__help__init_commands] )) || +_zkstack__explorer__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer help init commands' commands "$@" +} +(( $+functions[_zkstack__explorer__help__run_commands] )) || +_zkstack__explorer__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer help run commands' commands "$@" +} +(( $+functions[_zkstack__explorer__help__run-backend_commands] )) || +_zkstack__explorer__help__run-backend_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer help run-backend commands' commands "$@" +} +(( $+functions[_zkstack__explorer__init_commands] )) || +_zkstack__explorer__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer init commands' commands "$@" +} +(( $+functions[_zkstack__explorer__run_commands] )) || +_zkstack__explorer__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer run commands' commands "$@" +} +(( $+functions[_zkstack__explorer__run-backend_commands] )) || +_zkstack__explorer__run-backend_commands() { + local commands; commands=() + _describe -t commands 'zkstack explorer run-backend commands' commands "$@" +} +(( $+functions[_zkstack__external-node_commands] )) || +_zkstack__external-node_commands() { + local commands; commands=( +'configs:Prepare configs for EN' \ +'init:Init databases' \ +'run:Run external node' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack external-node commands' commands "$@" +} +(( $+functions[_zkstack__external-node__configs_commands] )) || +_zkstack__external-node__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node configs commands' commands "$@" +} +(( $+functions[_zkstack__external-node__help_commands] )) || +_zkstack__external-node__help_commands() { + local commands; commands=( +'configs:Prepare configs for EN' \ +'init:Init databases' \ +'run:Run external node' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack external-node help commands' commands "$@" +} +(( $+functions[_zkstack__external-node__help__configs_commands] )) || +_zkstack__external-node__help__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node help configs commands' commands "$@" +} +(( 
$+functions[_zkstack__external-node__help__help_commands] )) || +_zkstack__external-node__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node help help commands' commands "$@" +} +(( $+functions[_zkstack__external-node__help__init_commands] )) || +_zkstack__external-node__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node help init commands' commands "$@" +} +(( $+functions[_zkstack__external-node__help__run_commands] )) || +_zkstack__external-node__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node help run commands' commands "$@" +} +(( $+functions[_zkstack__external-node__init_commands] )) || +_zkstack__external-node__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node init commands' commands "$@" +} +(( $+functions[_zkstack__external-node__run_commands] )) || +_zkstack__external-node__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack external-node run commands' commands "$@" +} +(( $+functions[_zkstack__help_commands] )) || +_zkstack__help_commands() { + local commands; commands=( +'autocomplete:Create shell autocompletion files' \ +'ecosystem:Ecosystem related commands' \ +'chain:Chain related commands' \ +'dev:Supervisor related commands' \ +'prover:Prover related commands' \ +'server:Run server' \ +'external-node:External Node related commands' \ +'containers:Run containers for local development' \ +'contract-verifier:Run contract verifier' \ +'portal:Run dapp-portal' \ +'explorer:Run block-explorer' \ +'consensus:Consensus utilities' \ +'update:Update ZKsync' \ +'markdown:Print markdown help' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack help commands' commands "$@" +} +(( $+functions[_zkstack__help__autocomplete_commands] )) || +_zkstack__help__autocomplete_commands() { + local commands; commands=() + _describe -t commands 'zkstack help autocomplete commands' commands "$@" +} +(( $+functions[_zkstack__help__chain_commands] )) || +_zkstack__help__chain_commands() { + local commands; commands=( +'create:Create a new chain, setting the necessary configurations for later initialization' \ +'build-transactions:Create unsigned transactions for chain deployment' \ +'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \ +'genesis:Run server genesis' \ +'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \ +'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \ +'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \ +'initialize-bridges:Initialize bridges on L2' \ +'deploy-consensus-registry:Deploy L2 consensus registry' \ +'deploy-multicall3:Deploy L2 multicall3' \ +'deploy-upgrader:Deploy Default Upgrader' \ +'deploy-paymaster:Deploy paymaster smart contract' \ +'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \ +'convert-to-gateway:Prepare chain to be an eligible gateway' \ +'migrate-to-gateway:Migrate chain to gateway' \ +'migrate-from-gateway:Migrate chain from gateway' \ + ) + _describe -t commands 'zkstack help chain commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__accept-chain-ownership_commands] )) || +_zkstack__help__chain__accept-chain-ownership_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain accept-chain-ownership commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__build-transactions_commands] )) || +_zkstack__help__chain__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__convert-to-gateway_commands] )) || +_zkstack__help__chain__convert-to-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain convert-to-gateway commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__create_commands] )) || +_zkstack__help__chain__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain create commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__deploy-consensus-registry_commands] )) || +_zkstack__help__chain__deploy-consensus-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain deploy-consensus-registry commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__deploy-l2-contracts_commands] )) || +_zkstack__help__chain__deploy-l2-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain deploy-l2-contracts commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__deploy-multicall3_commands] )) || +_zkstack__help__chain__deploy-multicall3_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain deploy-multicall3 commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__deploy-paymaster_commands] )) || +_zkstack__help__chain__deploy-paymaster_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain deploy-paymaster commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__deploy-upgrader_commands] )) || +_zkstack__help__chain__deploy-upgrader_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain deploy-upgrader commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__genesis_commands] )) || +_zkstack__help__chain__genesis_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ + ) + _describe -t commands 'zkstack help chain genesis commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__genesis__init-database_commands] )) || +_zkstack__help__chain__genesis__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain genesis init-database commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__genesis__server_commands] )) || 
+_zkstack__help__chain__genesis__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain genesis server commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__init_commands] )) || +_zkstack__help__chain__init_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ + ) + _describe -t commands 'zkstack help chain init commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__init__configs_commands] )) || +_zkstack__help__chain__init__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain init configs commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__initialize-bridges_commands] )) || +_zkstack__help__chain__initialize-bridges_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain initialize-bridges commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__migrate-from-gateway_commands] )) || +_zkstack__help__chain__migrate-from-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain migrate-from-gateway commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__migrate-to-gateway_commands] )) || +_zkstack__help__chain__migrate-to-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain migrate-to-gateway commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__register-chain_commands] )) || +_zkstack__help__chain__register-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain register-chain commands' commands "$@" +} +(( $+functions[_zkstack__help__chain__update-token-multiplier-setter_commands] )) || +_zkstack__help__chain__update-token-multiplier-setter_commands() { + local commands; commands=() + _describe -t commands 'zkstack help chain update-token-multiplier-setter commands' commands "$@" +} +(( $+functions[_zkstack__help__consensus_commands] )) || +_zkstack__help__consensus_commands() { + local commands; commands=( +'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \ +'get-attester-committee:Fetches the attester committee from the consensus registry contract' \ + ) + _describe -t commands 'zkstack help consensus commands' commands "$@" +} +(( $+functions[_zkstack__help__consensus__get-attester-committee_commands] )) || +_zkstack__help__consensus__get-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack help consensus get-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__help__consensus__set-attester-committee_commands] )) || +_zkstack__help__consensus__set-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack help consensus set-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__help__containers_commands] )) || +_zkstack__help__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack help containers commands' commands "$@" +} +(( $+functions[_zkstack__help__contract-verifier_commands] )) || +_zkstack__help__contract-verifier_commands() { + local commands; commands=( +'run:Run contract verifier' \ +'init:Download required binaries for contract verifier' \ + ) + _describe -t commands 'zkstack help contract-verifier commands' commands "$@" +} +(( $+functions[_zkstack__help__contract-verifier__init_commands] )) || 
+_zkstack__help__contract-verifier__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack help contract-verifier init commands' commands "$@" +} +(( $+functions[_zkstack__help__contract-verifier__run_commands] )) || +_zkstack__help__contract-verifier__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack help contract-verifier run commands' commands "$@" +} +(( $+functions[_zkstack__help__dev_commands] )) || +_zkstack__help__dev_commands() { + local commands; commands=( +'database:Database related commands' \ +'test:Run tests' \ +'clean:Clean artifacts' \ +'snapshot:Snapshots creator' \ +'lint:Lint code' \ +'fmt:Format code' \ +'prover:Protocol version used by provers' \ +'contracts:Build contracts' \ +'config-writer:Overwrite general config' \ +'send-transactions:Send transactions from file' \ +'status:Get status of the server' \ +'generate-genesis:Generate new genesis file based on current contracts' \ + ) + _describe -t commands 'zkstack help dev commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__clean_commands] )) || +_zkstack__help__dev__clean_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ + ) + _describe -t commands 'zkstack help dev clean commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__clean__all_commands] )) || +_zkstack__help__dev__clean__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev clean all commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__clean__containers_commands] )) || +_zkstack__help__dev__clean__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev clean containers commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__clean__contracts-cache_commands] )) || +_zkstack__help__dev__clean__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev clean contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__config-writer_commands] )) || +_zkstack__help__dev__config-writer_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev config-writer commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__contracts_commands] )) || +_zkstack__help__dev__contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev contracts commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database_commands] )) || +_zkstack__help__dev__database_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' 
\ + ) + _describe -t commands 'zkstack help dev database commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__check-sqlx-data_commands] )) || +_zkstack__help__dev__database__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__drop_commands] )) || +_zkstack__help__dev__database__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database drop commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__migrate_commands] )) || +_zkstack__help__dev__database__migrate_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database migrate commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__new-migration_commands] )) || +_zkstack__help__dev__database__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database new-migration commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__prepare_commands] )) || +_zkstack__help__dev__database__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database prepare commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__reset_commands] )) || +_zkstack__help__dev__database__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database reset commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__database__setup_commands] )) || +_zkstack__help__dev__database__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev database setup commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__fmt_commands] )) || +_zkstack__help__dev__fmt_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ + ) + _describe -t commands 'zkstack help dev fmt commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__fmt__contract_commands] )) || +_zkstack__help__dev__fmt__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev fmt contract commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__fmt__prettier_commands] )) || +_zkstack__help__dev__fmt__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev fmt prettier commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__fmt__rustfmt_commands] )) || +_zkstack__help__dev__fmt__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev fmt rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__generate-genesis_commands] )) || +_zkstack__help__dev__generate-genesis_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev generate-genesis commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__lint_commands] )) || +_zkstack__help__dev__lint_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev lint commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__prover_commands] )) || +_zkstack__help__dev__prover_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ + ) + _describe -t commands 'zkstack help dev prover commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__prover__info_commands] )) || +_zkstack__help__dev__prover__info_commands() { + local commands; 
commands=() + _describe -t commands 'zkstack help dev prover info commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__prover__insert-batch_commands] )) || +_zkstack__help__dev__prover__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev prover insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__prover__insert-version_commands] )) || +_zkstack__help__dev__prover__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev prover insert-version commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__send-transactions_commands] )) || +_zkstack__help__dev__send-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev send-transactions commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__snapshot_commands] )) || +_zkstack__help__dev__snapshot_commands() { + local commands; commands=( +'create:' \ + ) + _describe -t commands 'zkstack help dev snapshot commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__snapshot__create_commands] )) || +_zkstack__help__dev__snapshot__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev snapshot create commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__status_commands] )) || +_zkstack__help__dev__status_commands() { + local commands; commands=( +'ports:Show used ports' \ + ) + _describe -t commands 'zkstack help dev status commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__status__ports_commands] )) || +_zkstack__help__dev__status__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev status ports commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test_commands] )) || +_zkstack__help__dev__test_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ + ) + _describe -t commands 'zkstack help dev test commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__build_commands] )) || +_zkstack__help__dev__test__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test build commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__fees_commands] )) || +_zkstack__help__dev__test__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test fees commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__integration_commands] )) || +_zkstack__help__dev__test__integration_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test integration commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__l1-contracts_commands] )) || +_zkstack__help__dev__test__l1-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test l1-contracts commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__loadtest_commands] )) || +_zkstack__help__dev__test__loadtest_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test loadtest commands' commands "$@" +} +(( 
$+functions[_zkstack__help__dev__test__prover_commands] )) || +_zkstack__help__dev__test__prover_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test prover commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__recovery_commands] )) || +_zkstack__help__dev__test__recovery_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test recovery commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__revert_commands] )) || +_zkstack__help__dev__test__revert_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test revert commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__rust_commands] )) || +_zkstack__help__dev__test__rust_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test rust commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__upgrade_commands] )) || +_zkstack__help__dev__test__upgrade_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test upgrade commands' commands "$@" +} +(( $+functions[_zkstack__help__dev__test__wallet_commands] )) || +_zkstack__help__dev__test__wallet_commands() { + local commands; commands=() + _describe -t commands 'zkstack help dev test wallet commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem_commands] )) || +_zkstack__help__ecosystem_commands() { + local commands; commands=( +'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \ +'build-transactions:Create transactions to build ecosystem contracts' \ +'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \ +'change-default-chain:Change the default chain' \ +'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \ + ) + _describe -t commands 'zkstack help ecosystem commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem__build-transactions_commands] )) || +_zkstack__help__ecosystem__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack help ecosystem build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem__change-default-chain_commands] )) || +_zkstack__help__ecosystem__change-default-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack help ecosystem change-default-chain commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem__create_commands] )) || +_zkstack__help__ecosystem__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack help ecosystem create commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem__init_commands] )) || +_zkstack__help__ecosystem__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack help ecosystem init commands' commands "$@" +} +(( $+functions[_zkstack__help__ecosystem__setup-observability_commands] )) || +_zkstack__help__ecosystem__setup-observability_commands() { + local commands; commands=() + _describe -t commands 'zkstack help ecosystem setup-observability commands' commands "$@" +} +(( $+functions[_zkstack__help__explorer_commands] )) || +_zkstack__help__explorer_commands() { + local commands; commands=( +'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). 
Runs for all chains, unless --chain is passed' \ +'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' \ +'run:Run explorer app' \ + ) + _describe -t commands 'zkstack help explorer commands' commands "$@" +} +(( $+functions[_zkstack__help__explorer__init_commands] )) || +_zkstack__help__explorer__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack help explorer init commands' commands "$@" +} +(( $+functions[_zkstack__help__explorer__run_commands] )) || +_zkstack__help__explorer__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack help explorer run commands' commands "$@" +} +(( $+functions[_zkstack__help__explorer__run-backend_commands] )) || +_zkstack__help__explorer__run-backend_commands() { + local commands; commands=() + _describe -t commands 'zkstack help explorer run-backend commands' commands "$@" +} +(( $+functions[_zkstack__help__external-node_commands] )) || +_zkstack__help__external-node_commands() { + local commands; commands=( +'configs:Prepare configs for EN' \ +'init:Init databases' \ +'run:Run external node' \ + ) + _describe -t commands 'zkstack help external-node commands' commands "$@" +} +(( $+functions[_zkstack__help__external-node__configs_commands] )) || +_zkstack__help__external-node__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack help external-node configs commands' commands "$@" +} +(( $+functions[_zkstack__help__external-node__init_commands] )) || +_zkstack__help__external-node__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack help external-node init commands' commands "$@" +} +(( $+functions[_zkstack__help__external-node__run_commands] )) || +_zkstack__help__external-node__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack help external-node run commands' commands "$@" +} +(( $+functions[_zkstack__help__help_commands] )) || +_zkstack__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack help help commands' commands "$@" +} +(( $+functions[_zkstack__help__markdown_commands] )) || +_zkstack__help__markdown_commands() { + local commands; commands=() + _describe -t commands 'zkstack help markdown commands' commands "$@" +} +(( $+functions[_zkstack__help__portal_commands] )) || +_zkstack__help__portal_commands() { + local commands; commands=() + _describe -t commands 'zkstack help portal commands' commands "$@" +} +(( $+functions[_zkstack__help__prover_commands] )) || +_zkstack__help__prover_commands() { + local commands; commands=( +'init:Initialize prover' \ +'setup-keys:Generate setup keys' \ +'run:Run prover' \ +'init-bellman-cuda:Initialize bellman-cuda' \ +'compressor-keys:Download compressor keys' \ + ) + _describe -t commands 'zkstack help prover commands' commands "$@" +} +(( $+functions[_zkstack__help__prover__compressor-keys_commands] )) || +_zkstack__help__prover__compressor-keys_commands() { + local commands; commands=() + _describe -t commands 'zkstack help prover compressor-keys commands' commands "$@" +} +(( $+functions[_zkstack__help__prover__init_commands] )) || +_zkstack__help__prover__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack help prover init commands' commands "$@" +} +(( $+functions[_zkstack__help__prover__init-bellman-cuda_commands] )) || +_zkstack__help__prover__init-bellman-cuda_commands() { + local commands; commands=() + _describe 
-t commands 'zkstack help prover init-bellman-cuda commands' commands "$@" +} +(( $+functions[_zkstack__help__prover__run_commands] )) || +_zkstack__help__prover__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack help prover run commands' commands "$@" +} +(( $+functions[_zkstack__help__prover__setup-keys_commands] )) || +_zkstack__help__prover__setup-keys_commands() { + local commands; commands=() + _describe -t commands 'zkstack help prover setup-keys commands' commands "$@" +} +(( $+functions[_zkstack__help__server_commands] )) || +_zkstack__help__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack help server commands' commands "$@" +} +(( $+functions[_zkstack__help__update_commands] )) || +_zkstack__help__update_commands() { + local commands; commands=() + _describe -t commands 'zkstack help update commands' commands "$@" +} +(( $+functions[_zkstack__markdown_commands] )) || +_zkstack__markdown_commands() { + local commands; commands=() + _describe -t commands 'zkstack markdown commands' commands "$@" +} +(( $+functions[_zkstack__portal_commands] )) || +_zkstack__portal_commands() { + local commands; commands=() + _describe -t commands 'zkstack portal commands' commands "$@" +} +(( $+functions[_zkstack__prover_commands] )) || +_zkstack__prover_commands() { + local commands; commands=( +'init:Initialize prover' \ +'setup-keys:Generate setup keys' \ +'run:Run prover' \ +'init-bellman-cuda:Initialize bellman-cuda' \ +'compressor-keys:Download compressor keys' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack prover commands' commands "$@" +} +(( $+functions[_zkstack__prover__compressor-keys_commands] )) || +_zkstack__prover__compressor-keys_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover compressor-keys commands' commands "$@" +} +(( $+functions[_zkstack__prover__help_commands] )) || +_zkstack__prover__help_commands() { + local commands; commands=( +'init:Initialize prover' \ +'setup-keys:Generate setup keys' \ +'run:Run prover' \ +'init-bellman-cuda:Initialize bellman-cuda' \ +'compressor-keys:Download compressor keys' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack prover help commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__compressor-keys_commands] )) || +_zkstack__prover__help__compressor-keys_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help compressor-keys commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__help_commands] )) || +_zkstack__prover__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help help commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__init_commands] )) || +_zkstack__prover__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help init commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__init-bellman-cuda_commands] )) || +_zkstack__prover__help__init-bellman-cuda_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help init-bellman-cuda commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__run_commands] )) || +_zkstack__prover__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help run commands' commands "$@" +} +(( $+functions[_zkstack__prover__help__setup-keys_commands] )) || 
+_zkstack__prover__help__setup-keys_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover help setup-keys commands' commands "$@" +} +(( $+functions[_zkstack__prover__init_commands] )) || +_zkstack__prover__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover init commands' commands "$@" +} +(( $+functions[_zkstack__prover__init-bellman-cuda_commands] )) || +_zkstack__prover__init-bellman-cuda_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover init-bellman-cuda commands' commands "$@" +} +(( $+functions[_zkstack__prover__run_commands] )) || +_zkstack__prover__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover run commands' commands "$@" +} +(( $+functions[_zkstack__prover__setup-keys_commands] )) || +_zkstack__prover__setup-keys_commands() { + local commands; commands=() + _describe -t commands 'zkstack prover setup-keys commands' commands "$@" +} +(( $+functions[_zkstack__server_commands] )) || +_zkstack__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack server commands' commands "$@" +} +(( $+functions[_zkstack__update_commands] )) || +_zkstack__update_commands() { + local commands; commands=() + _describe -t commands 'zkstack update commands' commands "$@" +} + +if [ "$funcstack[1]" = "_zkstack" ]; then + _zkstack "$@" +else + compdef _zkstack zkstack +fi diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.fish b/zkstack_cli/crates/zkstack/completion/zkstack.fish new file mode 100644 index 00000000000..9f449192003 --- /dev/null +++ b/zkstack_cli/crates/zkstack/completion/zkstack.fish @@ -0,0 +1,767 @@ +# Print an optspec for argparse to handle cmd's options that are independent of any subcommand. +function __fish_zkstack_global_optspecs + string join \n v/verbose chain= ignore-prerequisites h/help V/version +end + +function __fish_zkstack_needs_command + # Figure out if the current invocation already has a command. + set -l cmd (commandline -opc) + set -e cmd[1] + argparse -s (__fish_zkstack_global_optspecs) -- $cmd 2>/dev/null + or return + if set -q argv[1] + # Also print the command, so this can be used to figure out what it is. 
+ echo $argv[1] + return 1 + end + return 0 +end + +function __fish_zkstack_using_subcommand + set -l cmd (__fish_zkstack_needs_command) + test -z "$cmd" + and return 1 + contains -- $cmd[1] $argv +end + +complete -c zkstack -n "__fish_zkstack_needs_command" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_needs_command" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_needs_command" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_needs_command" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_needs_command" -s V -l version -d 'Print version' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "autocomplete" -d 'Create shell autocompletion files' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "ecosystem" -d 'Ecosystem related commands' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "chain" -d 'Chain related commands' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "dev" -d 'Supervisor related commands' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "prover" -d 'Prover related commands' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "server" -d 'Run server' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "external-node" -d 'External Node related commands' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "containers" -d 'Run containers for local development' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "contract-verifier" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "portal" -d 'Run dapp-portal' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "explorer" -d 'Run block-explorer' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "consensus" -d 'Consensus utilities' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "update" -d 'Update ZKsync' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "markdown" -d 'Print markdown help' +complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l generate -d 'The shell to generate the autocomplete script for' -r -f -a "{bash\t'',elvish\t'',fish\t'',powershell\t'',zsh\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s o -l out -d 'The out directory to write the autocomplete script to' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init 
change-default-chain setup-observability help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "change-default-chain" -d 'Change the default chain' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l ecosystem-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l l1-network -d 'L1 Network' -r -f -a "{localhost\t'',sepolia\t'',holesky\t'',mainnet\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l link-to-code -d 'Code link' -r -f -a "(__fish_complete_directories)" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain-id -d 'Chain ID' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l prover-mode -d 'Prover options' -r -f -a "{no-proofs\t'',gpu\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l wallet-creation -d 'Wallet options' -r -f -a "{localhost\t'Load wallets from localhost mnemonic, they are funded for localhost env',random\t'Generate random wallets',empty\t'Generate placeholder wallets',in-file\t'Specify file with wallets'}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l wallet-path -d 'Wallet path' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l 
l1-batch-commit-data-generator-mode -d 'Commit data generation mode' -r -f -a "{rollup\t'',validium\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-address -d 'Base token address' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l evm-emulator -d 'Enable EVM emulator' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l start-containers -d 'Start reth and postgres containers after creation' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l legacy-bridge +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l skip-submodules-checkout -d 'Skip submodules checkout' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l skip-contract-compilation-override -d 'Skip contract compilation override' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l sender -d 'Address of the transaction sender' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s o -l out -d 'Output directory for the generated files' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier-api-key -d 'Verifier API key' -r +complete -c 
zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-erc20 -d 'Deploy ERC20 contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-ecosystem -d 'Deploy ecosystem contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ecosystem-contracts-path -d 'Path to ecosystem contracts' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-paymaster -d 'Deploy Paymaster contract' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l server-db-url -d 'Server database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l server-db-name -d 'Server database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s o -l observability -d 'Enable Grafana' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete 
-c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s d -l dont-drop +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ecosystem-only -d 'Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` subcommand)' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l dev -d 'Use defaults for all options and flags. Suitable for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l no-port-reallocation -d 'Do not reallocate ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l skip-submodules-checkout -d 'Skip submodules checkout' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l skip-contract-compilation-override -d 'Skip contract compilation override' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "init" 
-d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "change-default-chain" -d 'Change the default chain' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts 
accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "convert-to-gateway" -d 'Prepare chain to be an eligible gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 
deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "migrate-to-gateway" -d 'Migrate chain to gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "migrate-from-gateway" -d 'Migrate chain from gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain-id -d 'Chain ID' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l prover-mode -d 'Prover options' -r -f -a "{no-proofs\t'',gpu\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l wallet-creation -d 'Wallet options' -r -f -a "{localhost\t'Load wallets from localhost mnemonic, they are funded for localhost env',random\t'Generate random wallets',empty\t'Generate placeholder wallets',in-file\t'Specify file with wallets'}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l wallet-path -d 'Wallet path' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l l1-batch-commit-data-generator-mode -d 'Commit data generation mode' -r -f -a "{rollup\t'',validium\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-address -d 'Base token address' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l evm-emulator -d 'Enable EVM emulator' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l legacy-bridge +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l skip-submodules-checkout -d 'Skip submodules checkout' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; 
and __fish_seen_subcommand_from create" -l skip-contract-compilation-override -d 'Skip contract compilation override' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s o -l out -d 'Output directory for the generated files' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s 
a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l server-db-url -d 'Server database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l server-db-name -d 'Server database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l deploy-paymaster -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s d -l dont-drop +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l no-port-reallocation -d 'Do not reallocate ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l dev -d 'Use defaults for all options and flags. Suitable for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l skip-submodules-checkout -d 'Skip submodules checkout' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -f -a "configs" -d 'Initialize chain configs' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l server-db-url -d 'Server database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l server-db-name -d 'Server database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s d -l dev -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s d -l dont-drop +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand 
chain; and __fish_seen_subcommand_from genesis" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "init-database" -d 'Initialize databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "server" -d 'Runs server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and 
__fish_seen_subcommand_from deploy-l2-contracts" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c 
zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier-api-key -d 'Verifier API key' -r 
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand 
chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l verifier -d 'Verifier to use' -r -f -a 
"{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l gateway-chain-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand 
chain; and __fish_seen_subcommand_from migrate-from-gateway" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l gateway-chain-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "convert-to-gateway" -d 'Prepare chain to be an eligible gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "migrate-to-gateway" -d 'Migrate chain to gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "migrate-from-gateway" -d 'Migrate chain from gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "clean" -d 'Clean artifacts' +complete 
-c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "check-sqlx-data" -d 'Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' 
+complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "drop" -d 'Drop databases. If no databases are selected, all databases will be dropped.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "migrate" -d 'Migrate databases. If no databases are selected, all databases will be migrated.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "new-migration" -d 'Create new migration' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "prepare" -d 'Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "reset" -d 'Reset databases. If no databases are selected, all databases will be reset.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "setup" -d 'Setup databases. If no databases are selected, all databases will be setup.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "integration" -d 'Run integration tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "fees" -d 'Run fees test' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "revert" -d 'Run revert tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "recovery" -d 'Run recovery tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "upgrade" -d 'Run upgrade tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "build" -d 'Build all test dependencies' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "rust" -d 'Run unit-tests, accepts optional cargo test flags' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "l1-contracts" -d 'Run L1 contracts tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "prover" -d 'Run prover tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "wallet" -d 'Print test wallets information' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "loadtest" -d 'Run loadtest' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and 
__fish_seen_subcommand_from test" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "all" -d 'Remove containers and contracts cache' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "containers" -d 'Remove containers and docker volumes' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "contracts-cache" -d 'Remove contracts caches' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -f -a "create" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s t -l targets -r -f -a "{md\t'',sol\t'',js\t'',ts\t'',rs\t'',contracts\t'',autocompletion\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s c -l check +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s c -l check +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -l ignore-prerequisites -d 'Ignores 
prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "rustfmt" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "contract" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "prettier" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "info" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "insert-batch" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "insert-version" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l l1-contracts -d 'Build L1 contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l l1-da-contracts -d 'Build L1 DA contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l l2-contracts -d 'Build L2 contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l system-contracts -d 'Build system contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l test-contracts -d 'Build test contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s p -l path -d 'Path to the config file to override' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -l chain -d 'Chain to use' -r 
+complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l file -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l private-key -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l l1-rpc-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l confirmations -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s u -l url -d 'URL of the health check endpoint' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -f -a "ports" -d 'Show used ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and 
__fish_seen_subcommand_from help" -f -a "clean" -d 'Clean artifacts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f 
-a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l proof-store-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bucket-base-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l credentials-file -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bucket-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l location -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l project-id -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l shall-save-to-public-bucket -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-store-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-bucket-base-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-credentials-file -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-bucket-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-location -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-project-id -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bellman-cuda-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bellman-cuda -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-compressor-key -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l path -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l region -r -f -a "{us\t'',europe\t'',asia\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l mode -r -f -a "{download\t'',generate\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-keys -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-database -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l prover-db-url -d 'Prover database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l prover-db-name -d 'Prover database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s u -l use-default -d 'Use default database urls and names' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s d -l dont-drop -r -f -a 
"{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l cloud-type -r -f -a "{gcp\t'',local\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l dev +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l clone +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l region -r -f -a "{us\t'',europe\t'',asia\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l mode -r -f -a "{download\t'',generate\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l component -r -f -a "{gateway\t'',witness-generator\t'',witness-vector-generator\t'',prover\t'',circuit-prover\t'',compressor\t'',prover-job-monitor\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l round -r -f -a "{all-rounds\t'',basic-circuits\t'',leaf-aggregation\t'',node-aggregation\t'',recursion-tip\t'',scheduler\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l threads -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l max-allocation -d 'Memory allocation limit in bytes (for prover component)' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l witness-vector-generator-count -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l max-allocation -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l docker -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l tag -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l 
ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l bellman-cuda-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l clone +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l path -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l components -d 'Components of server to run' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -s a -l additional-args -d 'Additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l genesis -d 'Run server in genesis mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l build -d 'Build server but don\'t run it' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l uring -d 'Enables uring support for RocksDB' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n 
"__fish_zkstack_using_subcommand server" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l db-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l db-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l l1-rpc-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s u -l use-default -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l components -d 'Components of server to run' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand 
external-node; and __fish_seen_subcommand_from run" -l enable-consensus -d 'Enable consensus' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s a -l additional-args -d 'Additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l reinit +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s o -l observability -d 'Enable Grafana' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and 
__fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l zksolc-version -d 'Version of zksolc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l zkvyper-version -d 'Version of zkvyper to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l solc-version -d 'Version of solc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l era-vm-solc-version -d 'Version of era vm solc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l vyper-version -d 'Version of vyper to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l only -d 'Install only provided compilers' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -l ignore-prerequisites 
-d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. 
Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l from-file -d 'Sets the attester committee in the consensus registry contract to the committee in the yaml file. 
File format is definied in `commands/consensus/proto/mod.proto`' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l from-genesis -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s c -l only-config -d 'Update only the config files' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "autocomplete" -d 'Create shell autocompletion files' +complete -c zkstack -n 
"__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "ecosystem" -d 'Ecosystem related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "chain" -d 'Chain related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "dev" -d 'Supervisor related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "prover" -d 'Prover related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "server" -d 'Run server' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "external-node" -d 'External Node related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "containers" -d 'Run containers for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "contract-verifier" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "portal" -d 'Run dapp-portal' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "explorer" -d 'Run block-explorer' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "consensus" -d 'Consensus utilities' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "update" -d 'Update ZKsync' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal 
explorer consensus update markdown help" -f -a "markdown" -d 'Print markdown help' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "change-default-chain" -d 'Change the default chain' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "convert-to-gateway" -d 'Prepare chain to be an eligible gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "migrate-to-gateway" -d 'Migrate chain to gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "migrate-from-gateway" -d 'Migrate chain from gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "clean" -d 'Clean artifacts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and 
__fish_seen_subcommand_from prover" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from contract-verifier" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from contract-verifier" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. 
Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from consensus" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from consensus" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.sh b/zkstack_cli/crates/zkstack/completion/zkstack.sh new file mode 100644 index 00000000000..57294750ca4 --- /dev/null +++ b/zkstack_cli/crates/zkstack/completion/zkstack.sh @@ -0,0 +1,7255 @@ +_zkstack() { + local i cur prev opts cmd + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + prev="${COMP_WORDS[COMP_CWORD-1]}" + cmd="" + opts="" + + for i in ${COMP_WORDS[@]} + do + case "${cmd},${i}" in + ",$1") + cmd="zkstack" + ;; + zkstack,autocomplete) + cmd="zkstack__autocomplete" + ;; + zkstack,chain) + cmd="zkstack__chain" + ;; + zkstack,consensus) + cmd="zkstack__consensus" + ;; + zkstack,containers) + cmd="zkstack__containers" + ;; + zkstack,contract-verifier) + cmd="zkstack__contract__verifier" + ;; + zkstack,dev) + cmd="zkstack__dev" + ;; + zkstack,ecosystem) + cmd="zkstack__ecosystem" + ;; + zkstack,explorer) + cmd="zkstack__explorer" + ;; + zkstack,external-node) + cmd="zkstack__external__node" + ;; + zkstack,help) + cmd="zkstack__help" + ;; + zkstack,markdown) + cmd="zkstack__markdown" + ;; + zkstack,portal) + cmd="zkstack__portal" + ;; + zkstack,prover) + cmd="zkstack__prover" + ;; + zkstack,server) + cmd="zkstack__server" + ;; + zkstack,update) + cmd="zkstack__update" + ;; + zkstack__chain,accept-chain-ownership) + cmd="zkstack__chain__accept__chain__ownership" + ;; + zkstack__chain,build-transactions) + cmd="zkstack__chain__build__transactions" + ;; + zkstack__chain,convert-to-gateway) + cmd="zkstack__chain__convert__to__gateway" + ;; + zkstack__chain,create) + cmd="zkstack__chain__create" + ;; + zkstack__chain,deploy-consensus-registry) + cmd="zkstack__chain__deploy__consensus__registry" + ;; + zkstack__chain,deploy-l2-contracts) + cmd="zkstack__chain__deploy__l2__contracts" + ;; + zkstack__chain,deploy-multicall3) + cmd="zkstack__chain__deploy__multicall3" + ;; + zkstack__chain,deploy-paymaster) + cmd="zkstack__chain__deploy__paymaster" + ;; + zkstack__chain,deploy-upgrader) + cmd="zkstack__chain__deploy__upgrader" + ;; + zkstack__chain,genesis) + cmd="zkstack__chain__genesis" + ;; + zkstack__chain,help) + cmd="zkstack__chain__help" + ;; + zkstack__chain,init) + cmd="zkstack__chain__init" + ;; + zkstack__chain,initialize-bridges) + cmd="zkstack__chain__initialize__bridges" + ;; + zkstack__chain,migrate-from-gateway) + cmd="zkstack__chain__migrate__from__gateway" + ;; + zkstack__chain,migrate-to-gateway) + cmd="zkstack__chain__migrate__to__gateway" + ;; + zkstack__chain,register-chain) + cmd="zkstack__chain__register__chain" + ;; + zkstack__chain,update-token-multiplier-setter) + cmd="zkstack__chain__update__token__multiplier__setter" + ;; + zkstack__chain__genesis,help) + cmd="zkstack__chain__genesis__help" + ;; + zkstack__chain__genesis,init-database) + cmd="zkstack__chain__genesis__init__database" + ;; + zkstack__chain__genesis,server) + cmd="zkstack__chain__genesis__server" + 
;; + zkstack__chain__genesis__help,help) + cmd="zkstack__chain__genesis__help__help" + ;; + zkstack__chain__genesis__help,init-database) + cmd="zkstack__chain__genesis__help__init__database" + ;; + zkstack__chain__genesis__help,server) + cmd="zkstack__chain__genesis__help__server" + ;; + zkstack__chain__help,accept-chain-ownership) + cmd="zkstack__chain__help__accept__chain__ownership" + ;; + zkstack__chain__help,build-transactions) + cmd="zkstack__chain__help__build__transactions" + ;; + zkstack__chain__help,convert-to-gateway) + cmd="zkstack__chain__help__convert__to__gateway" + ;; + zkstack__chain__help,create) + cmd="zkstack__chain__help__create" + ;; + zkstack__chain__help,deploy-consensus-registry) + cmd="zkstack__chain__help__deploy__consensus__registry" + ;; + zkstack__chain__help,deploy-l2-contracts) + cmd="zkstack__chain__help__deploy__l2__contracts" + ;; + zkstack__chain__help,deploy-multicall3) + cmd="zkstack__chain__help__deploy__multicall3" + ;; + zkstack__chain__help,deploy-paymaster) + cmd="zkstack__chain__help__deploy__paymaster" + ;; + zkstack__chain__help,deploy-upgrader) + cmd="zkstack__chain__help__deploy__upgrader" + ;; + zkstack__chain__help,genesis) + cmd="zkstack__chain__help__genesis" + ;; + zkstack__chain__help,help) + cmd="zkstack__chain__help__help" + ;; + zkstack__chain__help,init) + cmd="zkstack__chain__help__init" + ;; + zkstack__chain__help,initialize-bridges) + cmd="zkstack__chain__help__initialize__bridges" + ;; + zkstack__chain__help,migrate-from-gateway) + cmd="zkstack__chain__help__migrate__from__gateway" + ;; + zkstack__chain__help,migrate-to-gateway) + cmd="zkstack__chain__help__migrate__to__gateway" + ;; + zkstack__chain__help,register-chain) + cmd="zkstack__chain__help__register__chain" + ;; + zkstack__chain__help,update-token-multiplier-setter) + cmd="zkstack__chain__help__update__token__multiplier__setter" + ;; + zkstack__chain__help__genesis,init-database) + cmd="zkstack__chain__help__genesis__init__database" + ;; + zkstack__chain__help__genesis,server) + cmd="zkstack__chain__help__genesis__server" + ;; + zkstack__chain__help__init,configs) + cmd="zkstack__chain__help__init__configs" + ;; + zkstack__chain__init,configs) + cmd="zkstack__chain__init__configs" + ;; + zkstack__chain__init,help) + cmd="zkstack__chain__init__help" + ;; + zkstack__chain__init__help,configs) + cmd="zkstack__chain__init__help__configs" + ;; + zkstack__chain__init__help,help) + cmd="zkstack__chain__init__help__help" + ;; + zkstack__consensus,get-attester-committee) + cmd="zkstack__consensus__get__attester__committee" + ;; + zkstack__consensus,help) + cmd="zkstack__consensus__help" + ;; + zkstack__consensus,set-attester-committee) + cmd="zkstack__consensus__set__attester__committee" + ;; + zkstack__consensus__help,get-attester-committee) + cmd="zkstack__consensus__help__get__attester__committee" + ;; + zkstack__consensus__help,help) + cmd="zkstack__consensus__help__help" + ;; + zkstack__consensus__help,set-attester-committee) + cmd="zkstack__consensus__help__set__attester__committee" + ;; + zkstack__contract__verifier,help) + cmd="zkstack__contract__verifier__help" + ;; + zkstack__contract__verifier,init) + cmd="zkstack__contract__verifier__init" + ;; + zkstack__contract__verifier,run) + cmd="zkstack__contract__verifier__run" + ;; + zkstack__contract__verifier__help,help) + cmd="zkstack__contract__verifier__help__help" + ;; + zkstack__contract__verifier__help,init) + cmd="zkstack__contract__verifier__help__init" + ;; + zkstack__contract__verifier__help,run) + 
cmd="zkstack__contract__verifier__help__run" + ;; + zkstack__dev,clean) + cmd="zkstack__dev__clean" + ;; + zkstack__dev,config-writer) + cmd="zkstack__dev__config__writer" + ;; + zkstack__dev,contracts) + cmd="zkstack__dev__contracts" + ;; + zkstack__dev,database) + cmd="zkstack__dev__database" + ;; + zkstack__dev,fmt) + cmd="zkstack__dev__fmt" + ;; + zkstack__dev,generate-genesis) + cmd="zkstack__dev__generate__genesis" + ;; + zkstack__dev,help) + cmd="zkstack__dev__help" + ;; + zkstack__dev,lint) + cmd="zkstack__dev__lint" + ;; + zkstack__dev,prover) + cmd="zkstack__dev__prover" + ;; + zkstack__dev,send-transactions) + cmd="zkstack__dev__send__transactions" + ;; + zkstack__dev,snapshot) + cmd="zkstack__dev__snapshot" + ;; + zkstack__dev,status) + cmd="zkstack__dev__status" + ;; + zkstack__dev,test) + cmd="zkstack__dev__test" + ;; + zkstack__dev__clean,all) + cmd="zkstack__dev__clean__all" + ;; + zkstack__dev__clean,containers) + cmd="zkstack__dev__clean__containers" + ;; + zkstack__dev__clean,contracts-cache) + cmd="zkstack__dev__clean__contracts__cache" + ;; + zkstack__dev__clean,help) + cmd="zkstack__dev__clean__help" + ;; + zkstack__dev__clean__help,all) + cmd="zkstack__dev__clean__help__all" + ;; + zkstack__dev__clean__help,containers) + cmd="zkstack__dev__clean__help__containers" + ;; + zkstack__dev__clean__help,contracts-cache) + cmd="zkstack__dev__clean__help__contracts__cache" + ;; + zkstack__dev__clean__help,help) + cmd="zkstack__dev__clean__help__help" + ;; + zkstack__dev__database,check-sqlx-data) + cmd="zkstack__dev__database__check__sqlx__data" + ;; + zkstack__dev__database,drop) + cmd="zkstack__dev__database__drop" + ;; + zkstack__dev__database,help) + cmd="zkstack__dev__database__help" + ;; + zkstack__dev__database,migrate) + cmd="zkstack__dev__database__migrate" + ;; + zkstack__dev__database,new-migration) + cmd="zkstack__dev__database__new__migration" + ;; + zkstack__dev__database,prepare) + cmd="zkstack__dev__database__prepare" + ;; + zkstack__dev__database,reset) + cmd="zkstack__dev__database__reset" + ;; + zkstack__dev__database,setup) + cmd="zkstack__dev__database__setup" + ;; + zkstack__dev__database__help,check-sqlx-data) + cmd="zkstack__dev__database__help__check__sqlx__data" + ;; + zkstack__dev__database__help,drop) + cmd="zkstack__dev__database__help__drop" + ;; + zkstack__dev__database__help,help) + cmd="zkstack__dev__database__help__help" + ;; + zkstack__dev__database__help,migrate) + cmd="zkstack__dev__database__help__migrate" + ;; + zkstack__dev__database__help,new-migration) + cmd="zkstack__dev__database__help__new__migration" + ;; + zkstack__dev__database__help,prepare) + cmd="zkstack__dev__database__help__prepare" + ;; + zkstack__dev__database__help,reset) + cmd="zkstack__dev__database__help__reset" + ;; + zkstack__dev__database__help,setup) + cmd="zkstack__dev__database__help__setup" + ;; + zkstack__dev__fmt,contract) + cmd="zkstack__dev__fmt__contract" + ;; + zkstack__dev__fmt,help) + cmd="zkstack__dev__fmt__help" + ;; + zkstack__dev__fmt,prettier) + cmd="zkstack__dev__fmt__prettier" + ;; + zkstack__dev__fmt,rustfmt) + cmd="zkstack__dev__fmt__rustfmt" + ;; + zkstack__dev__fmt__help,contract) + cmd="zkstack__dev__fmt__help__contract" + ;; + zkstack__dev__fmt__help,help) + cmd="zkstack__dev__fmt__help__help" + ;; + zkstack__dev__fmt__help,prettier) + cmd="zkstack__dev__fmt__help__prettier" + ;; + zkstack__dev__fmt__help,rustfmt) + cmd="zkstack__dev__fmt__help__rustfmt" + ;; + zkstack__dev__help,clean) + cmd="zkstack__dev__help__clean" + ;; + 
zkstack__dev__help,config-writer) + cmd="zkstack__dev__help__config__writer" + ;; + zkstack__dev__help,contracts) + cmd="zkstack__dev__help__contracts" + ;; + zkstack__dev__help,database) + cmd="zkstack__dev__help__database" + ;; + zkstack__dev__help,fmt) + cmd="zkstack__dev__help__fmt" + ;; + zkstack__dev__help,generate-genesis) + cmd="zkstack__dev__help__generate__genesis" + ;; + zkstack__dev__help,help) + cmd="zkstack__dev__help__help" + ;; + zkstack__dev__help,lint) + cmd="zkstack__dev__help__lint" + ;; + zkstack__dev__help,prover) + cmd="zkstack__dev__help__prover" + ;; + zkstack__dev__help,send-transactions) + cmd="zkstack__dev__help__send__transactions" + ;; + zkstack__dev__help,snapshot) + cmd="zkstack__dev__help__snapshot" + ;; + zkstack__dev__help,status) + cmd="zkstack__dev__help__status" + ;; + zkstack__dev__help,test) + cmd="zkstack__dev__help__test" + ;; + zkstack__dev__help__clean,all) + cmd="zkstack__dev__help__clean__all" + ;; + zkstack__dev__help__clean,containers) + cmd="zkstack__dev__help__clean__containers" + ;; + zkstack__dev__help__clean,contracts-cache) + cmd="zkstack__dev__help__clean__contracts__cache" + ;; + zkstack__dev__help__database,check-sqlx-data) + cmd="zkstack__dev__help__database__check__sqlx__data" + ;; + zkstack__dev__help__database,drop) + cmd="zkstack__dev__help__database__drop" + ;; + zkstack__dev__help__database,migrate) + cmd="zkstack__dev__help__database__migrate" + ;; + zkstack__dev__help__database,new-migration) + cmd="zkstack__dev__help__database__new__migration" + ;; + zkstack__dev__help__database,prepare) + cmd="zkstack__dev__help__database__prepare" + ;; + zkstack__dev__help__database,reset) + cmd="zkstack__dev__help__database__reset" + ;; + zkstack__dev__help__database,setup) + cmd="zkstack__dev__help__database__setup" + ;; + zkstack__dev__help__fmt,contract) + cmd="zkstack__dev__help__fmt__contract" + ;; + zkstack__dev__help__fmt,prettier) + cmd="zkstack__dev__help__fmt__prettier" + ;; + zkstack__dev__help__fmt,rustfmt) + cmd="zkstack__dev__help__fmt__rustfmt" + ;; + zkstack__dev__help__prover,info) + cmd="zkstack__dev__help__prover__info" + ;; + zkstack__dev__help__prover,insert-batch) + cmd="zkstack__dev__help__prover__insert__batch" + ;; + zkstack__dev__help__prover,insert-version) + cmd="zkstack__dev__help__prover__insert__version" + ;; + zkstack__dev__help__snapshot,create) + cmd="zkstack__dev__help__snapshot__create" + ;; + zkstack__dev__help__status,ports) + cmd="zkstack__dev__help__status__ports" + ;; + zkstack__dev__help__test,build) + cmd="zkstack__dev__help__test__build" + ;; + zkstack__dev__help__test,fees) + cmd="zkstack__dev__help__test__fees" + ;; + zkstack__dev__help__test,integration) + cmd="zkstack__dev__help__test__integration" + ;; + zkstack__dev__help__test,l1-contracts) + cmd="zkstack__dev__help__test__l1__contracts" + ;; + zkstack__dev__help__test,loadtest) + cmd="zkstack__dev__help__test__loadtest" + ;; + zkstack__dev__help__test,prover) + cmd="zkstack__dev__help__test__prover" + ;; + zkstack__dev__help__test,recovery) + cmd="zkstack__dev__help__test__recovery" + ;; + zkstack__dev__help__test,revert) + cmd="zkstack__dev__help__test__revert" + ;; + zkstack__dev__help__test,rust) + cmd="zkstack__dev__help__test__rust" + ;; + zkstack__dev__help__test,upgrade) + cmd="zkstack__dev__help__test__upgrade" + ;; + zkstack__dev__help__test,wallet) + cmd="zkstack__dev__help__test__wallet" + ;; + zkstack__dev__prover,help) + cmd="zkstack__dev__prover__help" + ;; + zkstack__dev__prover,info) + cmd="zkstack__dev__prover__info" + 
;; + zkstack__dev__prover,insert-batch) + cmd="zkstack__dev__prover__insert__batch" + ;; + zkstack__dev__prover,insert-version) + cmd="zkstack__dev__prover__insert__version" + ;; + zkstack__dev__prover__help,help) + cmd="zkstack__dev__prover__help__help" + ;; + zkstack__dev__prover__help,info) + cmd="zkstack__dev__prover__help__info" + ;; + zkstack__dev__prover__help,insert-batch) + cmd="zkstack__dev__prover__help__insert__batch" + ;; + zkstack__dev__prover__help,insert-version) + cmd="zkstack__dev__prover__help__insert__version" + ;; + zkstack__dev__snapshot,create) + cmd="zkstack__dev__snapshot__create" + ;; + zkstack__dev__snapshot,help) + cmd="zkstack__dev__snapshot__help" + ;; + zkstack__dev__snapshot__help,create) + cmd="zkstack__dev__snapshot__help__create" + ;; + zkstack__dev__snapshot__help,help) + cmd="zkstack__dev__snapshot__help__help" + ;; + zkstack__dev__status,help) + cmd="zkstack__dev__status__help" + ;; + zkstack__dev__status,ports) + cmd="zkstack__dev__status__ports" + ;; + zkstack__dev__status__help,help) + cmd="zkstack__dev__status__help__help" + ;; + zkstack__dev__status__help,ports) + cmd="zkstack__dev__status__help__ports" + ;; + zkstack__dev__test,build) + cmd="zkstack__dev__test__build" + ;; + zkstack__dev__test,fees) + cmd="zkstack__dev__test__fees" + ;; + zkstack__dev__test,help) + cmd="zkstack__dev__test__help" + ;; + zkstack__dev__test,integration) + cmd="zkstack__dev__test__integration" + ;; + zkstack__dev__test,l1-contracts) + cmd="zkstack__dev__test__l1__contracts" + ;; + zkstack__dev__test,loadtest) + cmd="zkstack__dev__test__loadtest" + ;; + zkstack__dev__test,prover) + cmd="zkstack__dev__test__prover" + ;; + zkstack__dev__test,recovery) + cmd="zkstack__dev__test__recovery" + ;; + zkstack__dev__test,revert) + cmd="zkstack__dev__test__revert" + ;; + zkstack__dev__test,rust) + cmd="zkstack__dev__test__rust" + ;; + zkstack__dev__test,upgrade) + cmd="zkstack__dev__test__upgrade" + ;; + zkstack__dev__test,wallet) + cmd="zkstack__dev__test__wallet" + ;; + zkstack__dev__test__help,build) + cmd="zkstack__dev__test__help__build" + ;; + zkstack__dev__test__help,fees) + cmd="zkstack__dev__test__help__fees" + ;; + zkstack__dev__test__help,help) + cmd="zkstack__dev__test__help__help" + ;; + zkstack__dev__test__help,integration) + cmd="zkstack__dev__test__help__integration" + ;; + zkstack__dev__test__help,l1-contracts) + cmd="zkstack__dev__test__help__l1__contracts" + ;; + zkstack__dev__test__help,loadtest) + cmd="zkstack__dev__test__help__loadtest" + ;; + zkstack__dev__test__help,prover) + cmd="zkstack__dev__test__help__prover" + ;; + zkstack__dev__test__help,recovery) + cmd="zkstack__dev__test__help__recovery" + ;; + zkstack__dev__test__help,revert) + cmd="zkstack__dev__test__help__revert" + ;; + zkstack__dev__test__help,rust) + cmd="zkstack__dev__test__help__rust" + ;; + zkstack__dev__test__help,upgrade) + cmd="zkstack__dev__test__help__upgrade" + ;; + zkstack__dev__test__help,wallet) + cmd="zkstack__dev__test__help__wallet" + ;; + zkstack__ecosystem,build-transactions) + cmd="zkstack__ecosystem__build__transactions" + ;; + zkstack__ecosystem,change-default-chain) + cmd="zkstack__ecosystem__change__default__chain" + ;; + zkstack__ecosystem,create) + cmd="zkstack__ecosystem__create" + ;; + zkstack__ecosystem,help) + cmd="zkstack__ecosystem__help" + ;; + zkstack__ecosystem,init) + cmd="zkstack__ecosystem__init" + ;; + zkstack__ecosystem,setup-observability) + cmd="zkstack__ecosystem__setup__observability" + ;; + zkstack__ecosystem__help,build-transactions) + 
cmd="zkstack__ecosystem__help__build__transactions" + ;; + zkstack__ecosystem__help,change-default-chain) + cmd="zkstack__ecosystem__help__change__default__chain" + ;; + zkstack__ecosystem__help,create) + cmd="zkstack__ecosystem__help__create" + ;; + zkstack__ecosystem__help,help) + cmd="zkstack__ecosystem__help__help" + ;; + zkstack__ecosystem__help,init) + cmd="zkstack__ecosystem__help__init" + ;; + zkstack__ecosystem__help,setup-observability) + cmd="zkstack__ecosystem__help__setup__observability" + ;; + zkstack__explorer,help) + cmd="zkstack__explorer__help" + ;; + zkstack__explorer,init) + cmd="zkstack__explorer__init" + ;; + zkstack__explorer,run) + cmd="zkstack__explorer__run" + ;; + zkstack__explorer,run-backend) + cmd="zkstack__explorer__run__backend" + ;; + zkstack__explorer__help,help) + cmd="zkstack__explorer__help__help" + ;; + zkstack__explorer__help,init) + cmd="zkstack__explorer__help__init" + ;; + zkstack__explorer__help,run) + cmd="zkstack__explorer__help__run" + ;; + zkstack__explorer__help,run-backend) + cmd="zkstack__explorer__help__run__backend" + ;; + zkstack__external__node,configs) + cmd="zkstack__external__node__configs" + ;; + zkstack__external__node,help) + cmd="zkstack__external__node__help" + ;; + zkstack__external__node,init) + cmd="zkstack__external__node__init" + ;; + zkstack__external__node,run) + cmd="zkstack__external__node__run" + ;; + zkstack__external__node__help,configs) + cmd="zkstack__external__node__help__configs" + ;; + zkstack__external__node__help,help) + cmd="zkstack__external__node__help__help" + ;; + zkstack__external__node__help,init) + cmd="zkstack__external__node__help__init" + ;; + zkstack__external__node__help,run) + cmd="zkstack__external__node__help__run" + ;; + zkstack__help,autocomplete) + cmd="zkstack__help__autocomplete" + ;; + zkstack__help,chain) + cmd="zkstack__help__chain" + ;; + zkstack__help,consensus) + cmd="zkstack__help__consensus" + ;; + zkstack__help,containers) + cmd="zkstack__help__containers" + ;; + zkstack__help,contract-verifier) + cmd="zkstack__help__contract__verifier" + ;; + zkstack__help,dev) + cmd="zkstack__help__dev" + ;; + zkstack__help,ecosystem) + cmd="zkstack__help__ecosystem" + ;; + zkstack__help,explorer) + cmd="zkstack__help__explorer" + ;; + zkstack__help,external-node) + cmd="zkstack__help__external__node" + ;; + zkstack__help,help) + cmd="zkstack__help__help" + ;; + zkstack__help,markdown) + cmd="zkstack__help__markdown" + ;; + zkstack__help,portal) + cmd="zkstack__help__portal" + ;; + zkstack__help,prover) + cmd="zkstack__help__prover" + ;; + zkstack__help,server) + cmd="zkstack__help__server" + ;; + zkstack__help,update) + cmd="zkstack__help__update" + ;; + zkstack__help__chain,accept-chain-ownership) + cmd="zkstack__help__chain__accept__chain__ownership" + ;; + zkstack__help__chain,build-transactions) + cmd="zkstack__help__chain__build__transactions" + ;; + zkstack__help__chain,convert-to-gateway) + cmd="zkstack__help__chain__convert__to__gateway" + ;; + zkstack__help__chain,create) + cmd="zkstack__help__chain__create" + ;; + zkstack__help__chain,deploy-consensus-registry) + cmd="zkstack__help__chain__deploy__consensus__registry" + ;; + zkstack__help__chain,deploy-l2-contracts) + cmd="zkstack__help__chain__deploy__l2__contracts" + ;; + zkstack__help__chain,deploy-multicall3) + cmd="zkstack__help__chain__deploy__multicall3" + ;; + zkstack__help__chain,deploy-paymaster) + cmd="zkstack__help__chain__deploy__paymaster" + ;; + zkstack__help__chain,deploy-upgrader) + 
cmd="zkstack__help__chain__deploy__upgrader" + ;; + zkstack__help__chain,genesis) + cmd="zkstack__help__chain__genesis" + ;; + zkstack__help__chain,init) + cmd="zkstack__help__chain__init" + ;; + zkstack__help__chain,initialize-bridges) + cmd="zkstack__help__chain__initialize__bridges" + ;; + zkstack__help__chain,migrate-from-gateway) + cmd="zkstack__help__chain__migrate__from__gateway" + ;; + zkstack__help__chain,migrate-to-gateway) + cmd="zkstack__help__chain__migrate__to__gateway" + ;; + zkstack__help__chain,register-chain) + cmd="zkstack__help__chain__register__chain" + ;; + zkstack__help__chain,update-token-multiplier-setter) + cmd="zkstack__help__chain__update__token__multiplier__setter" + ;; + zkstack__help__chain__genesis,init-database) + cmd="zkstack__help__chain__genesis__init__database" + ;; + zkstack__help__chain__genesis,server) + cmd="zkstack__help__chain__genesis__server" + ;; + zkstack__help__chain__init,configs) + cmd="zkstack__help__chain__init__configs" + ;; + zkstack__help__consensus,get-attester-committee) + cmd="zkstack__help__consensus__get__attester__committee" + ;; + zkstack__help__consensus,set-attester-committee) + cmd="zkstack__help__consensus__set__attester__committee" + ;; + zkstack__help__contract__verifier,init) + cmd="zkstack__help__contract__verifier__init" + ;; + zkstack__help__contract__verifier,run) + cmd="zkstack__help__contract__verifier__run" + ;; + zkstack__help__dev,clean) + cmd="zkstack__help__dev__clean" + ;; + zkstack__help__dev,config-writer) + cmd="zkstack__help__dev__config__writer" + ;; + zkstack__help__dev,contracts) + cmd="zkstack__help__dev__contracts" + ;; + zkstack__help__dev,database) + cmd="zkstack__help__dev__database" + ;; + zkstack__help__dev,fmt) + cmd="zkstack__help__dev__fmt" + ;; + zkstack__help__dev,generate-genesis) + cmd="zkstack__help__dev__generate__genesis" + ;; + zkstack__help__dev,lint) + cmd="zkstack__help__dev__lint" + ;; + zkstack__help__dev,prover) + cmd="zkstack__help__dev__prover" + ;; + zkstack__help__dev,send-transactions) + cmd="zkstack__help__dev__send__transactions" + ;; + zkstack__help__dev,snapshot) + cmd="zkstack__help__dev__snapshot" + ;; + zkstack__help__dev,status) + cmd="zkstack__help__dev__status" + ;; + zkstack__help__dev,test) + cmd="zkstack__help__dev__test" + ;; + zkstack__help__dev__clean,all) + cmd="zkstack__help__dev__clean__all" + ;; + zkstack__help__dev__clean,containers) + cmd="zkstack__help__dev__clean__containers" + ;; + zkstack__help__dev__clean,contracts-cache) + cmd="zkstack__help__dev__clean__contracts__cache" + ;; + zkstack__help__dev__database,check-sqlx-data) + cmd="zkstack__help__dev__database__check__sqlx__data" + ;; + zkstack__help__dev__database,drop) + cmd="zkstack__help__dev__database__drop" + ;; + zkstack__help__dev__database,migrate) + cmd="zkstack__help__dev__database__migrate" + ;; + zkstack__help__dev__database,new-migration) + cmd="zkstack__help__dev__database__new__migration" + ;; + zkstack__help__dev__database,prepare) + cmd="zkstack__help__dev__database__prepare" + ;; + zkstack__help__dev__database,reset) + cmd="zkstack__help__dev__database__reset" + ;; + zkstack__help__dev__database,setup) + cmd="zkstack__help__dev__database__setup" + ;; + zkstack__help__dev__fmt,contract) + cmd="zkstack__help__dev__fmt__contract" + ;; + zkstack__help__dev__fmt,prettier) + cmd="zkstack__help__dev__fmt__prettier" + ;; + zkstack__help__dev__fmt,rustfmt) + cmd="zkstack__help__dev__fmt__rustfmt" + ;; + zkstack__help__dev__prover,info) + cmd="zkstack__help__dev__prover__info" + ;; + 
zkstack__help__dev__prover,insert-batch) + cmd="zkstack__help__dev__prover__insert__batch" + ;; + zkstack__help__dev__prover,insert-version) + cmd="zkstack__help__dev__prover__insert__version" + ;; + zkstack__help__dev__snapshot,create) + cmd="zkstack__help__dev__snapshot__create" + ;; + zkstack__help__dev__status,ports) + cmd="zkstack__help__dev__status__ports" + ;; + zkstack__help__dev__test,build) + cmd="zkstack__help__dev__test__build" + ;; + zkstack__help__dev__test,fees) + cmd="zkstack__help__dev__test__fees" + ;; + zkstack__help__dev__test,integration) + cmd="zkstack__help__dev__test__integration" + ;; + zkstack__help__dev__test,l1-contracts) + cmd="zkstack__help__dev__test__l1__contracts" + ;; + zkstack__help__dev__test,loadtest) + cmd="zkstack__help__dev__test__loadtest" + ;; + zkstack__help__dev__test,prover) + cmd="zkstack__help__dev__test__prover" + ;; + zkstack__help__dev__test,recovery) + cmd="zkstack__help__dev__test__recovery" + ;; + zkstack__help__dev__test,revert) + cmd="zkstack__help__dev__test__revert" + ;; + zkstack__help__dev__test,rust) + cmd="zkstack__help__dev__test__rust" + ;; + zkstack__help__dev__test,upgrade) + cmd="zkstack__help__dev__test__upgrade" + ;; + zkstack__help__dev__test,wallet) + cmd="zkstack__help__dev__test__wallet" + ;; + zkstack__help__ecosystem,build-transactions) + cmd="zkstack__help__ecosystem__build__transactions" + ;; + zkstack__help__ecosystem,change-default-chain) + cmd="zkstack__help__ecosystem__change__default__chain" + ;; + zkstack__help__ecosystem,create) + cmd="zkstack__help__ecosystem__create" + ;; + zkstack__help__ecosystem,init) + cmd="zkstack__help__ecosystem__init" + ;; + zkstack__help__ecosystem,setup-observability) + cmd="zkstack__help__ecosystem__setup__observability" + ;; + zkstack__help__explorer,init) + cmd="zkstack__help__explorer__init" + ;; + zkstack__help__explorer,run) + cmd="zkstack__help__explorer__run" + ;; + zkstack__help__explorer,run-backend) + cmd="zkstack__help__explorer__run__backend" + ;; + zkstack__help__external__node,configs) + cmd="zkstack__help__external__node__configs" + ;; + zkstack__help__external__node,init) + cmd="zkstack__help__external__node__init" + ;; + zkstack__help__external__node,run) + cmd="zkstack__help__external__node__run" + ;; + zkstack__help__prover,compressor-keys) + cmd="zkstack__help__prover__compressor__keys" + ;; + zkstack__help__prover,init) + cmd="zkstack__help__prover__init" + ;; + zkstack__help__prover,init-bellman-cuda) + cmd="zkstack__help__prover__init__bellman__cuda" + ;; + zkstack__help__prover,run) + cmd="zkstack__help__prover__run" + ;; + zkstack__help__prover,setup-keys) + cmd="zkstack__help__prover__setup__keys" + ;; + zkstack__prover,compressor-keys) + cmd="zkstack__prover__compressor__keys" + ;; + zkstack__prover,help) + cmd="zkstack__prover__help" + ;; + zkstack__prover,init) + cmd="zkstack__prover__init" + ;; + zkstack__prover,init-bellman-cuda) + cmd="zkstack__prover__init__bellman__cuda" + ;; + zkstack__prover,run) + cmd="zkstack__prover__run" + ;; + zkstack__prover,setup-keys) + cmd="zkstack__prover__setup__keys" + ;; + zkstack__prover__help,compressor-keys) + cmd="zkstack__prover__help__compressor__keys" + ;; + zkstack__prover__help,help) + cmd="zkstack__prover__help__help" + ;; + zkstack__prover__help,init) + cmd="zkstack__prover__help__init" + ;; + zkstack__prover__help,init-bellman-cuda) + cmd="zkstack__prover__help__init__bellman__cuda" + ;; + zkstack__prover__help,run) + cmd="zkstack__prover__help__run" + ;; + zkstack__prover__help,setup-keys) + 
cmd="zkstack__prover__help__setup__keys" + ;; + *) + ;; + esac + done + + case "${cmd}" in + zkstack) + opts="-v -h -V --verbose --chain --ignore-prerequisites --help --version autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__autocomplete) + opts="-o -v -h --generate --out --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --generate) + COMPREPLY=($(compgen -W "bash elvish fish powershell zsh" -- "${cur}")) + return 0 + ;; + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain) + opts="-v -h --verbose --chain --ignore-prerequisites --help create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__accept__chain__ownership) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__build__transactions) + opts="-o -a -v -h --out --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --l1-rpc-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- 
"${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__convert__to__gateway) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__create) + opts="-v -h --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --skip-submodules-checkout --skip-contract-compilation-override --evm-emulator --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-mode) + COMPREPLY=($(compgen -W "no-proofs gpu" -- "${cur}")) + return 0 + ;; + --wallet-creation) + COMPREPLY=($(compgen -W "localhost random empty in-file" -- "${cur}")) + return 0 + ;; + --wallet-path) + local oldifs + if [ -n "${IFS+x}" ]; then + oldifs="$IFS" + fi + IFS=$'\n' + COMPREPLY=($(compgen -f "${cur}")) + if [ -n "${oldifs+x}" ]; then + IFS="$oldifs" + fi + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o filenames + fi + return 0 + ;; + --l1-batch-commit-data-generator-mode) + COMPREPLY=($(compgen -W "rollup validium" -- "${cur}")) + return 0 + ;; + --base-token-address) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-nominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-denominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --set-as-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --evm-emulator) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__consensus__registry) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites 
--help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__l2__contracts) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__multicall3) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__paymaster) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__upgrader) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis) + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --verbose --chain --ignore-prerequisites --help init-database server help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help) + opts="init-database server help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__init__database) + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__chain__genesis__server) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help) + opts="create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__accept__chain__ownership) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__convert__to__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__consensus__registry) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__l2__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__multicall3) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__paymaster) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__upgrader) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() 
+ ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis) + opts="init-database server" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__init) + opts="configs" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__init__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__initialize__bridges) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__migrate__from__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__migrate__to__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__register__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__update__token__multiplier__setter) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init) + opts="-a -d -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --server-db-url --server-db-name --dont-drop --deploy-paymaster --l1-rpc-url 
--no-port-reallocation --dev --skip-submodules-checkout --verbose --chain --ignore-prerequisites --help configs help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --deploy-paymaster) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__configs) + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --l1-rpc-url --no-port-reallocation --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help) + opts="configs help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__initialize__bridges) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__migrate__from__gateway) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --gateway-chain-name --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --gateway-chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__migrate__to__gateway) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --gateway-chain-name --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --gateway-chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__register__chain) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__update__token__multiplier__setter) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || 
${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus) + opts="-v -h --verbose --chain --ignore-prerequisites --help set-attester-committee get-attester-committee help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__get__attester__committee) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help) + opts="set-attester-committee get-attester-committee help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__get__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__set__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__set__attester__committee) + opts="-v -h --from-genesis --from-file --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --from-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__containers) + opts="-o -v -h --observability --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + fi + case "${prev}" in + --observability) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier) + opts="-v -h --verbose --chain --ignore-prerequisites --help run init help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help) + opts="run init help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__init) + opts="-v -h --zksolc-version --zkvyper-version --solc-version --era-vm-solc-version --vyper-version --only --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --zksolc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --zkvyper-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --solc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --era-vm-solc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --vyper-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__run) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev) + opts="-v -h --verbose --chain --ignore-prerequisites --help database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + 
return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean) + opts="-v -h --verbose --chain --ignore-prerequisites --help all containers contracts-cache help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__all) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__containers) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__contracts__cache) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help) + opts="all containers contracts-cache help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__config__writer) + opts="-p -v -h --path --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case 
"${prev}" in + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__contracts) + opts="-v -h --l1-contracts --l1-da-contracts --l2-contracts --system-contracts --test-contracts --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --l1-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --l1-da-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --l2-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --system-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --test-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database) + opts="-v -h --verbose --chain --ignore-prerequisites --help check-sqlx-data drop migrate new-migration prepare reset setup help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__check__sqlx__data) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__drop) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help) + opts="check-sqlx-data drop migrate 
new-migration prepare reset setup help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__migrate) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__dev__database__new__migration) + opts="-v -h --database --name --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --database) + COMPREPLY=($(compgen -W "prover core" -- "${cur}")) + return 0 + ;; + --name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__prepare) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__reset) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__setup) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt) + opts="-c -v -h --check --verbose --chain --ignore-prerequisites --help rustfmt contract prettier help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__contract) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help) + opts="rustfmt contract prettier help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__rustfmt) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__prettier) + opts="-t -v -h --targets --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --targets) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__rustfmt) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__generate__genesis) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help) + opts="database test clean snapshot lint fmt prover contracts config-writer 
send-transactions status generate-genesis help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean) + opts="all containers contracts-cache" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__config__writer) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database) + opts="check-sqlx-data drop migrate new-migration prepare reset setup" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" 
-- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt) + opts="rustfmt contract prettier" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__rustfmt) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__generate__genesis) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__lint) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover) + opts="info insert-batch insert-version" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__dev__help__prover__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__send__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__snapshot) + opts="create" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__snapshot__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__status) + opts="ports" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__status__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test) + opts="integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + ;; + zkstack__dev__help__test__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__lint) + opts="-c -t -v -h --check --targets --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --targets) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help info insert-batch insert-version help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help) + opts="info insert-batch insert-version help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen 
-W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__info) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__insert__batch) + opts="-v -h --number --default --version --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --number) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__insert__version) + opts="-v -h --default --version --snark-wrapper --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --snark-wrapper) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__send__transactions) + opts="-v -h --file --private-key --l1-rpc-url --confirmations --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --private-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --confirmations) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot) + opts="-v -h --verbose --chain --ignore-prerequisites --help create help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + 
case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__create) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help) + opts="create help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status) + opts="-u -v -h --url --verbose --chain --ignore-prerequisites --help ports help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -u) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help) + opts="ports help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__ports) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test) + opts="-v -h --verbose --chain --ignore-prerequisites --help integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + 
return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__build) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__fees) + opts="-n -v -h --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help) + opts="integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__integration) + opts="-e -n -t -v -h --external-node --no-deps --test-pattern --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --test-pattern) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__l1__contracts) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__loadtest) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__recovery) + opts="-s -n -v -h --snapshot --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__dev__test__revert) + opts="-e -n -v -h --enable-consensus --external-node --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__rust) + opts="-v -h --options --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --options) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__upgrade) + opts="-n -v -h --no-deps --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__wallet) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem) + opts="-v -h --verbose --chain --ignore-prerequisites --help create build-transactions init change-default-chain setup-observability help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__build__transactions) + opts="-o -a -v -h --sender --l1-rpc-url --out --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --sender) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__change__default__chain) + opts="-v -h --verbose --chain 
--ignore-prerequisites --help [NAME]" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__create) + opts="-v -h --ecosystem-name --l1-network --link-to-code --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --skip-submodules-checkout --skip-contract-compilation-override --evm-emulator --start-containers --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --ecosystem-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-network) + COMPREPLY=($(compgen -W "localhost sepolia holesky mainnet" -- "${cur}")) + return 0 + ;; + --link-to-code) + COMPREPLY=() + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o plusdirs + fi + return 0 + ;; + --chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-mode) + COMPREPLY=($(compgen -W "no-proofs gpu" -- "${cur}")) + return 0 + ;; + --wallet-creation) + COMPREPLY=($(compgen -W "localhost random empty in-file" -- "${cur}")) + return 0 + ;; + --wallet-path) + local oldifs + if [ -n "${IFS+x}" ]; then + oldifs="$IFS" + fi + IFS=$'\n' + COMPREPLY=($(compgen -f "${cur}")) + if [ -n "${oldifs+x}" ]; then + IFS="$oldifs" + fi + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o filenames + fi + return 0 + ;; + --l1-batch-commit-data-generator-mode) + COMPREPLY=($(compgen -W "rollup validium" -- "${cur}")) + return 0 + ;; + --base-token-address) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-nominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-denominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --set-as-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --evm-emulator) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --start-containers) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help) + opts="create build-transactions init change-default-chain setup-observability help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__change__default__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__setup__observability) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__init) + opts="-a -d -o -v -h --deploy-erc20 --deploy-ecosystem --ecosystem-contracts-path --l1-rpc-url --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --deploy-paymaster --server-db-url --server-db-name --dont-drop --ecosystem-only --dev --observability --no-port-reallocation --skip-submodules-checkout --skip-contract-compilation-override --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --deploy-erc20) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --deploy-ecosystem) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --ecosystem-contracts-path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --deploy-paymaster) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --observability) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__setup__observability) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen 
-W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer) + opts="-v -h --verbose --chain --ignore-prerequisites --help init run-backend run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help) + opts="init run-backend run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__run__backend) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__init) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__run) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__run__backend) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node) + opts="-v -h --verbose --chain --ignore-prerequisites --help configs init run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__external__node__configs) + opts="-u -v -h --db-url --db-name --l1-rpc-url --use-default --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help) + opts="configs init run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__init) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__run) + opts="-a -v -h --reinit --components --enable-consensus --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --components) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --enable-consensus) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help) + opts="autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + 
return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__autocomplete) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain) + opts="create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__accept__chain__ownership) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__convert__to__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__consensus__registry) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__l2__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__multicall3) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__paymaster) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__upgrader) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + 
COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis) + opts="init-database server" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__init) + opts="configs" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__init__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__initialize__bridges) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__migrate__from__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__migrate__to__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__register__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__update__token__multiplier__setter) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus) + opts="set-attester-committee get-attester-committee" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus__get__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus__set__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier) + opts="run init" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev) + opts="database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean) + opts="all containers contracts-cache" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__config__writer) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__contracts) + opts="" + if [[ ${cur} == -* || 
${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database) + opts="check-sqlx-data drop migrate new-migration prepare reset setup" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt) + opts="rustfmt contract prettier" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__rustfmt) + opts="" + 
if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__generate__genesis) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__lint) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover) + opts="info insert-batch insert-version" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__send__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__snapshot) + opts="create" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__snapshot__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__status) + opts="ports" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__status__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test) + opts="integration fees revert recovery upgrade build rust 
l1-contracts prover wallet loadtest" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem) + opts="create build-transactions init change-default-chain 
setup-observability" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__change__default__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__setup__observability) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer) + opts="init run-backend run" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__run__backend) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node) + opts="configs init run" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__init) + opts="" + if 
[[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__markdown) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__portal) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover) + opts="init setup-keys run init-bellman-cuda compressor-keys" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__compressor__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__init__bellman__cuda) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__setup__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__update) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) 
+ return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__markdown) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__portal) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help init setup-keys run init-bellman-cuda compressor-keys help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__compressor__keys) + opts="-v -h --path --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help) + opts="init setup-keys run init-bellman-cuda compressor-keys help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__compressor__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__init__bellman__cuda) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__setup__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__init) + opts="-u -d -v -h --dev --proof-store-dir --bucket-base-url --credentials-file --bucket-name --location --project-id --shall-save-to-public-bucket --public-store-dir --public-bucket-base-url --public-credentials-file --public-bucket-name --public-location --public-project-id --clone --bellman-cuda-dir --bellman-cuda --setup-compressor-key --path --region --mode --setup-keys --setup-database --prover-db-url --prover-db-name --use-default --dont-drop --cloud-type --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --proof-store-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bucket-base-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --credentials-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bucket-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --location) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --project-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --shall-save-to-public-bucket) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --public-store-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-bucket-base-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-credentials-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-bucket-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-location) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-project-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bellman-cuda-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bellman-cuda) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --setup-compressor-key) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --region) + COMPREPLY=($(compgen -W "us europe asia" -- "${cur}")) + return 0 + ;; + --mode) + COMPREPLY=($(compgen -W "download generate" -- "${cur}")) + return 0 + ;; + --setup-keys) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --setup-database) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --use-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -u) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --dont-drop) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -d) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --cloud-type) + COMPREPLY=($(compgen -W "gcp local" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__init__bellman__cuda) + opts="-v -h --clone --bellman-cuda-dir --verbose --chain --ignore-prerequisites --help" + if [[ 
${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --bellman-cuda-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__run) + opts="-v -h --component --round --threads --max-allocation --witness-vector-generator-count --max-allocation --docker --tag --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --component) + COMPREPLY=($(compgen -W "gateway witness-generator witness-vector-generator prover circuit-prover compressor prover-job-monitor" -- "${cur}")) + return 0 + ;; + --round) + COMPREPLY=($(compgen -W "all-rounds basic-circuits leaf-aggregation node-aggregation recursion-tip scheduler" -- "${cur}")) + return 0 + ;; + --threads) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --max-allocation) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --witness-vector-generator-count) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --max-allocation) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --docker) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --tag) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__setup__keys) + opts="-v -h --region --mode --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --region) + COMPREPLY=($(compgen -W "us europe asia" -- "${cur}")) + return 0 + ;; + --mode) + COMPREPLY=($(compgen -W "download generate" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server) + opts="-a -v -h --components --genesis --additional-args --build --uring --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --components) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__update) + opts="-c -v -h --only-config --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + esac +} + +if [[ "${BASH_VERSINFO[0]}" -eq 4 && "${BASH_VERSINFO[1]}" -ge 4 || "${BASH_VERSINFO[0]}" -gt 4 ]]; then + complete -F _zkstack -o nosort -o bashdefault -o default zkstack +else + complete -F _zkstack -o bashdefault -o default zkstack +fi diff 
--git a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs b/zkstack_cli/crates/zkstack/src/accept_ownership.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/accept_ownership.rs rename to zkstack_cli/crates/zkstack/src/accept_ownership.rs index a41246e1de0..e1655921345 100644 --- a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs +++ b/zkstack_cli/crates/zkstack/src/accept_ownership.rs @@ -1,13 +1,10 @@ use common::{ forge::{Forge, ForgeScript, ForgeScriptArgs}, spinner::Spinner, + wallets::Wallet, }; use config::{forge_interface::script_params::ACCEPT_GOVERNANCE_SCRIPT_PARAMS, EcosystemConfig}; -use ethers::{ - abi::parse_abi, - contract::BaseContract, - types::{Address, H256}, -}; +use ethers::{abi::parse_abi, contract::BaseContract, types::Address}; use lazy_static::lazy_static; use xshell::Shell; @@ -31,7 +28,7 @@ pub async fn accept_admin( shell: &Shell, ecosystem_config: &EcosystemConfig, admin: Address, - governor: Option<H256>, + governor: &Wallet, target_address: Address, forge_args: &ForgeScriptArgs, l1_rpc_url: String, @@ -62,7 +59,7 @@ pub async fn accept_owner( shell: &Shell, ecosystem_config: &EcosystemConfig, governor_contract: Address, - governor: Option<H256>, + governor: &Wallet, target_address: Address, forge_args: &ForgeScriptArgs, l1_rpc_url: String, @@ -92,7 +89,7 @@ pub async fn set_da_validator_pair( shell: &Shell, ecosystem_config: &EcosystemConfig, chain_admin_addr: Address, - governor: Option<H256>, + governor: &Wallet, diamond_proxy_address: Address, l1_da_validator_address: Address, l2_da_validator_address: Address, @@ -129,10 +126,10 @@ pub async fn set_da_validator_pair( async fn accept_ownership( shell: &Shell, - governor: Option<H256>, + governor: &Wallet, mut forge: ForgeScript, ) -> anyhow::Result<()> { - forge = fill_forge_private_key(forge, governor)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; let spinner = Spinner::new(MSG_ACCEPTING_GOVERNANCE_SPINNER); forge.run(shell)?; diff --git a/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs b/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs new file mode 100644 index 00000000000..8e44d644f39 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs @@ -0,0 +1,13 @@ +use std::path::PathBuf; + +use clap::Parser; + +#[derive(Debug, Parser)] +pub struct AutocompleteArgs { + /// The shell to generate the autocomplete script for + #[arg(long = "generate", value_enum)] + pub generator: clap_complete::Shell, + /// The out directory to write the autocomplete script to + #[arg(short, long, default_value = "./")] + pub out: PathBuf, +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/containers.rs b/zkstack_cli/crates/zkstack/src/commands/args/containers.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/containers.rs rename to zkstack_cli/crates/zkstack/src/commands/args/containers.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/args/mod.rs similarity index 71% rename from zk_toolbox/crates/zk_inception/src/commands/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/args/mod.rs index d18b05c910e..5fa83aadf51 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/args/mod.rs @@ -1,7 +1,9 @@ +pub use autocomplete::*; pub use containers::*; pub use run_server::*; pub use update::*; +mod autocomplete; mod containers; mod run_server; mod 
update; diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs b/zkstack_cli/crates/zkstack/src/commands/args/run_server.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs rename to zkstack_cli/crates/zkstack/src/commands/args/run_server.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/update.rs b/zkstack_cli/crates/zkstack/src/commands/args/update.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/update.rs rename to zkstack_cli/crates/zkstack/src/commands/args/update.rs diff --git a/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs b/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs new file mode 100644 index 00000000000..0f2105cd5ef --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs @@ -0,0 +1,52 @@ +use std::{ + fs::File, + io::{BufWriter, Write}, +}; + +use anyhow::Context; +use clap::CommandFactory; +use clap_complete::{generate, Generator}; +use common::logger; + +use super::args::AutocompleteArgs; +use crate::{ + messages::{msg_generate_autocomplete_file, MSG_OUTRO_AUTOCOMPLETE_GENERATION}, + ZkStack, +}; + +pub fn run(args: AutocompleteArgs) -> anyhow::Result<()> { + let filename = autocomplete_file_name(&args.generator); + let path = args.out.join(filename); + + logger::info(msg_generate_autocomplete_file( + path.to_str() + .context("the output file path is an invalid UTF8 string")?, + )); + + let file = File::create(path).context("Failed to create file")?; + let mut writer = BufWriter::new(file); + + generate_completions(args.generator, &mut writer)?; + + logger::outro(MSG_OUTRO_AUTOCOMPLETE_GENERATION); + + Ok(()) +} + +pub fn generate_completions<G: Generator>(gen: G, buf: &mut dyn Write) -> anyhow::Result<()> { + let mut cmd = ZkStack::command(); + let cmd_name = cmd.get_name().to_string(); + + generate(gen, &mut cmd, cmd_name, buf); + + Ok(()) +} + +pub fn autocomplete_file_name(shell: &clap_complete::Shell) -> &'static str { + match shell { + clap_complete::Shell::Bash => "zkstack.sh", + clap_complete::Shell::Fish => "zkstack.fish", + clap_complete::Shell::Zsh => "_zkstack.zsh", + _ => todo!(), + } +} diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs b/zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs new file mode 100644 index 00000000000..cf3e2981b3c --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs @@ -0,0 +1,42 @@ +use anyhow::Context; +use common::{forge::ForgeScriptArgs, logger, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::Shell; + +use crate::{ + accept_ownership::accept_admin, + messages::{ + MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_NOT_INITIALIZED, MSG_CHAIN_OWNERSHIP_TRANSFERRED, + MSG_L1_SECRETS_MUST_BE_PRESENTED, + }, +}; + +pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let contracts = chain_config.get_contracts_config()?; + let secrets = chain_config.get_secrets_config()?; + let l1_rpc_url = secrets + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)? 
+ .l1_rpc_url + .expose_str() + .to_string(); + + let spinner = Spinner::new(MSG_ACCEPTING_ADMIN_SPINNER); + accept_admin( + shell, + &ecosystem_config, + contracts.l1.chain_admin_addr, + &chain_config.get_wallets_config()?.governor, + contracts.l1.diamond_proxy_addr, + &args, + l1_rpc_url.clone(), + ) + .await?; + spinner.finish(); + logger::success(MSG_CHAIN_OWNERSHIP_TRANSFERRED); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs similarity index 87% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs index 255fe05de59..b62984ce9e6 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs @@ -1,14 +1,22 @@ -use std::{path::PathBuf, str::FromStr}; +use std::{ + path::{Path, PathBuf}, + str::FromStr, +}; use anyhow::{bail, Context}; -use clap::{Parser, ValueEnum}; +use clap::{Parser, ValueEnum, ValueHint}; use common::{Prompt, PromptConfirm, PromptSelect}; -use config::forge_interface::deploy_ecosystem::output::Erc20Token; +use config::{ + forge_interface::deploy_ecosystem::output::Erc20Token, traits::ReadConfigWithBasePath, + EcosystemConfig, +}; use serde::{Deserialize, Serialize}; use slugify_rs::slugify; use strum::{Display, EnumIter, IntoEnumIterator}; use types::{BaseToken, L1BatchCommitmentMode, L1Network, ProverMode, WalletCreation}; +use xshell::Shell; use zksync_basic_types::H160; +use zksync_config::GenesisConfig; use crate::{ defaults::L2_CHAIN_ID, @@ -18,6 +26,7 @@ use crate::{ MSG_BASE_TOKEN_PRICE_DENOMINATOR_PROMPT, MSG_BASE_TOKEN_PRICE_NOMINATOR_HELP, MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT, MSG_BASE_TOKEN_SELECTION_PROMPT, MSG_CHAIN_ID_HELP, MSG_CHAIN_ID_PROMPT, MSG_CHAIN_ID_VALIDATOR_ERR, MSG_CHAIN_NAME_PROMPT, + MSG_EVM_EMULATOR_HASH_MISSING_ERR, MSG_EVM_EMULATOR_HELP, MSG_EVM_EMULATOR_PROMPT, MSG_L1_BATCH_COMMIT_DATA_GENERATOR_MODE_PROMPT, MSG_L1_COMMIT_DATA_GENERATOR_MODE_HELP, MSG_NUMBER_VALIDATOR_GREATHER_THAN_ZERO_ERR, MSG_NUMBER_VALIDATOR_NOT_ZERO_ERR, MSG_PROVER_MODE_HELP, MSG_PROVER_VERSION_PROMPT, MSG_SET_AS_DEFAULT_HELP, @@ -53,7 +62,7 @@ pub struct ChainCreateArgs { prover_mode: Option<ProverMode>, #[clap(long, help = MSG_WALLET_CREATION_HELP, value_enum)] wallet_creation: Option<WalletCreation>, - #[clap(long, help = MSG_WALLET_PATH_HELP)] + #[clap(long, help = MSG_WALLET_PATH_HELP, value_hint = ValueHint::FilePath)] wallet_path: Option<PathBuf>, #[clap(long, help = MSG_L1_COMMIT_DATA_GENERATOR_MODE_HELP)] l1_batch_commit_data_generator_mode: Option<L1BatchCommitmentMode>, @@ -79,14 +88,18 @@ pub struct ChainCreateArgs { default_missing_value = "true" )] pub skip_contract_compilation_override: bool, + #[arg(long, help = MSG_EVM_EMULATOR_HELP, default_missing_value = "true", num_args = 0..=1)] + evm_emulator: Option<bool>, } impl ChainCreateArgs { pub fn fill_values_with_prompt( self, + shell: &Shell, number_of_chains: u32, l1_network: &L1Network, possible_erc20: Vec<Erc20Token>, + link_to_code: &Path, ) -> anyhow::Result<ChainCreateArgsFinal> { let mut chain_name = self .chain_name @@ -139,7 +152,7 @@ impl ChainCreateArgs { 
.ask() }); - let wallet_path: Option<PathBuf> = if self.wallet_creation == Some(WalletCreation::InFile) { + let wallet_path: Option<PathBuf> = if wallet_creation == WalletCreation::InFile { Some(self.wallet_path.unwrap_or_else(|| { Prompt::new(MSG_WALLET_PATH_PROMPT) .validate_with(|val: &String| { @@ -223,6 +236,25 @@ impl ChainCreateArgs { } }; + let default_genesis_config = GenesisConfig::read_with_base_path( + shell, + EcosystemConfig::default_configs_path(link_to_code), + ) + .context("failed reading genesis config")?; + let has_evm_emulation_support = default_genesis_config.evm_emulator_hash.is_some(); + let evm_emulator = self.evm_emulator.unwrap_or_else(|| { + if !has_evm_emulation_support { + false + } else { + PromptConfirm::new(MSG_EVM_EMULATOR_PROMPT) + .default(false) + .ask() + } + }); + if !has_evm_emulation_support && evm_emulator { + bail!(MSG_EVM_EMULATOR_HASH_MISSING_ERR); + } + let set_as_default = self.set_as_default.unwrap_or_else(|| { PromptConfirm::new(MSG_SET_AS_DEFAULT_PROMPT) .default(true) @@ -241,6 +273,7 @@ impl ChainCreateArgs { legacy_bridge: self.legacy_bridge, skip_submodules_checkout: self.skip_submodules_checkout, skip_contract_compilation_override: self.skip_contract_compilation_override, + evm_emulator, }) } } @@ -258,6 +291,7 @@ pub struct ChainCreateArgsFinal { pub legacy_bridge: bool, pub skip_submodules_checkout: bool, pub skip_contract_compilation_override: bool, + pub evm_emulator: bool, } #[derive(Debug, Clone, EnumIter, Display, PartialEq, Eq)] diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs similarity index 55% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs index 483b78e9b26..f990cbfd77d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs @@ -1,3 +1,4 @@ +use anyhow::Context; use clap::Parser; use common::{db::DatabaseConfig, Prompt}; use config::ChainConfig; @@ -6,11 +7,10 @@ use slugify_rs::slugify; use url::Url; use crate::{ - defaults::{generate_db_names, DBNames, DATABASE_PROVER_URL, DATABASE_SERVER_URL}, + defaults::{generate_db_names, DBNames, DATABASE_SERVER_URL}, messages::{ - msg_prover_db_name_prompt, msg_prover_db_url_prompt, msg_server_db_name_prompt, - msg_server_db_url_prompt, MSG_PROVER_DB_NAME_HELP, MSG_PROVER_DB_URL_HELP, - MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP, + msg_server_db_name_prompt, msg_server_db_url_prompt, MSG_SERVER_DB_NAME_HELP, + MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP, }, }; @@ -20,27 +20,19 @@ pub struct GenesisArgs { pub server_db_url: Option<Url>, #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] pub server_db_name: Option<String>, - #[clap(long, help = MSG_PROVER_DB_URL_HELP)] - pub prover_db_url: Option<Url>, - #[clap(long, help = MSG_PROVER_DB_NAME_HELP)] - pub prover_db_name: Option<String>, #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)] - pub use_default: bool, + pub dev: bool, #[clap(long, short, action)] pub dont_drop: bool, } impl GenesisArgs { pub fn fill_values_with_prompt(self, config: &ChainConfig) -> GenesisArgsFinal { - let DBNames { - server_name, - prover_name, - } = generate_db_names(config); + let DBNames { server_name, .. 
} = generate_db_names(config); let chain_name = config.name.clone(); - if self.use_default { + if self.dev { GenesisArgsFinal { server_db: DatabaseConfig::new(DATABASE_SERVER_URL.clone(), server_name), - prover_db: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), dont_drop: self.dont_drop, } } else { @@ -57,31 +49,44 @@ impl GenesisArgs { }), separator = "_" ); - let prover_db_url = self.prover_db_url.unwrap_or_else(|| { - Prompt::new(&msg_prover_db_url_prompt(&chain_name)) - .default(DATABASE_PROVER_URL.as_str()) - .ask() - }); - let prover_db_name = slugify!( - &self.prover_db_name.unwrap_or_else(|| { - Prompt::new(&msg_prover_db_name_prompt(&chain_name)) - .default(&prover_name) - .ask() - }), - separator = "_" - ); GenesisArgsFinal { server_db: DatabaseConfig::new(server_db_url, server_db_name), - prover_db: DatabaseConfig::new(prover_db_url, prover_db_name), dont_drop: self.dont_drop, } } } + + pub fn fill_values_with_secrets( + mut self, + chain_config: &ChainConfig, + ) -> anyhow::Result<GenesisArgsFinal> { + let secrets = chain_config.get_secrets_config()?; + let database = secrets + .database + .context("Database secrets must be present")?; + + let (server_db_url, server_db_name) = if let Some(db_full_url) = database.server_url { + let db_config = DatabaseConfig::from_url(db_full_url.expose_url()) + .context("Invalid server database URL")?; + (Some(db_config.url), Some(db_config.name)) + } else { + (None, None) + }; + + self.server_db_url = self.server_db_url.or(server_db_url); + self.server_db_name = self.server_db_name.or(server_db_name); + + Ok(self.fill_values_with_prompt(chain_config)) + } + + pub fn reset_db_names(&mut self) { + self.server_db_name = None; + self.server_db_url = None; + } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GenesisArgsFinal { pub server_db: DatabaseConfig, - pub prover_db: DatabaseConfig, pub dont_drop: bool, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs similarity index 50% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs index fbdd71a7724..b34809643cf 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs @@ -1,51 +1,42 @@ use clap::Parser; -use common::{forge::ForgeScriptArgs, Prompt}; +use common::Prompt; use config::ChainConfig; use serde::{Deserialize, Serialize}; use types::L1Network; use url::Url; -use super::genesis::GenesisArgsFinal; use crate::{ - commands::chain::args::genesis::GenesisArgs, + commands::chain::args::{ + genesis::{GenesisArgs, GenesisArgsFinal}, + init::InitArgsFinal, + }, defaults::LOCAL_RPC_URL, messages::{ - MSG_DEPLOY_PAYMASTER_PROMPT, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, - MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, + MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, + MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, }, }; #[derive(Debug, Clone, Serialize, Deserialize, Parser)] -pub struct InitArgs { - /// All ethereum environment related arguments - #[clap(flatten)] - #[serde(flatten)] - pub forge_args: ForgeScriptArgs, +pub struct InitConfigsArgs { #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)] #[serde(flatten)] pub genesis_args: GenesisArgs, - #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub 
deploy_paymaster: Option<bool>, #[clap(long, help = MSG_L1_RPC_URL_HELP)] pub l1_rpc_url: Option<String>, - #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)] + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] pub no_port_reallocation: bool, - #[clap( - long, - help = "Skip submodules checkout", - default_missing_value = "true" - )] - pub skip_submodules_checkout: bool, } -impl InitArgs { - pub fn fill_values_with_prompt(self, config: &ChainConfig) -> InitArgsFinal { - let deploy_paymaster = self.deploy_paymaster.unwrap_or_else(|| { - common::PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT) - .default(true) - .ask() - }); +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct InitConfigsArgsFinal { + pub genesis_args: GenesisArgsFinal, + pub l1_rpc_url: String, + pub no_port_reallocation: bool, +} +impl InitConfigsArgs { + pub fn fill_values_with_prompt(self, config: &ChainConfig) -> InitConfigsArgsFinal { let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| { let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT); if config.l1_network == L1Network::Localhost { @@ -60,23 +51,20 @@ impl InitArgs { .ask() }); - InitArgsFinal { - forge_args: self.forge_args, + InitConfigsArgsFinal { genesis_args: self.genesis_args.fill_values_with_prompt(config), - deploy_paymaster, l1_rpc_url, no_port_reallocation: self.no_port_reallocation, - skip_submodules_checkout: self.skip_submodules_checkout, } } } -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct InitArgsFinal { - pub forge_args: ForgeScriptArgs, - pub genesis_args: GenesisArgsFinal, - pub deploy_paymaster: bool, - pub l1_rpc_url: String, - pub no_port_reallocation: bool, - pub skip_submodules_checkout: bool, +impl InitConfigsArgsFinal { + pub fn from_chain_init_args(init_args: &InitArgsFinal) -> InitConfigsArgsFinal { + InitConfigsArgsFinal { + genesis_args: init_args.genesis_args.clone(), + l1_rpc_url: init_args.l1_rpc_url.clone(), + no_port_reallocation: init_args.no_port_reallocation, + } + } } diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs new file mode 100644 index 00000000000..b2697db6377 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs @@ -0,0 +1,110 @@ +use clap::Parser; +use common::{forge::ForgeScriptArgs, Prompt}; +use config::ChainConfig; +use serde::{Deserialize, Serialize}; +use types::L1Network; +use url::Url; + +use crate::{ + commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal}, + defaults::LOCAL_RPC_URL, + messages::{ + MSG_DEPLOY_PAYMASTER_PROMPT, MSG_DEV_ARG_HELP, MSG_L1_RPC_URL_HELP, + MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, + MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, + }, +}; + +pub mod configs; + +#[derive(Debug, Clone, Serialize, Deserialize, Parser)] +pub struct InitArgs { + /// All ethereum environment related arguments + #[clap(flatten)] + #[serde(flatten)] + pub forge_args: ForgeScriptArgs, + #[clap(long, help = MSG_SERVER_DB_URL_HELP)] + pub server_db_url: Option<Url>, + #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] + pub server_db_name: Option<String>, + #[clap(long, short, action)] + pub dont_drop: bool, + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub deploy_paymaster: Option<bool>, + #[clap(long, help = MSG_L1_RPC_URL_HELP)] + pub l1_rpc_url: Option<String>, + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] + pub no_port_reallocation: bool, + #[clap(long, 
help = MSG_DEV_ARG_HELP)] + pub dev: bool, + #[clap( + long, + help = "Skip submodules checkout", + default_missing_value = "true" + )] + pub skip_submodules_checkout: bool, +} + +impl InitArgs { + pub fn get_genesis_args(&self) -> GenesisArgs { + GenesisArgs { + server_db_url: self.server_db_url.clone(), + server_db_name: self.server_db_name.clone(), + dev: self.dev, + dont_drop: self.dont_drop, + } + } + + pub fn fill_values_with_prompt(self, config: &ChainConfig) -> InitArgsFinal { + let genesis = self.get_genesis_args(); + + let deploy_paymaster = if self.dev { + true + } else { + self.deploy_paymaster.unwrap_or_else(|| { + common::PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT) + .default(true) + .ask() + }) + }; + + let l1_rpc_url = if self.dev { + LOCAL_RPC_URL.to_string() + } else { + self.l1_rpc_url.unwrap_or_else(|| { + let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT); + if config.l1_network == L1Network::Localhost { + prompt = prompt.default(LOCAL_RPC_URL); + } + prompt + .validate_with(|val: &String| -> Result<(), String> { + Url::parse(val) + .map(|_| ()) + .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string()) + }) + .ask() + }) + }; + + InitArgsFinal { + forge_args: self.forge_args, + genesis_args: genesis.fill_values_with_prompt(config), + deploy_paymaster, + l1_rpc_url, + no_port_reallocation: self.no_port_reallocation, + dev: self.dev, + skip_submodules_checkout: self.skip_submodules_checkout, + } + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct InitArgsFinal { + pub forge_args: ForgeScriptArgs, + pub genesis_args: GenesisArgsFinal, + pub deploy_paymaster: bool, + pub l1_rpc_url: String, + pub no_port_reallocation: bool, + pub dev: bool, + pub skip_submodules_checkout: bool, +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs index 98b2e226cc1..d3953c65659 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs @@ -6,9 +6,10 @@ use config::{ use ethers::utils::hex::ToHex; use xshell::Shell; -use super::common::register_chain; use crate::{ - commands::chain::args::build_transactions::BuildTransactionsArgs, + commands::chain::{ + args::build_transactions::BuildTransactionsArgs, register_chain::register_chain, + }, messages::{ MSG_BUILDING_CHAIN_REGISTRATION_TXNS_SPINNER, MSG_CHAIN_NOT_FOUND_ERR, MSG_CHAIN_TRANSACTIONS_BUILT, MSG_CHAIN_TXN_MISSING_CONTRACT_CONFIG, @@ -41,7 +42,7 @@ pub(crate) async fn run(args: BuildTransactionsArgs, shell: &Shell) -> anyhow::R logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, &chain_config); + update_from_chain_config(&mut genesis_config, &chain_config)?; // Copy ecosystem contracts let mut contracts_config = config diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs 
b/zkstack_cli/crates/zkstack/src/commands/chain/common.rs similarity index 58% rename from zk_toolbox/crates/zk_inception/src/commands/chain/common.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/common.rs index 3d8b887a3a9..e0aa0b4e047 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/common.rs @@ -1,66 +1,12 @@ -use common::{ - forge::{Forge, ForgeScriptArgs}, - spinner::Spinner, -}; -use config::{ - forge_interface::{ - register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, - script_params::REGISTER_CHAIN_SCRIPT_PARAMS, - }, - traits::{ReadConfig, SaveConfig}, - ChainConfig, ContractsConfig, EcosystemConfig, -}; +use common::spinner::Spinner; +use config::{ChainConfig, EcosystemConfig}; use types::{BaseToken, L1Network, WalletCreation}; -use xshell::Shell; use crate::{ consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, messages::{MSG_DISTRIBUTING_ETH_SPINNER, MSG_MINT_BASE_TOKEN_SPINNER}, - utils::forge::{check_the_balance, fill_forge_private_key}, }; -#[allow(clippy::too_many_arguments)] -pub async fn register_chain( - shell: &Shell, - forge_args: ForgeScriptArgs, - config: &EcosystemConfig, - chain_config: &ChainConfig, - contracts: &mut ContractsConfig, - l1_rpc_url: String, - sender: Option, - broadcast: bool, -) -> anyhow::Result<()> { - let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code); - - let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?; - deploy_config.save(shell, deploy_config_path)?; - - let mut forge = Forge::new(&config.path_to_l1_foundry()) - .script(®ISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone()) - .with_ffi() - .with_rpc_url(l1_rpc_url); - - if broadcast { - forge = forge.with_broadcast(); - } - - if let Some(address) = sender { - forge = forge.with_sender(address); - } else { - forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?; - check_the_balance(&forge).await?; - } - - forge.run(shell)?; - - let register_chain_output = RegisterChainOutput::read( - shell, - REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - contracts.set_chain_contracts(®ister_chain_output); - Ok(()) -} - // Distribute eth to the chain wallets for localhost environment pub async fn distribute_eth( ecosystem_config: &EcosystemConfig, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/convert_to_gateway.rs b/zkstack_cli/crates/zkstack/src/commands/chain/convert_to_gateway.rs similarity index 95% rename from zk_toolbox/crates/zk_inception/src/commands/chain/convert_to_gateway.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/convert_to_gateway.rs index 5e3021ba0f5..fd5b7f5414f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/convert_to_gateway.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/convert_to_gateway.rs @@ -2,6 +2,7 @@ use anyhow::Context; use common::{ config::global_config, forge::{Forge, ForgeScriptArgs}, + wallets::Wallet, }; use config::{ forge_interface::{ @@ -92,7 +93,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { .unwrap(), &ecosystem_config, &chain_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url, ) .await?; @@ -132,7 +133,7 @@ async fn deploy_gateway_ctm( .with_broadcast(); // Governor private key should not be needed for this script - forge = fill_forge_private_key(forge, 
config.get_wallets()?.deployer_private_key())?; + forge = fill_forge_private_key(forge, config.get_wallets()?.deployer.as_ref())?; check_the_balance(&forge).await?; forge.run(shell)?; @@ -162,7 +163,7 @@ async fn gateway_governance_whitelisting( .unwrap(), config, chain_config, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, l1_rpc_url.clone(), ) .await? @@ -184,7 +185,7 @@ async fn gateway_governance_whitelisting( .unwrap(), config, chain_config, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, l1_rpc_url.clone(), ) .await? @@ -204,7 +205,7 @@ async fn gateway_governance_whitelisting( .unwrap(), config, chain_config, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, l1_rpc_url.clone(), ) .await? @@ -227,7 +228,7 @@ async fn gateway_governance_whitelisting( .unwrap(), config, chain_config, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, l1_rpc_url.clone(), ) .await? @@ -248,7 +249,7 @@ async fn call_script( data: &Bytes, config: &EcosystemConfig, chain_config: &ChainConfig, - private_key: Option, + governor: &Wallet, l1_rpc_url: String, ) -> anyhow::Result { let mut forge = Forge::new(&config.path_to_l1_foundry()) @@ -259,7 +260,7 @@ async fn call_script( .with_calldata(data); // Governor private key is required for this script - forge = fill_forge_private_key(forge, private_key)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs similarity index 97% rename from zk_toolbox/crates/zk_inception/src/commands/chain/create.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/create.rs index 48a320ec27e..bdf5711e321 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs @@ -30,9 +30,11 @@ fn create( let tokens = ecosystem_config.get_erc20_tokens(); let args = args .fill_values_with_prompt( + shell, ecosystem_config.list_of_chains().len() as u32, &ecosystem_config.l1_network, tokens, + &ecosystem_config.link_to_code, ) .context(MSG_ARGS_VALIDATOR_ERR)?; @@ -89,6 +91,7 @@ pub(crate) fn create_chain_inner( wallet_creation: args.wallet_creation, shell: OnceCell::from(shell.clone()), legacy_bridge, + evm_emulator: args.evm_emulator, }; create_wallets( diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs similarity index 89% rename from zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs index 1a4b1cb3608..578069546f9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs @@ -2,6 +2,7 @@ use std::path::Path; use anyhow::Context; use common::{ + // contracts::build_l2_contracts, forge::{Forge, ForgeScriptArgs}, hardhat::build_l2_contracts, spinner::Spinner, @@ -35,6 +36,7 @@ pub enum Deploy2ContractsOption { Upgrader, InitiailizeBridges, ConsensusRegistry, + Multicall3, } pub async fn run( @@ -82,6 +84,16 @@ pub async fn run( ) .await?; } + Deploy2ContractsOption::Multicall3 => { + deploy_multicall3( + shell, + &chain_config, + &ecosystem_config, + &mut contracts, + args, + ) + .await?; + } 
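A rough usage sketch, assuming the `deploy_multicall3` helper introduced further down in this file and the config loaders used elsewhere in this patch (`shell: &Shell` and `forge_args: ForgeScriptArgs` stand in for values the caller already has in scope):

    // Load the ecosystem and the currently selected chain (pattern used throughout this patch).
    let ecosystem_config = EcosystemConfig::from_file(shell)?;
    let chain_config = ecosystem_config
        .load_current_chain()
        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
    let mut contracts = chain_config.get_contracts_config()?;

    // Deploy only Multicall3 and persist the address it records into the contracts config.
    deploy_multicall3(shell, &chain_config, &ecosystem_config, &mut contracts, forge_args).await?;
    contracts.save_with_base_path(shell, &chain_config.configs)?;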
Deploy2ContractsOption::InitiailizeBridges => { initialize_bridges( shell, @@ -184,6 +196,24 @@ pub async fn deploy_consensus_registry( .await } +pub async fn deploy_multicall3( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + contracts_config: &mut ContractsConfig, + forge_args: ForgeScriptArgs, +) -> anyhow::Result<()> { + build_and_deploy( + shell, + chain_config, + ecosystem_config, + forge_args, + Some("runDeployMulticall3"), + |shell, out| contracts_config.set_multicall3(&Multicall3Output::read(shell, out)?), + ) + .await +} + pub async fn deploy_l2_contracts( shell: &Shell, chain_config: &ChainConfig, @@ -254,10 +284,7 @@ async fn call_forge( forge = forge.with_signature(signature); } - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&ecosystem_config.get_wallets()?.governor))?; check_the_balance(&forge).await?; forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs index 0da56f0c962..4a93fcc089f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs @@ -56,10 +56,7 @@ pub async fn deploy_paymaster( if let Some(address) = sender { forge = forge.with_sender(address); } else { - forge = fill_forge_private_key( - forge, - chain_config.get_wallets_config()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&chain_config.get_wallets_config()?.governor))?; } if broadcast { diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs new file mode 100644 index 00000000000..edf480946be --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs @@ -0,0 +1,118 @@ +use std::path::PathBuf; + +use anyhow::Context; +use common::{ + config::global_config, + db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, + logger, +}; +use config::{ + override_config, set_file_artifacts, set_rocks_db_config, set_server_database, + traits::SaveConfigWithBasePath, ChainConfig, EcosystemConfig, FileArtifacts, +}; +use types::ProverMode; +use xshell::Shell; +use zksync_basic_types::commitment::L1BatchCommitmentMode; + +use crate::{ + commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal}, + consts::{ + PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG, + SERVER_MIGRATIONS, + }, + messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, + MSG_GENESIS_DATABASES_INITIALIZED, MSG_INITIALIZING_SERVER_DATABASE, + MSG_RECREATE_ROCKS_DB_ERRROR, + }, + utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, +}; + +pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + + let mut secrets = chain_config.get_secrets_config()?; + let args = args.fill_values_with_secrets(&chain_config)?; + set_server_database(&mut secrets, &args.server_db)?; + secrets.save_with_base_path(shell, &chain_config.configs)?; + + 
initialize_server_database( + shell, + &args.server_db, + chain_config.link_to_code.clone(), + args.dont_drop, + ) + .await?; + logger::outro(MSG_GENESIS_DATABASES_INITIALIZED); + + Ok(()) +} + +pub async fn initialize_server_database( + shell: &Shell, + server_db_config: &DatabaseConfig, + link_to_code: PathBuf, + dont_drop: bool, +) -> anyhow::Result<()> { + let path_to_server_migration = link_to_code.join(SERVER_MIGRATIONS); + + if global_config().verbose { + logger::debug(MSG_INITIALIZING_SERVER_DATABASE) + } + if !dont_drop { + drop_db_if_exists(server_db_config) + .await + .context(MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR)?; + init_db(server_db_config).await?; + } + migrate_db( + shell, + path_to_server_migration, + &server_db_config.full_url(), + ) + .await?; + + Ok(()) +} + +pub fn update_configs( + args: GenesisArgsFinal, + shell: &Shell, + config: &ChainConfig, +) -> anyhow::Result<()> { + shell.create_dir(&config.rocks_db_path)?; + + // Update secrets configs + let mut secrets = config.get_secrets_config()?; + set_server_database(&mut secrets, &args.server_db)?; + secrets.save_with_base_path(shell, &config.configs)?; + + // Update general config + let mut general = config.get_general_config()?; + let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main) + .context(MSG_RECREATE_ROCKS_DB_ERRROR)?; + let file_artifacts = FileArtifacts::new(config.artifacts.clone()); + set_rocks_db_config(&mut general, rocks_db)?; + set_file_artifacts(&mut general, file_artifacts); + general.save_with_base_path(shell, &config.configs)?; + + let link_to_code = config.link_to_code.clone(); + if config.prover_version != ProverMode::NoProofs { + override_config( + shell, + link_to_code.join(PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG), + config, + )?; + } + if config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium { + override_config( + shell, + link_to_code.join(PATH_TO_VALIDIUM_OVERRIDE_CONFIG), + config, + )?; + } + Ok(()) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs new file mode 100644 index 00000000000..c1cc03174ae --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs @@ -0,0 +1,92 @@ +use anyhow::Context; +use clap::{command, Parser, Subcommand}; +use common::{logger, spinner::Spinner}; +use config::{ChainConfig, EcosystemConfig}; +use xshell::Shell; + +use crate::{ + commands::chain::{ + args::genesis::{GenesisArgs, GenesisArgsFinal}, + genesis::{self, database::initialize_server_database, server::run_server_genesis}, + }, + messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER, + MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS, MSG_STARTING_GENESIS_SPINNER, + }, +}; + +// Genesis subcommands +pub mod database; +pub mod server; + +#[derive(Subcommand, Debug, Clone)] +pub enum GenesisSubcommands { + /// Initialize databases + #[command(alias = "database")] + InitDatabase(Box), + /// Runs server genesis + Server, +} + +#[derive(Parser, Debug)] +#[command()] +pub struct GenesisCommand { + #[command(subcommand)] + command: Option, + #[clap(flatten)] + args: GenesisArgs, +} + +pub(crate) async fn run(args: GenesisCommand, shell: &Shell) -> anyhow::Result<()> { + match args.command { + Some(GenesisSubcommands::InitDatabase(args)) => database::run(*args, shell).await, + Some(GenesisSubcommands::Server) => server::run(shell).await, + None => run_genesis(args.args, shell).await, + } +} + +pub 
async fn run_genesis(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let args = args.fill_values_with_prompt(&chain_config); + + genesis(args, shell, &chain_config).await?; + logger::outro(MSG_GENESIS_COMPLETED); + + Ok(()) +} + +pub async fn genesis( + args: GenesisArgsFinal, + shell: &Shell, + config: &ChainConfig, +) -> anyhow::Result<()> { + genesis::database::update_configs(args.clone(), shell, config)?; + + logger::note( + MSG_SELECTED_CONFIG, + logger::object_to_string(serde_json::json!({ + "chain_config": config, + "server_db_config": args.server_db, + })), + ); + logger::info(MSG_STARTING_GENESIS); + + let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); + initialize_server_database( + shell, + &args.server_db, + config.link_to_code.clone(), + args.dont_drop, + ) + .await?; + spinner.finish(); + + let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER); + run_server_genesis(config, shell)?; + spinner.finish(); + + Ok(()) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs new file mode 100644 index 00000000000..090792e8007 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs @@ -0,0 +1,47 @@ +use anyhow::Context; +use common::{ + logger, + server::{Server, ServerMode}, + spinner::Spinner, +}; +use config::{ + traits::FileConfigWithDefaultName, ChainConfig, ContractsConfig, EcosystemConfig, + GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, +}; +use xshell::Shell; + +use crate::messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_RUN_SERVER_ERR, MSG_GENESIS_COMPLETED, + MSG_STARTING_GENESIS_SPINNER, +}; + +pub async fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + + let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER); + run_server_genesis(&chain_config, shell)?; + spinner.finish(); + logger::outro(MSG_GENESIS_COMPLETED); + + Ok(()) +} + +pub fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { + let server = Server::new(None, chain_config.link_to_code.clone(), false); + server + .run( + shell, + ServerMode::Genesis, + GenesisConfig::get_path_with_base_path(&chain_config.configs), + WalletsConfig::get_path_with_base_path(&chain_config.configs), + GeneralConfig::get_path_with_base_path(&chain_config.configs), + SecretsConfig::get_path_with_base_path(&chain_config.configs), + ContractsConfig::get_path_with_base_path(&chain_config.configs), + None, + vec![], + ) + .context(MSG_FAILED_TO_RUN_SERVER_ERR) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs new file mode 100644 index 00000000000..31c5c681e7d --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs @@ -0,0 +1,108 @@ +use anyhow::Context; +use common::logger; +use config::{ + copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, + ChainConfig, ContractsConfig, EcosystemConfig, +}; +use ethers::types::Address; +use xshell::Shell; + +use crate::{ + commands::{ + chain::{ + args::init::configs::{InitConfigsArgs, InitConfigsArgsFinal}, + genesis, + }, + 
portal::update_portal_config, + }, + messages::{ + MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_CONSENSUS_CONFIG_MISSING_ERR, + MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, + }, + utils::{ + consensus::{generate_consensus_keys, get_consensus_secrets, get_genesis_specs}, + ports::EcosystemPortsScanner, + }, +}; + +pub async fn run(args: InitConfigsArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let args = args.fill_values_with_prompt(&chain_config); + + init_configs(&args, shell, &ecosystem_config, &chain_config).await?; + logger::outro(MSG_CHAIN_CONFIGS_INITIALIZED); + + Ok(()) +} + +pub async fn init_configs( + init_args: &InitConfigsArgsFinal, + shell: &Shell, + ecosystem_config: &EcosystemConfig, + chain_config: &ChainConfig, +) -> anyhow::Result { + // Port scanner should run before copying configs to avoid marking initial ports as assigned + let mut ecosystem_ports = EcosystemPortsScanner::scan(shell)?; + copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; + + if !init_args.no_port_reallocation { + ecosystem_ports.allocate_ports_in_yaml( + shell, + &chain_config.path_to_general_config(), + chain_config.id, + )?; + } + + let mut general_config = chain_config.get_general_config()?; + + if general_config.proof_data_handler_config.is_some() && general_config.prover_gateway.is_some() + { + let proof_data_handler_config = general_config.proof_data_handler_config.clone().unwrap(); + let mut prover_gateway = general_config.prover_gateway.clone().unwrap(); + + prover_gateway.api_url = + format!("http://127.0.0.1:{}", proof_data_handler_config.http_port); + + general_config.prover_gateway = Some(prover_gateway); + } + + let mut consensus_config = general_config + .consensus_config + .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; + + let consensus_keys = generate_consensus_keys(); + consensus_config.genesis_spec = Some(get_genesis_specs(chain_config, &consensus_keys)); + + general_config.consensus_config = Some(consensus_config); + general_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize genesis config + let mut genesis_config = chain_config.get_genesis_config()?; + update_from_chain_config(&mut genesis_config, chain_config)?; + genesis_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize contracts config + let mut contracts_config = ecosystem_config.get_contracts_config()?; + contracts_config.l1.diamond_proxy_addr = Address::zero(); + contracts_config.l1.governance_addr = Address::zero(); + contracts_config.l1.chain_admin_addr = Address::zero(); + contracts_config.l1.base_token_addr = chain_config.base_token.address; + contracts_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize secrets config + let mut secrets = chain_config.get_secrets_config()?; + set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; + secrets.consensus = Some(get_consensus_secrets(&consensus_keys)); + secrets.save_with_base_path(shell, &chain_config.configs)?; + + genesis::database::update_configs(init_args.genesis_args.clone(), shell, chain_config)?; + + update_portal_config(shell, chain_config) + .await + .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?; + + Ok(contracts_config) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs similarity index 58% rename from 
zk_toolbox/crates/zk_inception/src/commands/chain/init.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs index 53e49955f5e..8157a131815 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs @@ -1,45 +1,64 @@ use anyhow::Context; +use clap::{command, Parser, Subcommand}; use common::{git, logger, spinner::Spinner}; -use config::{ - copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, - ChainConfig, EcosystemConfig, DEFAULT_CONSENSUS_PORT, -}; +use config::{traits::SaveConfigWithBasePath, ChainConfig, EcosystemConfig}; use types::{BaseToken, L1BatchCommitmentMode}; use xshell::Shell; -use super::common::{distribute_eth, mint_base_token, register_chain}; use crate::{ accept_ownership::{accept_admin, set_da_validator_pair}, - commands::{ - chain::{ - args::init::{InitArgs, InitArgsFinal}, - deploy_l2_contracts, deploy_paymaster, - genesis::genesis, - set_token_multiplier_setter::set_token_multiplier_setter, - setup_legacy_bridge::setup_legacy_bridge, + commands::chain::{ + args::init::{ + configs::{InitConfigsArgs, InitConfigsArgsFinal}, + InitArgs, InitArgsFinal, }, - portal::update_portal_config, + common::{distribute_eth, mint_base_token}, + deploy_l2_contracts, deploy_paymaster, + genesis::genesis, + init::configs::init_configs, + register_chain::register_chain, + set_token_multiplier_setter::set_token_multiplier_setter, + setup_legacy_bridge::setup_legacy_bridge, }, - defaults::PORT_RANGE_END, messages::{ msg_initializing_chain, MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_DA_PAIR_REGISTRATION_SPINNER, MSG_DEPLOYING_PAYMASTER, - MSG_GENESIS_DATABASE_ERR, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, - MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, + MSG_GENESIS_DATABASE_ERR, MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND, }, - utils::{ - consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, - ports::EcosystemPortsScanner, - }, }; -pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { +// Init subcommands +pub mod configs; + +#[derive(Subcommand, Debug, Clone)] +pub enum ChainInitSubcommands { + /// Initialize chain configs + Configs(InitConfigsArgs), +} + +#[derive(Parser, Debug)] +#[command()] +pub struct ChainInitCommand { + #[command(subcommand)] + command: Option, + #[clap(flatten)] + args: InitArgs, +} + +pub(crate) async fn run(args: ChainInitCommand, shell: &Shell) -> anyhow::Result<()> { + match args.command { + Some(ChainInitSubcommands::Configs(args)) => configs::run(args, shell).await, + None => run_init(args.args, shell).await, + } +} + +async fn run_init(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { let config = EcosystemConfig::from_file(shell)?; let chain_config = config .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; - let mut args = args.fill_values_with_prompt(&chain_config); + let args = args.fill_values_with_prompt(&chain_config); logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); logger::info(msg_initializing_chain("")); @@ -47,63 +66,28 @@ pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { git::submodule_update(shell, config.link_to_code.clone())?; } - init(&mut args, shell, &config, &chain_config).await?; + init(&args, shell, &config, &chain_config).await?; 
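A short usage note: config generation is now factored into `init_configs`, so the full flow only needs to derive the configs-only arguments first, as `init` does immediately below. A minimal sketch of that step, assuming the `InitArgsFinal` and `InitConfigsArgsFinal` types introduced in this patch:

    // Derive the configs-only arguments from the full init arguments, then generate
    // the chain configs; `init_configs` hands back the chain's ContractsConfig.
    let init_configs_args = InitConfigsArgsFinal::from_chain_init_args(&args);
    let contracts_config =
        init_configs(&init_configs_args, shell, &config, &chain_config).await?;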
logger::success(MSG_CHAIN_INITIALIZED); Ok(()) } pub async fn init( - init_args: &mut InitArgsFinal, + init_args: &InitArgsFinal, shell: &Shell, ecosystem_config: &EcosystemConfig, chain_config: &ChainConfig, ) -> anyhow::Result<()> { - let mut ecosystem_ports = EcosystemPortsScanner::scan(shell)?; - copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; - - if !init_args.no_port_reallocation { - ecosystem_ports.allocate_ports_in_yaml( - shell, - &chain_config.path_to_general_config(), - chain_config.id, - )?; - } - let mut general_config = chain_config.get_general_config()?; - - // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - let offset = ((chain_config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = - ecosystem_ports.allocate_port(consensus_port_range, "Consensus".to_string())?; - - let consensus_keys = generate_consensus_keys(); - let consensus_config = get_consensus_config( - chain_config, - consensus_port, - Some(consensus_keys.clone()), - None, - )?; - general_config.consensus_config = Some(consensus_config); - general_config.save_with_base_path(shell, &chain_config.configs)?; - - let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, chain_config); - genesis_config.save_with_base_path(shell, &chain_config.configs)?; - - // Copy ecosystem contracts - let mut contracts_config = ecosystem_config.get_contracts_config()?; - contracts_config.l1.base_token_addr = chain_config.base_token.address; - contracts_config.save_with_base_path(shell, &chain_config.configs)?; + // Initialize configs + let init_configs_args = InitConfigsArgsFinal::from_chain_init_args(init_args); + let mut contracts_config = + init_configs(&init_configs_args, shell, ecosystem_config, chain_config).await?; + // Fund some wallet addresses with ETH or base token (only for Localhost) distribute_eth(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; mint_base_token(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; - let mut secrets = chain_config.get_secrets_config()?; - set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; - secrets.consensus = Some(get_consensus_secrets(&consensus_keys)); - secrets.save_with_base_path(shell, &chain_config.configs)?; - + // Register chain on BridgeHub (run by L1 Governor) let spinner = Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); register_chain( shell, @@ -118,12 +102,14 @@ pub async fn init( .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; spinner.finish(); + + // Accept ownership for DiamondProxy (run by L2 Governor) let spinner = Spinner::new(MSG_ACCEPTING_ADMIN_SPINNER); accept_admin( shell, ecosystem_config, contracts_config.l1.chain_admin_addr, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.diamond_proxy_addr, &init_args.forge_args.clone(), init_args.l1_rpc_url.clone(), @@ -131,13 +117,14 @@ pub async fn init( .await?; spinner.finish(); + // Set token multiplier setter address (run by L2 Governor) if chain_config.base_token != BaseToken::eth() { let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); let chain_contracts = chain_config.get_contracts_config()?; set_token_multiplier_setter( shell, ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + 
&chain_config.get_wallets_config()?.governor, chain_contracts.l1.access_control_restriction_addr, chain_contracts.l1.diamond_proxy_addr, chain_config @@ -153,6 +140,7 @@ pub async fn init( spinner.finish(); } + // Deploy L2 contracts: L2SharedBridge, L2DefaultUpgrader, ... (run by L1 Governor) deploy_l2_contracts::deploy_l2_contracts( shell, chain_config, @@ -177,7 +165,7 @@ pub async fn init( shell, ecosystem_config, contracts_config.l1.chain_admin_addr, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.diamond_proxy_addr, l1_da_validator_addr, contracts_config.l2.da_validator_addr, @@ -187,6 +175,7 @@ pub async fn init( .await?; spinner.finish(); + // Setup legacy bridge - shouldn't be used for new chains (run by L1 Governor) if let Some(true) = chain_config.legacy_bridge { setup_legacy_bridge( shell, @@ -198,6 +187,7 @@ pub async fn init( .await?; } + // Deploy Paymaster contract (run by L2 Governor) if init_args.deploy_paymaster { let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); deploy_paymaster::deploy_paymaster( @@ -217,9 +207,5 @@ pub async fn init( .await .context(MSG_GENESIS_DATABASE_ERR)?; - update_portal_config(shell, chain_config) - .await - .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?; - Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_from_gateway.rs b/zkstack_cli/crates/zkstack/src/commands/chain/migrate_from_gateway.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/commands/chain/migrate_from_gateway.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/migrate_from_gateway.rs index 19b0042037d..dca212778fa 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_from_gateway.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/migrate_from_gateway.rs @@ -3,6 +3,7 @@ use clap::Parser; use common::{ config::global_config, forge::{Forge, ForgeScriptArgs}, + wallets::Wallet, withdraw::ZKSProvider, }; use config::{ @@ -24,8 +25,9 @@ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use types::L1BatchCommitmentMode; use xshell::Shell; -use zksync_basic_types::{settlement::SettlementMode, H256, U256, U64}; -use zksync_config::configs::eth_sender::PubdataSendingMode; +use zksync_basic_types::{ + pubdata_da::PubdataSendingMode, settlement::SettlementMode, H256, U256, U64, +}; use zksync_types::L2ChainId; use zksync_web3_decl::client::{Client, L2}; @@ -117,7 +119,7 @@ pub async fn run(args: MigrateFromGatewayArgs, shell: &Shell) -> anyhow::Result< ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -176,7 +178,7 @@ pub async fn run(args: MigrateFromGatewayArgs, shell: &Shell) -> anyhow::Result< ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -269,7 +271,7 @@ async fn call_script( forge_args: ForgeScriptArgs, data: &Bytes, config: &EcosystemConfig, - private_key: Option, + governor: &Wallet, rpc_url: String, ) -> anyhow::Result { let mut forge = Forge::new(&config.path_to_l1_foundry()) @@ -280,7 +282,7 @@ async fn call_script( .with_calldata(data); // Governor private key is required for this script - forge = fill_forge_private_key(forge, private_key)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; forge.run(shell)?; diff 
--git a/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_to_gateway.rs b/zkstack_cli/crates/zkstack/src/commands/chain/migrate_to_gateway.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/migrate_to_gateway.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/migrate_to_gateway.rs index 7cfb041862a..fd2a78e35da 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_to_gateway.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/migrate_to_gateway.rs @@ -3,6 +3,7 @@ use clap::Parser; use common::{ config::global_config, forge::{Forge, ForgeScriptArgs}, + wallets::Wallet, }; use config::{ forge_interface::{ @@ -23,8 +24,10 @@ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use types::L1BatchCommitmentMode; use xshell::Shell; -use zksync_basic_types::{settlement::SettlementMode, Address, H256, U256, U64}; -use zksync_config::configs::{eth_sender::PubdataSendingMode, gateway::GatewayChainConfig}; +use zksync_basic_types::{ + pubdata_da::PubdataSendingMode, settlement::SettlementMode, Address, H256, U256, U64, +}; +use zksync_config::configs::gateway::GatewayChainConfig; use zksync_system_constants::L2_BRIDGEHUB_ADDRESS; use crate::{ @@ -124,9 +127,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - gateway_chain_config - .get_wallets_config()? - .governor_private_key(), + &gateway_chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -147,7 +148,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -218,7 +219,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -246,7 +247,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -268,7 +269,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -293,7 +294,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -312,7 +313,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -423,7 +424,7 @@ async fn call_script( forge_args: ForgeScriptArgs, data: &Bytes, config: &EcosystemConfig, - private_key: Option, + governor: &Wallet, l1_rpc_url: String, ) -> anyhow::Result { let mut forge = Forge::new(&config.path_to_l1_foundry()) @@ -434,7 +435,7 @@ async fn call_script( .with_calldata(data); // Governor private key is required for this script - forge = fill_forge_private_key(forge, 
private_key)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs similarity index 65% rename from zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/mod.rs index 877580d19a8..4846ac5e891 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs @@ -1,17 +1,18 @@ use ::common::forge::ForgeScriptArgs; use args::build_transactions::BuildTransactionsArgs; pub(crate) use args::create::ChainCreateArgsFinal; -use clap::Subcommand; +use clap::{command, Subcommand}; pub(crate) use create::create_chain_inner; use migrate_from_gateway::MigrateFromGatewayArgs; use migrate_to_gateway::MigrateToGatewayArgs; use xshell::Shell; use crate::commands::chain::{ - args::{create::ChainCreateArgs, genesis::GenesisArgs, init::InitArgs}, - deploy_l2_contracts::Deploy2ContractsOption, + args::create::ChainCreateArgs, deploy_l2_contracts::Deploy2ContractsOption, + genesis::GenesisCommand, init::ChainInitCommand, }; +mod accept_chain_ownership; pub(crate) mod args; mod build_transactions; mod common; @@ -20,9 +21,10 @@ mod create; pub mod deploy_l2_contracts; pub mod deploy_paymaster; pub mod genesis; -pub(crate) mod init; +pub mod init; mod migrate_from_gateway; mod migrate_to_gateway; +pub mod register_chain; mod set_token_multiplier_setter; mod setup_legacy_bridge; @@ -33,20 +35,35 @@ pub enum ChainCommands { /// Create unsigned transactions for chain deployment BuildTransactions(BuildTransactionsArgs), /// Initialize chain, deploying necessary contracts and performing on-chain operations - Init(InitArgs), + Init(Box), /// Run server genesis - Genesis(GenesisArgs), - /// Initialize bridges on l2 - #[command(alias = "bridge")] - InitializeBridges(ForgeScriptArgs), - /// Deploy all l2 contracts + Genesis(GenesisCommand), + /// Register a new chain on L1 (executed by L1 governor). + /// This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, + /// registers chain with BridgeHub and sets pending admin for DiamondProxy. + /// Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership` + #[command(alias = "register")] + RegisterChain(ForgeScriptArgs), + /// Deploy all L2 contracts (executed by L1 governor). #[command(alias = "l2")] DeployL2Contracts(ForgeScriptArgs), + /// Accept ownership of L2 chain (executed by L2 governor). + /// This command should be run after `register-chain` to accept ownership of newly created + /// DiamondProxy contract. 
+ #[command(alias = "accept-ownership")] + AcceptChainOwnership(ForgeScriptArgs), + /// Initialize bridges on L2 + #[command(alias = "bridge")] + InitializeBridges(ForgeScriptArgs), /// Deploy L2 consensus registry #[command(alias = "consensus")] DeployConsensusRegistry(ForgeScriptArgs), + /// Deploy L2 multicall3 + #[command(alias = "multicall3")] + DeployMulticall3(ForgeScriptArgs), /// Deploy Default Upgrader - Upgrader(ForgeScriptArgs), + #[command(alias = "upgrader")] + DeployUpgrader(ForgeScriptArgs), /// Deploy paymaster smart contract #[command(alias = "paymaster")] DeployPaymaster(ForgeScriptArgs), @@ -63,16 +80,21 @@ pub enum ChainCommands { pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<()> { match args { ChainCommands::Create(args) => create::run(args, shell), - ChainCommands::Init(args) => init::run(args, shell).await, + ChainCommands::Init(args) => init::run(*args, shell).await, ChainCommands::BuildTransactions(args) => build_transactions::run(args, shell).await, ChainCommands::Genesis(args) => genesis::run(args, shell).await, + ChainCommands::RegisterChain(args) => register_chain::run(args, shell).await, ChainCommands::DeployL2Contracts(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::All).await } + ChainCommands::AcceptChainOwnership(args) => accept_chain_ownership::run(args, shell).await, ChainCommands::DeployConsensusRegistry(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::ConsensusRegistry).await } - ChainCommands::Upgrader(args) => { + ChainCommands::DeployMulticall3(args) => { + deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Multicall3).await + } + ChainCommands::DeployUpgrader(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await } ChainCommands::InitializeBridges(args) => { diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs b/zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs new file mode 100644 index 00000000000..db69ae47952 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs @@ -0,0 +1,96 @@ +use anyhow::Context; +use common::{ + forge::{Forge, ForgeScriptArgs}, + logger, + spinner::Spinner, +}; +use config::{ + forge_interface::{ + register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, + script_params::REGISTER_CHAIN_SCRIPT_PARAMS, + }, + traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, + ChainConfig, ContractsConfig, EcosystemConfig, +}; +use xshell::Shell; + +use crate::{ + messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_CHAIN_REGISTERED, MSG_L1_SECRETS_MUST_BE_PRESENTED, + MSG_REGISTERING_CHAIN_SPINNER, + }, + utils::forge::{check_the_balance, fill_forge_private_key}, +}; + +pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let mut contracts = chain_config.get_contracts_config()?; + let secrets = chain_config.get_secrets_config()?; + let l1_rpc_url = secrets + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)? 
+ .l1_rpc_url + .expose_str() + .to_string(); + let spinner = Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); + register_chain( + shell, + args, + &ecosystem_config, + &chain_config, + &mut contracts, + l1_rpc_url, + None, + true, + ) + .await?; + contracts.save_with_base_path(shell, chain_config.configs)?; + spinner.finish(); + logger::success(MSG_CHAIN_REGISTERED); + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +pub async fn register_chain( + shell: &Shell, + forge_args: ForgeScriptArgs, + config: &EcosystemConfig, + chain_config: &ChainConfig, + contracts: &mut ContractsConfig, + l1_rpc_url: String, + sender: Option, + broadcast: bool, +) -> anyhow::Result<()> { + let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code); + + let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?; + deploy_config.save(shell, deploy_config_path)?; + + let mut forge = Forge::new(&config.path_to_l1_foundry()) + .script(®ISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url(l1_rpc_url); + + if broadcast { + forge = forge.with_broadcast(); + } + + if let Some(address) = sender { + forge = forge.with_sender(address); + } else { + forge = fill_forge_private_key(forge, Some(&config.get_wallets()?.governor))?; + check_the_balance(&forge).await?; + } + + forge.run(shell)?; + + let register_chain_output = RegisterChainOutput::read( + shell, + REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code), + )?; + contracts.set_chain_contracts(®ister_chain_output); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs b/zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs index 913e26f2843..d9d8994af87 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs @@ -3,12 +3,13 @@ use common::{ forge::{Forge, ForgeScript, ForgeScriptArgs}, logger, spinner::Spinner, + wallets::Wallet, }; use config::{forge_interface::script_params::ACCEPT_GOVERNANCE_SCRIPT_PARAMS, EcosystemConfig}; use ethers::{abi::parse_abi, contract::BaseContract, utils::hex}; use lazy_static::lazy_static; use xshell::Shell; -use zksync_basic_types::{Address, H256}; +use zksync_basic_types::Address; use crate::{ messages::{ @@ -52,7 +53,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { set_token_multiplier_setter( shell, &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.access_control_restriction_addr, contracts_config.l1.diamond_proxy_addr, token_multiplier_setter_address, @@ -74,7 +75,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { pub async fn set_token_multiplier_setter( shell: &Shell, ecosystem_config: &EcosystemConfig, - governor: Option, + governor: &Wallet, access_control_restriction_address: Address, diamond_proxy_address: Address, new_setter_address: Address, @@ -112,10 +113,10 @@ pub async fn set_token_multiplier_setter( async fn update_token_multiplier_setter( shell: &Shell, - governor: Option, + governor: &Wallet, mut forge: ForgeScript, ) -> anyhow::Result<()> { - forge = fill_forge_private_key(forge, 
governor)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; forge.run(shell)?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs b/zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs index 925014fe4e6..f61c640ffb6 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs @@ -59,10 +59,7 @@ pub async fn setup_legacy_bridge( ) .with_broadcast(); - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&ecosystem_config.get_wallets()?.governor))?; let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); check_the_balance(&forge).await?; diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs new file mode 100644 index 00000000000..c9d878c8fd3 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs @@ -0,0 +1,47 @@ +use anyhow::Context as _; +use zksync_config::configs::consensus as config; +use zksync_consensus_crypto::TextFmt as _; +use zksync_consensus_roles::attester; +use zksync_protobuf::{ProtoFmt, ProtoRepr}; + +use super::proto; +use crate::utils::consensus::parse_attester_committee; + +#[derive(Debug, Clone, PartialEq)] +pub(super) struct SetAttesterCommitteeFile { + pub attesters: attester::Committee, +} + +impl ProtoFmt for SetAttesterCommitteeFile { + type Proto = proto::SetAttesterCommitteeFile; + + fn read(r: &Self::Proto) -> anyhow::Result { + // zksync_config was not allowed to depend on consensus crates, + // therefore to parse the config we need to go through the intermediate + // representation of consensus types defined in zksync_config. + let attesters: Vec<_> = r + .attesters + .iter() + .map(|x| x.read()) + .collect::>() + .context("attesters")?; + Ok(Self { + attesters: parse_attester_committee(&attesters)?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + attesters: self + .attesters + .iter() + .map(|a| { + ProtoRepr::build(&config::WeightedAttester { + key: config::AttesterPublicKey(a.key.encode()), + weight: a.weight, + }) + }) + .collect(), + } + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs similarity index 79% rename from zk_toolbox/crates/zk_inception/src/commands/consensus.rs rename to zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs index 7cf96ebe5ad..1855a5943dc 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs @@ -1,10 +1,11 @@ -use std::{borrow::Borrow, collections::HashMap, sync::Arc}; +use std::{borrow::Borrow, collections::HashMap, path::PathBuf, sync::Arc}; /// Consensus registry contract operations. /// Includes code duplicated from `zksync_node_consensus::registry::abi`. 
use anyhow::Context as _; -use common::logger; +use common::{logger, wallets::Wallet}; use config::EcosystemConfig; +use conv::*; use ethers::{ abi::Detokenize, contract::{FunctionCall, Multicall}, @@ -19,6 +20,11 @@ use zksync_consensus_roles::{attester, validator}; use crate::{messages, utils::consensus::parse_attester_committee}; +mod conv; +mod proto; +#[cfg(test)] +mod tests; + #[allow(warnings)] mod abi { include!(concat!(env!("OUT_DIR"), "/consensus_registry_abi.rs")); @@ -65,11 +71,25 @@ fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::Bls12381Sign } } +#[derive(clap::Args, Debug)] +#[group(required = true, multiple = false)] +pub struct SetAttesterCommitteeCommand { + /// Sets the attester committee in the consensus registry contract to + /// `consensus.genesis_spec.attesters` in general.yaml. + #[clap(long)] + from_genesis: bool, + /// Sets the attester committee in the consensus registry contract to + /// the committee in the yaml file. + /// File format is definied in `commands/consensus/proto/mod.proto`. + #[clap(long)] + from_file: Option, +} + #[derive(clap::Subcommand, Debug)] pub enum Command { /// Sets the attester committee in the consensus registry contract to /// `consensus.genesis_spec.attesters` in general.yaml. - SetAttesterCommittee, + SetAttesterCommittee(SetAttesterCommitteeCommand), /// Fetches the attester committee from the consensus registry contract. GetAttesterCommittee, } @@ -154,26 +174,26 @@ impl Setup { )?) } - fn governor(&self) -> anyhow::Result> { - let governor = self + fn governor(&self) -> anyhow::Result { + Ok(self .chain .get_wallets_config() .context("get_wallets_config()")? - .governor - .private_key - .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?; - let governor = LocalWallet::from_bytes(governor.as_bytes()) - .context("LocalWallet::from_bytes()")? - .with_chain_id(self.genesis.l2_chain_id.as_u64()); + .governor) + } + + fn signer(&self, wallet: LocalWallet) -> anyhow::Result> { + let wallet = wallet.with_chain_id(self.genesis.l2_chain_id.as_u64()); let provider = self.provider().context("provider()")?; - let signer = SignerMiddleware::new(provider, governor.clone()); + let signer = SignerMiddleware::new(provider, wallet.clone()); // Allows us to send next transaction without waiting for the previous to complete. - let signer = NonceManagerMiddleware::new(signer, governor.address()); + let signer = NonceManagerMiddleware::new(signer, wallet.address()); Ok(Arc::new(signer)) } fn new(shell: &Shell) -> anyhow::Result { - let ecosystem_config = EcosystemConfig::from_file(shell)?; + let ecosystem_config = + EcosystemConfig::from_file(shell).context("EcosystemConfig::from_file()")?; let chain = ecosystem_config .load_current_chain() .context(messages::MSG_CHAIN_NOT_INITIALIZED)?; @@ -227,9 +247,21 @@ impl Setup { attester::Committee::new(attesters.into_iter()).context("attester::Committee::new()") } - async fn set_attester_committee(&self) -> anyhow::Result { + fn read_attester_committee( + &self, + opts: &SetAttesterCommitteeCommand, + ) -> anyhow::Result { // Fetch the desired state. 
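For orientation, a sketch under the assumptions of this patch: the desired committee is no longer read only from `consensus.genesis_spec.attesters` in general.yaml; `read_attester_committee` also accepts a standalone YAML file via `--from-file`, whose schema is the `SetAttesterCommitteeFile` proto message added below. The dispatch added later in this file boils down to roughly:

    // Illustrative only: resolve the wanted committee (genesis spec or file),
    // push it to the consensus registry, then read it back to verify.
    let opts = SetAttesterCommitteeCommand { from_genesis: true, from_file: None };
    let want = setup.read_attester_committee(&opts)?;
    setup.set_attester_committee(&want).await?;
    let got = setup.get_attester_committee().await?;
    anyhow::ensure!(got == want, "committee mismatch");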
- let want = (|| { + if let Some(path) = &opts.from_file { + let yaml = std::fs::read_to_string(path).context("read_to_string()")?; + let file: SetAttesterCommitteeFile = zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_fmt_from_yaml(&yaml) + .context("proto_fmt_from_yaml()")?; + return Ok(file.attesters); + } + let attesters = (|| { Some( &self .general @@ -241,15 +273,32 @@ impl Setup { ) })() .context(messages::MSG_CONSENSUS_GENESIS_SPEC_ATTESTERS_MISSING_IN_GENERAL_YAML)?; - let want = parse_attester_committee(want).context("parse_attester_committee()")?; + parse_attester_committee(attesters).context("parse_attester_committee()") + } + async fn set_attester_committee(&self, want: &attester::Committee) -> anyhow::Result<()> { let provider = self.provider().context("provider()")?; let block_id = self.last_block(&provider).await.context("last_block()")?; let governor = self.governor().context("governor()")?; + let signer = self.signer( + governor + .private_key + .clone() + .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?, + )?; let consensus_registry = self - .consensus_registry(governor.clone()) + .consensus_registry(signer.clone()) .context("consensus_registry()")?; - let mut multicall = self.multicall(governor.clone()).context("multicall()")?; + let mut multicall = self.multicall(signer).context("multicall()")?; + + let owner = consensus_registry.owner().call().await.context("owner()")?; + if owner != governor.address { + anyhow::bail!( + "governor ({:#x}) is different than the consensus registry owner ({:#x})", + governor.address, + owner + ); + } // Fetch contract state. let n: usize = consensus_registry @@ -337,7 +386,7 @@ impl Setup { ) .await?; txs.wait(&provider).await.context("wait()")?; - Ok(want) + Ok(()) } } @@ -345,8 +394,11 @@ impl Command { pub(crate) async fn run(self, shell: &Shell) -> anyhow::Result<()> { let setup = Setup::new(shell).context("Setup::new()")?; match self { - Self::SetAttesterCommittee => { - let want = setup.set_attester_committee().await?; + Self::SetAttesterCommittee(opts) => { + let want = setup + .read_attester_committee(&opts) + .context("read_attester_committee()")?; + setup.set_attester_committee(&want).await?; let got = setup.get_attester_committee().await?; anyhow::ensure!( got == want, diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto new file mode 100644 index 00000000000..d8a7323f714 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package zksync.toolbox.consensus; + +import "zksync/core/consensus.proto"; + +message SetAttesterCommitteeFile { + repeated core.consensus.WeightedAttester attesters = 1; +} diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs new file mode 100644 index 00000000000..61a0a047f0a --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs @@ -0,0 +1,6 @@ +#![allow(warnings)] + +include!(concat!( + env!("OUT_DIR"), + "/src/commands/consensus/proto/gen.rs" +)); diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs new file mode 100644 index 00000000000..c2f393ad229 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs @@ -0,0 +1,19 @@ +use rand::{distributions::Distribution, Rng}; +use 
zksync_consensus_utils::EncodeDist; +use zksync_protobuf::testonly::{test_encode_all_formats, FmtConv}; + +use super::SetAttesterCommitteeFile; + +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> SetAttesterCommitteeFile { + SetAttesterCommitteeFile { + attesters: rng.gen(), + } + } +} + +#[test] +fn test_encoding() { + let rng = &mut rand::thread_rng(); + test_encode_all_formats::>(rng); +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/containers.rs b/zkstack_cli/crates/zkstack/src/commands/containers.rs similarity index 90% rename from zk_toolbox/crates/zk_inception/src/commands/containers.rs rename to zkstack_cli/crates/zkstack/src/commands/containers.rs index 9c11cc2e3ef..8367289bd67 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/containers.rs +++ b/zkstack_cli/crates/zkstack/src/commands/containers.rs @@ -36,10 +36,6 @@ pub fn run(shell: &Shell, args: ContainersArgs) -> anyhow::Result<()> { } pub fn initialize_docker(shell: &Shell, ecosystem: &EcosystemConfig) -> anyhow::Result<()> { - if !shell.path_exists("volumes") { - create_docker_folders(shell)?; - }; - if !shell.path_exists(DOCKER_COMPOSE_FILE) { copy_dockerfile(shell, ecosystem.link_to_code.clone())?; }; @@ -75,14 +71,6 @@ pub fn start_containers(shell: &Shell, observability: bool) -> anyhow::Result<() Ok(()) } -fn create_docker_folders(shell: &Shell) -> anyhow::Result<()> { - shell.create_dir("volumes")?; - shell.create_dir("volumes/postgres")?; - shell.create_dir("volumes/reth")?; - shell.create_dir("volumes/reth/data")?; - Ok(()) -} - fn copy_dockerfile(shell: &Shell, link_to_code: PathBuf) -> anyhow::Result<()> { let docker_compose_file = link_to_code.join(DOCKER_COMPOSE_FILE); diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs similarity index 81% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs index 6f7eae4c168..ab169220f29 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs +++ b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use common::{cmd::Cmd, spinner::Spinner}; +use common::spinner::Spinner; use serde::Deserialize; -use xshell::{cmd, Shell}; +use xshell::Shell; use crate::messages::{MSG_INVALID_ARCH_ERR, MSG_NO_RELEASES_FOUND_ERR}; @@ -76,16 +76,19 @@ fn get_compatible_archs(asset_name: &str) -> anyhow::Result> { fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result> { if repo == "ethereum/solc-bin" { - return get_solc_releases(shell, arch); + return get_solc_releases(arch); } - let response: std::process::Output = 
Cmd::new(cmd!( - shell, - "curl https://api.github.com/repos/{repo}/releases" - )) - .run_with_output()?; + let client = reqwest::blocking::Client::new(); + let mut request = client + .get(format!("https://api.github.com/repos/{repo}/releases")) + .header("User-Agent", "zkstack"); - let response = String::from_utf8(response.stdout)?; + if let Ok(token) = shell.var("GITHUB_TOKEN") { + request = request.header("Authorization", format!("Bearer {}", token)); + } + + let response = request.send()?.text()?; let releases: Vec = serde_json::from_str(&response)?; let mut versions = vec![]; @@ -109,7 +112,7 @@ fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result anyhow::Result> { +fn get_solc_releases(arch: Arch) -> anyhow::Result> { let (arch_str, compatible_archs) = match arch { Arch::LinuxAmd => ("linux-amd64", vec![Arch::LinuxAmd, Arch::LinuxArm]), Arch::LinuxArm => ("linux-amd64", vec![Arch::LinuxAmd, Arch::LinuxArm]), @@ -117,13 +120,15 @@ fn get_solc_releases(shell: &Shell, arch: Arch) -> anyhow::Result> Arch::MacosArm => ("macosx-amd64", vec![Arch::MacosAmd, Arch::MacosArm]), }; - let response: std::process::Output = Cmd::new(cmd!( - shell, - "curl https://raw.githubusercontent.com/ethereum/solc-bin/gh-pages/{arch_str}/list.json" - )) - .run_with_output()?; + let client = reqwest::blocking::Client::new(); + let response = client + .get(format!( + "https://raw.githubusercontent.com/ethereum/solc-bin/gh-pages/{arch_str}/list.json" + )) + .header("User-Agent", "zkstack") + .send()? + .text()?; - let response = String::from_utf8(response.stdout)?; let solc_list: SolcList = serde_json::from_str(&response)?; let mut versions = vec![]; diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs index f376a0d36ec..b173ad9bbb7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs @@ -89,7 +89,8 @@ fn download_binary( let spinner = Spinner::new(&msg_downloading_binary_spinner(name, version)); Cmd::new(cmd!(shell, "mkdir -p {path}")).run()?; - Cmd::new(cmd!(shell, "wget {url} -O {binary_path}")).run()?; + let response = reqwest::blocking::get(url)?.bytes()?; + shell.write_file(binary_path.clone(), &response)?; Cmd::new(cmd!(shell, "chmod +x {binary_path}")).run()?; spinner.finish(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs similarity index 92% rename from zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs rename to 
zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs index 06ee1347ea4..06dff541f94 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs @@ -4,9 +4,8 @@ use common::{docker, logger}; use config::{EcosystemConfig, DOCKER_COMPOSE_FILE}; use xshell::Shell; -use crate::messages::{ - MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_CLEANED, - MSG_DOCKER_COMPOSE_DOWN, MSG_DOCKER_COMPOSE_REMOVE_VOLUMES, +use crate::commands::dev::messages::{ + MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_DOWN, }; #[derive(Subcommand, Debug)] @@ -35,9 +34,6 @@ pub fn run(shell: &Shell, args: CleanCommands) -> anyhow::Result<()> { pub fn containers(shell: &Shell) -> anyhow::Result<()> { logger::info(MSG_DOCKER_COMPOSE_DOWN); docker::down(shell, DOCKER_COMPOSE_FILE)?; - logger::info(MSG_DOCKER_COMPOSE_REMOVE_VOLUMES); - shell.remove_path("volumes")?; - logger::info(MSG_DOCKER_COMPOSE_CLEANED); Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs similarity index 96% rename from zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs index 04e019936e1..70238ed15f3 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs @@ -4,7 +4,7 @@ use common::{logger, Prompt}; use config::{override_config, EcosystemConfig}; use xshell::Shell; -use crate::messages::{ +use crate::commands::dev::messages::{ msg_overriding_config, MSG_CHAIN_NOT_FOUND_ERR, MSG_OVERRIDE_CONFIG_PATH_HELP, MSG_OVERRIDE_SUCCESS, MSG_OVERRRIDE_CONFIG_PATH_PROMPT, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs similarity index 66% rename from zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs index b0f129f7dde..ff638a033dd 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs @@ -1,17 +1,23 @@ use std::path::PathBuf; use clap::Parser; -use common::{cmd::Cmd, logger, spinner::Spinner}; +use common::{ + contracts::{ + build_l1_contracts, build_l1_da_contracts, build_l2_contracts, build_system_contracts, + build_test_contracts, + }, + logger, + spinner::Spinner, +}; use config::EcosystemConfig; -use xshell::{cmd, Shell}; +use xshell::Shell; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER, MSG_BUILDING_L1_DA_CONTRACTS_SPINNER, MSG_BUILDING_L2_CONTRACTS_SPINNER, MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER, MSG_BUILDING_TEST_CONTRACTS_SPINNER, MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L1_DA_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, - MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_CONTRACTS_DEPS_SPINNER, - MSG_NOTHING_TO_BUILD_MSG, + MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_NOTHING_TO_BUILD_MSG, }; #[derive(Debug, Parser)] @@ -76,72 +82,46 @@ pub enum ContractType { TestContracts, } -#[derive(Debug)] struct ContractBuilder { - dir: PathBuf, - cmd: String, + cmd: Box anyhow::Result<()>>, msg: String, + link_to_code: 
PathBuf, } impl ContractBuilder { fn new(ecosystem: &EcosystemConfig, contract_type: ContractType) -> Self { match contract_type { ContractType::L1 => Self { - dir: ecosystem.path_to_l1_foundry(), - cmd: "forge build".to_string(), + cmd: Box::new(build_l1_contracts), msg: MSG_BUILDING_L1_CONTRACTS_SPINNER.to_string(), + link_to_code: ecosystem.link_to_code.clone(), }, ContractType::L1DA => Self { - dir: ecosystem.link_to_code.join("contracts/da-contracts"), - cmd: "forge build".to_string(), + cmd: Box::new(build_l1_da_contracts), msg: MSG_BUILDING_L1_DA_CONTRACTS_SPINNER.to_string(), + link_to_code: ecosystem.link_to_code.clone(), }, ContractType::L2 => Self { - dir: ecosystem.link_to_code.clone(), - cmd: "yarn l2-contracts build".to_string(), + cmd: Box::new(build_l2_contracts), msg: MSG_BUILDING_L2_CONTRACTS_SPINNER.to_string(), + link_to_code: ecosystem.link_to_code.clone(), }, ContractType::SystemContracts => Self { - dir: ecosystem.link_to_code.join("contracts"), - cmd: "yarn sc build".to_string(), + cmd: Box::new(build_system_contracts), msg: MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER.to_string(), + link_to_code: ecosystem.link_to_code.clone(), }, ContractType::TestContracts => Self { - dir: ecosystem.link_to_code.join("etc/contracts-test-data"), - cmd: "yarn build".to_string(), + cmd: Box::new(build_test_contracts), msg: MSG_BUILDING_TEST_CONTRACTS_SPINNER.to_string(), + link_to_code: ecosystem.link_to_code.clone(), }, } } - fn build(&self, shell: &Shell) -> anyhow::Result<()> { + fn build(self, shell: Shell) -> anyhow::Result<()> { let spinner = Spinner::new(&self.msg); - let _dir_guard = shell.push_dir(&self.dir); - - // FIXME: extreme hack, we also need to build 1l contracts without foundry for now - if self.msg == MSG_BUILDING_L1_CONTRACTS_SPINNER { - let cstr = "yarn build".to_string(); - let mut args = cstr.split_whitespace().collect::>(); - let command = args.remove(0); // It's safe to unwrap here because we know that the vec is not empty - let mut cmd = cmd!(shell, "{command}"); - - for arg in args { - cmd = cmd.arg(arg); - } - - Cmd::new(cmd).run()?; - } - - let mut args = self.cmd.split_whitespace().collect::>(); - let command = args.remove(0); // It's safe to unwrap here because we know that the vec is not empty - let mut cmd = cmd!(shell, "{command}"); - - for arg in args { - cmd = cmd.arg(arg); - } - - Cmd::new(cmd).run()?; - + (self.cmd)(shell, self.link_to_code.clone())?; spinner.finish(); Ok(()) } @@ -157,17 +137,11 @@ pub fn run(shell: &Shell, args: ContractsArgs) -> anyhow::Result<()> { logger::info(MSG_BUILDING_CONTRACTS); let ecosystem = EcosystemConfig::from_file(shell)?; - let link_to_code = ecosystem.link_to_code.clone(); - - let spinner = Spinner::new(MSG_CONTRACTS_DEPS_SPINNER); - let _dir_guard = shell.push_dir(&link_to_code); - Cmd::new(cmd!(shell, "yarn install")).run()?; - spinner.finish(); contracts .iter() .map(|contract| ContractBuilder::new(&ecosystem, *contract)) - .try_for_each(|builder| builder.build(shell))?; + .try_for_each(|builder| builder.build(shell.clone()))?; logger::outro(MSG_BUILDING_CONTRACTS_SUCCESS); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs index cf9dfc2834a..f05e3ee1c0e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs 
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::{ +use crate::commands::dev::{ dals::SelectedDals, messages::{ MSG_DATABASE_COMMON_CORE_HELP, MSG_DATABASE_COMMON_CORE_URL_HELP, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs index 64b7a507abe..b91b048be78 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs @@ -2,7 +2,7 @@ use clap::{Parser, ValueEnum}; use common::{Prompt, PromptSelect}; use strum::{Display, EnumIter, IntoEnumIterator}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DATABASE_NEW_MIGRATION_DATABASE_HELP, MSG_DATABASE_NEW_MIGRATION_DB_PROMPT, MSG_DATABASE_NEW_MIGRATION_NAME_HELP, MSG_DATABASE_NEW_MIGRATION_NAME_PROMPT, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs index 0c401595690..990fca78641 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs index 94bf325a2c6..a5578d41f77 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs @@ -6,7 +6,7 @@ use common::{ use xshell::Shell; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_DROP_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs index 1d648965c24..fd22f769742 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, 
MSG_DATABASE_MIGRATE_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs index 415b81879f1..ed039fc6501 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use xshell::Shell; use self::args::{new_migration::DatabaseNewMigrationArgs, DatabaseCommonArgs}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DATABASE_CHECK_SQLX_DATA_ABOUT, MSG_DATABASE_DROP_ABOUT, MSG_DATABASE_MIGRATE_ABOUT, MSG_DATABASE_NEW_MIGRATION_ABOUT, MSG_DATABASE_PREPARE_ABOUT, MSG_DATABASE_RESET_ABOUT, MSG_DATABASE_SETUP_ABOUT, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs index e21b7cde47b..2d9fa103053 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::new_migration::{DatabaseNewMigrationArgs, SelectedDatabase}; -use crate::{ +use crate::commands::dev::{ dals::{get_core_dal, get_prover_dal, Dal}, messages::{msg_database_new_migration_loading, MSG_DATABASE_NEW_MIGRATION_SUCCESS}, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs index 82ec12f9412..288a68452fd 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_PREPARE_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs index f0262cecb95..55d5ab1cbfc 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::Shell; use super::{args::DatabaseCommonArgs, drop::drop_database, setup::setup_database}; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_RESET_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs 
b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs index 15b3ac5c1c7..74ade66ba48 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_SETUP_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs similarity index 92% rename from zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs index 3aefc15aba7..ebaf27845e0 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs @@ -6,7 +6,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::sql_fmt::format_sql; -use crate::{ +use crate::commands::dev::{ commands::lint_utils::{get_unignored_files, Target}, messages::{ msg_running_fmt_for_extension_spinner, msg_running_fmt_for_extensions_spinner, @@ -42,7 +42,7 @@ async fn prettier_contracts(shell: Shell, check: bool) -> anyhow::Result<()> { } async fn rustfmt(shell: Shell, check: bool, link_to_code: PathBuf) -> anyhow::Result<()> { - for dir in [".", "prover", "zk_toolbox"] { + for dir in [".", "prover", "zkstack_cli"] { let spinner = Spinner::new(&msg_running_rustfmt_for_dir_spinner(dir)); let _dir = shell.push_dir(link_to_code.join(dir)); let mut cmd = cmd!(shell, "cargo fmt -- --config imports_granularity=Crate --config group_imports=StdExternalCrate"); @@ -101,14 +101,9 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> { ))); tasks.push(tokio::spawn(prettier_contracts(shell.clone(), args.check))); - futures::future::join_all(tasks) - .await - .iter() - .for_each(|res| { - if let Err(err) = res { - logger::error(err) - } - }); + for result in futures::future::join_all(tasks).await { + result??; + } } Some(Formatter::Prettier { mut targets }) => { if targets.is_empty() { diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs new file mode 100644 index 00000000000..683ffe19916 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs @@ -0,0 +1,26 @@ +use anyhow::Context; +use common::{cmd::Cmd, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use crate::{ + commands::dev::{ + commands::database::reset::reset_database, dals::get_core_dal, + messages::MSG_GENESIS_FILE_GENERATION_STARTED, + }, + messages::MSG_CHAIN_NOT_FOUND_ERR, +}; + +pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_chain(Some(ecosystem.current_chain().to_string())) + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let spinner = Spinner::new(MSG_GENESIS_FILE_GENERATION_STARTED); + let secrets_path = chain.path_to_secrets_config(); + let dal = get_core_dal(shell, None)?; + reset_database(shell, ecosystem.link_to_code, dal).await?; + Cmd::new(cmd!(shell,"cargo run --package genesis_generator --bin 
genesis_generator -- --config-path={secrets_path}")).run()?; + spinner.finish(); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs similarity index 62% rename from zk_toolbox/crates/zk_supervisor/src/commands/lint.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs index 45a7a46ebbe..6c3c3fa3d75 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs @@ -1,13 +1,23 @@ +use std::{ + fs::File, + io::{Read, Write}, + path::Path, +}; + +use anyhow::{bail, Context}; use clap::Parser; use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::{ - commands::lint_utils::{get_unignored_files, Target}, - messages::{ - msg_running_linter_for_extension_spinner, msg_running_linters_for_files, - MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER, +use crate::commands::{ + autocomplete::{autocomplete_file_name, generate_completions}, + dev::{ + commands::lint_utils::{get_unignored_files, Target}, + messages::{ + msg_running_linter_for_extension_spinner, msg_running_linters_for_files, + MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER, + }, }, }; @@ -30,6 +40,7 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { Target::Js, Target::Ts, Target::Contracts, + Target::Autocompletion, ] } else { args.targets.clone() @@ -43,10 +54,13 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { match target { Target::Rs => lint_rs(shell, &ecosystem, args.check)?, Target::Contracts => lint_contracts(shell, &ecosystem, args.check)?, + Target::Autocompletion => lint_autocompletion_files(shell, args.check)?, ext => lint(shell, &ecosystem, &ext, args.check)?, } } + logger::outro("Linting complete."); + Ok(()) } @@ -55,8 +69,8 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::R let link_to_code = &ecosystem.link_to_code; let lint_to_prover = &ecosystem.link_to_code.join("prover"); - let link_to_toolbox = &ecosystem.link_to_code.join("zk_toolbox"); - let paths = vec![link_to_code, lint_to_prover, link_to_toolbox]; + let link_to_zkstack = &ecosystem.link_to_code.join("zkstack_cli"); + let paths = vec![link_to_code, lint_to_prover, link_to_zkstack]; spinner.freeze(); for path in paths { @@ -81,6 +95,7 @@ fn get_linter(target: &Target) -> Vec { Target::Js => vec!["eslint".to_string()], Target::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()], Target::Contracts => vec![], + Target::Autocompletion => vec![], } } @@ -133,3 +148,45 @@ fn lint_contracts(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> an Ok(()) } + +fn lint_autocompletion_files(_shell: &Shell, check: bool) -> anyhow::Result<()> { + let completion_folder = Path::new("./zkstack_cli/crates/zkstack/completion/"); + if !completion_folder.exists() { + logger::info("WARNING: Please run this command from the project's root folder"); + return Ok(()); + } + + // Array of supported shells + let shells = [ + clap_complete::Shell::Bash, + clap_complete::Shell::Fish, + clap_complete::Shell::Zsh, + ]; + + for shell in shells { + let mut writer = Vec::new(); + + generate_completions(shell, &mut writer) + .context("Failed to generate autocompletion file")?; + + let new = String::from_utf8(writer)?; + + let path = completion_folder.join(autocomplete_file_name(&shell)); + let mut autocomplete_file = 
File::open(path.clone()) + .context(format!("failed to open {}", autocomplete_file_name(&shell)))?; + + let mut old = String::new(); + autocomplete_file.read_to_string(&mut old)?; + + if new != old { + if !check { + let mut autocomplete_file = File::create(path).context("Failed to create file")?; + autocomplete_file.write_all(new.as_bytes())?; + } else { + bail!("Autocompletion files need to be regenerated. Run `zkstack dev lint -t autocompletion` to fix this issue.") + } + } + } + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs index 9095e445384..11a32504710 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs @@ -14,6 +14,7 @@ pub enum Target { Ts, Rs, Contracts, + Autocompletion, } #[derive(Deserialize, Serialize, Debug)] diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs similarity index 87% rename from zk_toolbox/crates/zk_supervisor/src/commands/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs index 38ec586e745..a292168dc6e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs @@ -3,10 +3,12 @@ pub mod config_writer; pub mod contracts; pub mod database; pub mod fmt; +pub mod genesis; pub mod lint; pub(crate) mod lint_utils; pub mod prover; pub mod send_transactions; pub mod snapshot; pub(crate) mod sql_fmt; +pub mod status; pub mod test; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_batch.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_batch.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_version.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_version.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs index 441edb2c4b2..84873e931b3 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs @@ -8,7 +8,7 @@ use common::logger; use config::{ChainConfig, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; +use 
crate::commands::dev::messages::MSG_CHAIN_NOT_FOUND_ERR; pub async fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs index 8c2cdd4d88d..0e0c0ba33af 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs @@ -2,7 +2,7 @@ use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::prover::{ args::insert_batch::{InsertBatchArgs, InsertBatchArgsFinal}, info, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs index 3dd9b7e0a1b..f7bd175f577 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs @@ -2,7 +2,7 @@ use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::prover::{ args::insert_version::{InsertVersionArgs, InsertVersionArgsFinal}, info, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs index e3d4f220ff2..03d9ec9b736 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs @@ -4,7 +4,7 @@ use clap::Parser; use common::Prompt; use url::Url; -use crate::{ +use crate::commands::dev::{ defaults::LOCAL_RPC_URL, messages::{ MSG_INVALID_L1_RPC_URL_ERR, MSG_PROMPT_L1_RPC_URL, MSG_PROMPT_SECRET_KEY, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs similarity index 99% rename from zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs index 79d8efc600e..2f54579ade9 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs @@ -17,7 +17,7 
@@ use tokio::time::sleep; use xshell::Shell; use zksync_basic_types::{H160, U256}; -use crate::{ +use crate::commands::dev::{ consts::DEFAULT_UNSIGNED_TRANSACTIONS_DIR, messages::{ msg_send_txns_outro, MSG_FAILED_TO_SEND_TXN_ERR, MSG_UNABLE_TO_OPEN_FILE_ERR, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs similarity index 91% rename from zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs index 608c5623334..8e4c7183cb5 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs @@ -4,7 +4,7 @@ use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_RUNNING_SNAPSHOT_CREATOR}; +use crate::commands::dev::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_RUNNING_SNAPSHOT_CREATOR}; #[derive(Subcommand, Debug)] pub enum SnapshotCommands { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs index ede2500e6ab..0f7ce061ce1 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs @@ -6,7 +6,7 @@ use sqruff_lib::{api::simple::get_simple_config, core::linter::core::Linter}; use xshell::Shell; use super::lint_utils::{get_unignored_files, IgnoredData, Target}; -use crate::messages::{msg_file_is_not_formatted, MSG_RUNNING_SQL_FMT_SPINNER}; +use crate::commands::dev::messages::{msg_file_is_not_formatted, MSG_RUNNING_SQL_FMT_SPINNER}; fn format_query(query: &str) -> anyhow::Result { let exclude_rules = vec!["LT12".to_string()]; // avoid adding newline before `$` character @@ -138,7 +138,7 @@ pub async fn format_sql(shell: Shell, check: bool) -> anyhow::Result<()> { let spinner = Spinner::new(MSG_RUNNING_SQL_FMT_SPINNER); let ignored_data = Some(IgnoredData { files: vec![], - dirs: vec!["zk_toolbox".to_string()], + dirs: vec!["zkstack_cli".to_string()], }); let rust_files = get_unignored_files(&shell, &Target::Rs, ignored_data)?; for file in rust_files { diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs new file mode 100644 index 00000000000..5ac52bf854a --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs @@ -0,0 +1,45 @@ +use anyhow::Context; +use clap::Parser; +use config::EcosystemConfig; +use xshell::Shell; + +use crate::{ + commands::dev::messages::{ + MSG_API_CONFIG_NOT_FOUND_ERR, MSG_STATUS_PORTS_HELP, MSG_STATUS_URL_HELP, + }, + messages::MSG_CHAIN_NOT_FOUND_ERR, +}; + +#[derive(Debug, Parser)] +pub enum StatusSubcommands { + #[clap(about = MSG_STATUS_PORTS_HELP)] + Ports, +} + +#[derive(Debug, Parser)] +pub struct StatusArgs { + #[clap(long, short = 'u', help = MSG_STATUS_URL_HELP)] + pub url: Option, + #[clap(subcommand)] + pub subcommand: Option, +} + +impl StatusArgs { + pub fn get_url(&self, shell: &Shell) -> anyhow::Result { + if let Some(url) = &self.url { + Ok(url.clone()) + } else { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_current_chain() + 
.context(MSG_CHAIN_NOT_FOUND_ERR)?;
+            let general_config = chain.get_general_config()?;
+            let health_check_port = general_config
+                .api_config
+                .context(MSG_API_CONFIG_NOT_FOUND_ERR)?
+                .healthcheck
+                .port;
+            Ok(format!("http://localhost:{}/health", health_check_port))
+        }
+    }
+}
diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs
new file mode 100644
index 00000000000..d38d5b6d29f
--- /dev/null
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs
@@ -0,0 +1,88 @@
+use crate::{commands::dev::commands::status::utils::is_port_open, utils::ports::PortInfo};
+
+const DEFAULT_LINE_WIDTH: usize = 32;
+
+pub struct BoxProperties {
+    longest_line: usize,
+    border: String,
+    boxed_msg: Vec<String>,
+}
+
+impl BoxProperties {
+    fn new(msg: &str) -> Self {
+        let longest_line = msg
+            .lines()
+            .map(|line| line.len())
+            .max()
+            .unwrap_or(0)
+            .max(DEFAULT_LINE_WIDTH);
+        let width = longest_line + 2;
+        let border = "─".repeat(width);
+        let boxed_msg = msg
+            .lines()
+            .map(|line| format!("│ {:longest_line$} │", line))
+            .collect();
+        Self {
+            longest_line,
+            border,
+            boxed_msg,
+        }
+    }
+}
+
+fn single_bordered_box(msg: &str) -> String {
+    let properties = BoxProperties::new(msg);
+    format!(
+        "┌{}┐\n{}\n└{}┘\n",
+        properties.border,
+        properties.boxed_msg.join("\n"),
+        properties.border
+    )
+}
+
+pub fn bordered_boxes(msg1: &str, msg2: Option<&String>) -> String {
+    if msg2.is_none() {
+        return single_bordered_box(msg1);
+    }
+
+    let properties1 = BoxProperties::new(msg1);
+    let properties2 = BoxProperties::new(msg2.unwrap());
+
+    let max_lines = properties1.boxed_msg.len().max(properties2.boxed_msg.len());
+    let header = format!("┌{}┐ ┌{}┐\n", properties1.border, properties2.border);
+    let footer = format!("└{}┘ └{}┘\n", properties1.border, properties2.border);
+
+    let empty_line1 = format!(
+        "│ {:longest_line$} │",
+        "",
+        longest_line = properties1.longest_line
+    );
+    let empty_line2 = format!(
+        "│ {:longest_line$} │",
+        "",
+        longest_line = properties2.longest_line
+    );
+
+    let boxed_info: Vec<String> = (0..max_lines)
+        .map(|i| {
+            let line1 = properties1.boxed_msg.get(i).unwrap_or(&empty_line1);
+            let line2 = properties2.boxed_msg.get(i).unwrap_or(&empty_line2);
+            format!("{} {}", line1, line2)
+        })
+        .collect();
+
+    format!("{}{}\n{}", header, boxed_info.join("\n"), footer)
+}
+
+pub fn format_port_info(port_info: &PortInfo) -> String {
+    let in_use_tag = if is_port_open(port_info.port) {
+        " [OPEN]"
+    } else {
+        ""
+    };
+
+    format!(
+        " - {}{} > {}\n",
+        port_info.port, in_use_tag, port_info.description
+    )
+}
diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs
new file mode 100644
index 00000000000..8687fcb0476
--- /dev/null
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs
@@ -0,0 +1,135 @@
+use std::collections::HashMap;
+
+use anyhow::Context;
+use args::{StatusArgs, StatusSubcommands};
+use common::logger;
+use draw::{bordered_boxes, format_port_info};
+use serde::Deserialize;
+use serde_json::Value;
+use utils::deslugify;
+use xshell::Shell;
+
+use crate::{
+    commands::dev::messages::{
+        msg_failed_parse_response, msg_not_ready_components, msg_system_status,
+        MSG_ALL_COMPONENTS_READY, MSG_COMPONENTS, MSG_SOME_COMPONENTS_NOT_READY,
+    },
+    utils::ports::EcosystemPortsScanner,
+};
+
+pub mod args;
+mod draw;
+mod utils;
+
+const STATUS_READY: &str = "ready";
+
+#[derive(Deserialize, Debug)]
+struct StatusResponse {
+    status: String,
+    components: HashMap<String, Component>,
+}
+
+#[derive(Deserialize, Debug)]
+struct Component {
+    status: String,
+    details: Option<Value>,
+}
+
+fn print_status(health_check_url: String) -> anyhow::Result<()> {
+    let client = reqwest::blocking::Client::new();
+    let response = client.get(&health_check_url).send()?.text()?;
+
+    let status_response: StatusResponse =
+        serde_json::from_str(&response).context(msg_failed_parse_response(&response))?;
+
+    if status_response.status.to_lowercase() == STATUS_READY {
+        logger::success(msg_system_status(&status_response.status));
+    } else {
+        logger::warn(msg_system_status(&status_response.status));
+    }
+
+    let mut components_info = String::from(MSG_COMPONENTS);
+    let mut components = Vec::new();
+    let mut not_ready_components = Vec::new();
+
+    for (component_name, component) in status_response.components {
+        let readable_name = deslugify(&component_name);
+        let mut component_info = format!("{}:\n - Status: {}", readable_name, component.status);
+
+        if let Some(details) = &component.details {
+            for (key, value) in details.as_object().unwrap() {
+                component_info.push_str(&format!("\n - {}: {}", deslugify(key), value));
+            }
+        }
+
+        if component.status.to_lowercase() != STATUS_READY {
+            not_ready_components.push(readable_name);
+        }
+
+        components.push(component_info);
+    }
+
+    components.sort_by(|a, b| {
+        a.lines()
+            .count()
+            .cmp(&b.lines().count())
+            .then_with(|| a.cmp(b))
+    });
+
+    for chunk in components.chunks(2) {
+        components_info.push_str(&bordered_boxes(&chunk[0], chunk.get(1)));
+    }
+
+    logger::info(components_info);
+
+    if not_ready_components.is_empty() {
+        logger::outro(MSG_ALL_COMPONENTS_READY);
+    } else {
+        logger::warn(MSG_SOME_COMPONENTS_NOT_READY);
+        logger::outro(msg_not_ready_components(&not_ready_components.join(", ")));
+    }
+
+    Ok(())
+}
+
+fn print_ports(shell: &Shell) -> anyhow::Result<()> {
+    let ports = EcosystemPortsScanner::scan(shell)?;
+    let grouped_ports = ports.group_by_file_path();
+
+    let mut all_port_lines: Vec<String> = Vec::new();
+
+    for (file_path, port_infos) in grouped_ports {
+        let mut port_info_lines = String::new();
+
+        for port_info in port_infos {
+            port_info_lines.push_str(&format_port_info(&port_info));
+        }
+
+        all_port_lines.push(format!("{}:\n{}", file_path, port_info_lines));
+    }
+
+    all_port_lines.sort_by(|a, b| {
+        b.lines()
+            .count()
+            .cmp(&a.lines().count())
+            .then_with(|| a.cmp(b))
+    });
+
+    let mut components_info = String::from("Ports:\n");
+    for chunk in all_port_lines.chunks(2) {
+        components_info.push_str(&bordered_boxes(&chunk[0], chunk.get(1)));
+    }
+
+    logger::info(components_info);
+    Ok(())
+}
+
+pub async fn run(shell: &Shell, args: StatusArgs) -> anyhow::Result<()> {
+    if let Some(StatusSubcommands::Ports) = args.subcommand {
+        return print_ports(shell);
+    }
+
+    let health_check_url = args.get_url(shell)?;
+
+    print_status(health_check_url)
+}
diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs
new file mode 100644
index 00000000000..399a0fb0fec
--- /dev/null
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs
@@ -0,0 +1,26 @@
+use std::net::TcpListener;
+
+pub fn is_port_open(port: u16) -> bool {
+    TcpListener::bind(("0.0.0.0", port)).is_err() || TcpListener::bind(("127.0.0.1", port)).is_err()
+}
+
+pub fn deslugify(name: &str) -> String {
+    name.split('_')
+        .map(|word| {
+            let mut chars = word.chars();
+            match 
chars.next() {
+                Some(first) => {
+                    let capitalized = first.to_uppercase().collect::<String>() + chars.as_str();
+                    match capitalized.as_str() {
+                        "Http" => "HTTP".to_string(),
+                        "Api" => "API".to_string(),
+                        "Ws" => "WS".to_string(),
+                        _ => capitalized,
+                    }
+                }
+                None => String::new(),
+            }
+        })
+        .collect::<Vec<String>>()
+        .join(" ")
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs
similarity index 65%
rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs
rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs
index 1337566e536..9e76850ff2e 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs
@@ -1,12 +1,12 @@
 use clap::Parser;
 use serde::{Deserialize, Serialize};
 
-use crate::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP};
+use crate::commands::dev::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP};
 
 #[derive(Debug, Serialize, Deserialize, Parser)]
 pub struct FeesArgs {
     #[clap(short, long, help = MSG_NO_DEPS_HELP)]
     pub no_deps: bool,
-    #[clap(short, long, help = MSG_NO_KILL_HELP)]
+    #[clap(long, help = MSG_NO_KILL_HELP)]
     pub no_kill: bool,
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs
similarity index 63%
rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs
rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs
index 435dddfc360..625df0fc151 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs
@@ -1,7 +1,9 @@
 use clap::Parser;
 use serde::{Deserialize, Serialize};
 
-use crate::messages::{MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP};
+use crate::commands::dev::messages::{
+    MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP,
+};
 
 #[derive(Debug, Serialize, Deserialize, Parser)]
 pub struct IntegrationArgs {
@@ -9,6 +11,6 @@ pub struct IntegrationArgs {
     pub external_node: bool,
     #[clap(short, long, help = MSG_NO_DEPS_HELP)]
     pub no_deps: bool,
-    #[clap(short, long, help = MSG_TEST_PATTERN_HELP)]
+    #[clap(short, long, help = MSG_TEST_PATTERN_HELP, allow_hyphen_values(true))]
     pub test_pattern: Option<String>,
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/mod.rs
similarity index 100%
rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs
rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/mod.rs
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs
similarity index 66%
rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs
rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs
index 81cc58fbd9b..b6ce278a1ca 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs
@@ -1,7 +1,9 @@
 use clap::Parser;
 use serde::{Deserialize, Serialize};
 
-use crate::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_TESTS_RECOVERY_SNAPSHOT_HELP};
+use crate::commands::dev::messages::{
+ MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_TESTS_RECOVERY_SNAPSHOT_HELP, +}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct RecoveryArgs { @@ -9,6 +11,6 @@ pub struct RecoveryArgs { pub snapshot: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs similarity index 85% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs index 0154a4c0afd..9f86eec7f3d 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_REVERT_TEST_ENABLE_CONSENSUS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, }; @@ -13,6 +13,6 @@ pub struct RevertArgs { pub external_node: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs similarity index 70% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs index 2d94adc3f6a..6ca277f6a2f 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::MSG_TEST_RUST_OPTIONS_HELP; +use crate::commands::dev::messages::MSG_TEST_RUST_OPTIONS_HELP; #[derive(Debug, Parser)] pub struct RustArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs similarity index 72% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs index dd96957e9d3..7b631b91e9a 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::MSG_NO_DEPS_HELP; +use crate::commands::dev::messages::MSG_NO_DEPS_HELP; #[derive(Debug, Parser)] pub struct UpgradeArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs index f48967f5973..dea6a46bbef 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs @@ -6,8 +6,8 @@ use super::utils::{build_contracts, install_and_build_dependencies}; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; - build_contracts(shell, &ecosystem_config)?; install_and_build_dependencies(shell, 
&ecosystem_config)?; + build_contracts(shell, &ecosystem_config)?; Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs similarity index 83% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs index a08b0404605..19f6307019b 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs @@ -3,7 +3,7 @@ use std::path::Path; use common::{cmd::Cmd, db::wait_for_db, logger}; use xshell::{cmd, Shell}; -use crate::{commands::database, dals::Dal, messages::MSG_RESETTING_TEST_DATABASES}; +use crate::commands::dev::{commands::database, dals::Dal, messages::MSG_RESETTING_TEST_DATABASES}; pub async fn reset_test_databases( shell: &Shell, @@ -26,7 +26,7 @@ pub async fn reset_test_databases( for dal in dals { let mut url = dal.url.clone(); url.set_path(""); - wait_for_db(&url, 3).await?; + wait_for_db(&url, 20).await?; database::reset::reset_database(shell, link_to_code, dal.clone()).await?; } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs index e0b881a14db..e58a70e6b7c 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs @@ -9,7 +9,7 @@ use super::{ args::fees::FeesArgs, utils::{build_contracts, install_and_build_dependencies, TS_INTEGRATION_PATH}, }; -use crate::{ +use crate::commands::dev::{ commands::test::utils::{TestWallets, TEST_WALLETS_PATH}, messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs index 8f76e56fe83..bee0f0788ee 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs @@ -12,7 +12,7 @@ use super::{ TS_INTEGRATION_PATH, }, }; -use crate::messages::{ +use crate::commands::dev::messages::{ msg_integration_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_INTEGRATION_TESTS_RUN_SUCCESS, }; @@ -28,8 +28,8 @@ pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> { logger::info(msg_integration_tests_run(args.external_node)); if !args.no_deps { - build_contracts(shell, &ecosystem_config)?; install_and_build_dependencies(shell, &ecosystem_config)?; + build_contracts(shell, &ecosystem_config)?; } let wallets_path: PathBuf = ecosystem_config.link_to_code.join(TEST_WALLETS_PATH); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs similarity index 86% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs index 0a1e1ec5203..7d163daed67 100644 --- 
a/zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs @@ -2,7 +2,7 @@ use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::MSG_L1_CONTRACTS_TEST_SUCCESS; +use crate::commands::dev::messages::MSG_L1_CONTRACTS_TEST_SUCCESS; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs similarity index 95% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs index ee307438ec9..72a8f97ff97 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs @@ -3,7 +3,7 @@ use common::{cmd::Cmd, config::global_config, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; +use crate::commands::dev::messages::MSG_CHAIN_NOT_FOUND_ERR; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs similarity index 92% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs index ae6b4518e6d..095e27652aa 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs @@ -5,7 +5,7 @@ use args::{ use clap::Subcommand; use xshell::Shell; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_BUILD_ABOUT, MSG_INTEGRATION_TESTS_ABOUT, MSG_L1_CONTRACTS_ABOUT, MSG_LOADTEST_ABOUT, MSG_PROVER_TEST_ABOUT, MSG_RECOVERY_TEST_ABOUT, MSG_REVERT_TEST_ABOUT, MSG_RUST_TEST_ABOUT, MSG_TEST_WALLETS_INFO, MSG_UPGRADE_TEST_ABOUT, @@ -30,7 +30,7 @@ mod wallet; pub enum TestCommands { #[clap(about = MSG_INTEGRATION_TESTS_ABOUT, alias = "i")] Integration(IntegrationArgs), - #[clap(about = "Run fees test", alias = "i")] + #[clap(about = "Run fees test", alias = "f")] Fees(FeesArgs), #[clap(about = MSG_REVERT_TEST_ABOUT, alias = "r")] Revert(RevertArgs), @@ -40,7 +40,7 @@ pub enum TestCommands { Upgrade(UpgradeArgs), #[clap(about = MSG_BUILD_ABOUT)] Build, - #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit")] + #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit", allow_hyphen_values(true))] Rust(RustArgs), #[clap(about = MSG_L1_CONTRACTS_ABOUT, alias = "l1")] L1Contracts, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs index f48b359a935..200baf57215 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use url::Url; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::test::db::reset_test_databases, dals::{Dal, PROVER_DAL_PATH}, defaults::TEST_DATABASE_PROVER_URL, diff --git 
a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs index 6a3e337d41e..ae889969fd2 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs @@ -9,7 +9,7 @@ use super::{ args::recovery::RecoveryArgs, utils::{install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH}, }; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_RECOVERY_TEST_RUN_INFO, MSG_RECOVERY_TEST_RUN_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs index 8b00e9d7f4d..dc95c88db20 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs @@ -9,7 +9,7 @@ use super::{ args::revert::RevertArgs, utils::{install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH}, }; -use crate::messages::{ +use crate::commands::dev::messages::{ msg_revert_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_REVERT_TEST_RUN_INFO, MSG_REVERT_TEST_RUN_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs similarity index 94% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs index 7011e0f0f87..8c0c707f6a2 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs @@ -7,7 +7,7 @@ use url::Url; use xshell::{cmd, Shell}; use super::args::rust::RustArgs; -use crate::{ +use crate::commands::dev::{ commands::test::db::reset_test_databases, dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH}, defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL}, @@ -75,8 +75,8 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { .env("TEST_PROVER_DATABASE_URL", test_prover_url); cmd.run()?; - // Run unit tests for zk_toolbox - let _dir_guard = shell.push_dir(link_to_code.join("zk_toolbox")); + // Run unit tests for ZK Stack CLI + let _dir_guard = shell.push_dir(link_to_code.join("zkstack_cli")); Cmd::new(cmd!(shell, "cargo nextest run --release")) .with_force_run() .run()?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs similarity index 91% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs index 9bd04b81ef3..707e0086ed1 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs @@ -3,7 +3,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::{args::upgrade::UpgradeArgs, utils::install_and_build_dependencies}; -use crate::messages::{MSG_UPGRADE_TEST_RUN_INFO, 
MSG_UPGRADE_TEST_RUN_SUCCESS}; +use crate::commands::dev::messages::{MSG_UPGRADE_TEST_RUN_INFO, MSG_UPGRADE_TEST_RUN_SUCCESS}; const UPGRADE_TESTS_PATH: &str = "core/tests/upgrade-test"; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs similarity index 93% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs index 8656ff44d31..bcd524bd2cb 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs @@ -10,7 +10,7 @@ use ethers::{ use serde::Deserialize; use xshell::{cmd, Shell}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS, MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES, }; @@ -43,10 +43,11 @@ impl TestWallets { } pub fn get_test_pk(&self, chain_config: &ChainConfig) -> anyhow::Result<String> { - self.get_test_wallet(chain_config)? - .private_key - .ok_or(anyhow::Error::msg("Private key not found")) - .map(|pk| pk.encode_hex::<String>()) + Ok(self + .get_test_wallet(chain_config)? + .private_key_h256() + .context("Private key not found")? + .encode_hex()) } pub async fn init_test_wallet( diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs similarity index 96% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs index 62f32b50d55..6953014bf92 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs @@ -6,7 +6,7 @@ use config::EcosystemConfig; use xshell::Shell; use super::utils::{TestWallets, TEST_WALLETS_PATH}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_TEST_WALLETS_INFO, MSG_WALLETS_TEST_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/consts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/consts.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/consts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/consts.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zkstack_cli/crates/zkstack/src/commands/dev/dals.rs similarity index 95% rename from zk_toolbox/crates/zk_supervisor/src/dals.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/dals.rs index 962a848fe00..9626edfed73 100644 --- a/zk_toolbox/crates/zk_supervisor/src/dals.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/dals.rs @@ -1,9 +1,9 @@ -use anyhow::{anyhow, Context}; +use anyhow::Context as _; use config::{EcosystemConfig, SecretsConfig}; use url::Url; use xshell::Shell; -use crate::{ +use super::{ commands::database::args::DalUrls, messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_DATABASE_MUST_BE_PRESENTED}, }; @@ -91,7 +91,7 @@ fn get_secrets(shell: &Shell) -> anyhow::Result<SecretsConfig> { let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config .load_current_chain() - .ok_or(anyhow!(MSG_CHAIN_NOT_FOUND_ERR))?; + .context(MSG_CHAIN_NOT_FOUND_ERR)?; let secrets = chain_config.get_secrets_config()?; Ok(secrets) diff --git a/zk_toolbox/crates/zk_supervisor/src/defaults.rs b/zkstack_cli/crates/zkstack/src/commands/dev/defaults.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/defaults.rs rename
to zkstack_cli/crates/zkstack/src/commands/dev/defaults.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs similarity index 90% rename from zk_toolbox/crates/zk_supervisor/src/messages.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/messages.rs index 3a49fa1ae9b..4dad1b2b6e2 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs @@ -1,13 +1,11 @@ -use crate::commands::lint_utils::Target; +use super::commands::lint_utils::Target; // Ecosystem related messages pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found"; -pub(super) fn msg_global_chain_does_not_exist(chain: &str, available_chains: &str) -> String { - format!("Chain with name {chain} doesnt exist, please choose one of: {available_chains}") -} - // Subcommands help +pub(super) const MSG_GENERATE_GENESIS_ABOUT: &str = + "Generate new genesis file based on current contracts"; pub(super) const MSG_PROVER_VERSION_ABOUT: &str = "Protocol version used by provers"; pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related commands"; pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests"; @@ -110,7 +108,6 @@ pub(super) const MSG_RESETTING_TEST_DATABASES: &str = "Resetting test databases" // Contract building related messages pub(super) const MSG_NOTHING_TO_BUILD_MSG: &str = "Nothing to build!"; pub(super) const MSG_BUILDING_CONTRACTS: &str = "Building contracts"; -pub(super) const MSG_CONTRACTS_DEPS_SPINNER: &str = "Installing dependencies.."; pub(super) const MSG_BUILDING_L2_CONTRACTS_SPINNER: &str = "Building L2 contracts.."; pub(super) const MSG_BUILDING_L1_CONTRACTS_SPINNER: &str = "Building L1 contracts.."; pub(super) const MSG_BUILDING_L1_DA_CONTRACTS_SPINNER: &str = "Building L1 DA contracts.."; @@ -162,9 +159,7 @@ pub(super) const MSG_UPGRADE_TEST_RUN_INFO: &str = "Running upgrade test"; pub(super) const MSG_UPGRADE_TEST_RUN_SUCCESS: &str = "Upgrade test ran successfully"; // Cleaning related messages -pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down"; -pub(super) const MSG_DOCKER_COMPOSE_REMOVE_VOLUMES: &str = "docker compose remove volumes"; -pub(super) const MSG_DOCKER_COMPOSE_CLEANED: &str = "docker compose network cleaned"; +pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down -v"; pub(super) const MSG_CONTRACTS_CLEANING: &str = "Removing contracts building and deployment artifacts"; pub(super) const MSG_CONTRACTS_CLEANING_FINISHED: &str = @@ -236,3 +231,29 @@ pub(super) const MSG_UNABLE_TO_WRITE_FILE_ERR: &str = "Unable to write data to f pub(super) const MSG_UNABLE_TO_READ_PARSE_JSON_ERR: &str = "Unable to parse JSON"; pub(super) const MSG_FAILED_TO_SEND_TXN_ERR: &str = "Failed to send transaction"; pub(super) const MSG_INVALID_L1_RPC_URL_ERR: &str = "Invalid L1 RPC URL"; + +// Status related messages +pub(super) const MSG_STATUS_ABOUT: &str = "Get status of the server"; +pub(super) const MSG_API_CONFIG_NOT_FOUND_ERR: &str = "API config not found"; +pub(super) const MSG_STATUS_URL_HELP: &str = "URL of the health check endpoint"; +pub(super) const MSG_STATUS_PORTS_HELP: &str = "Show used ports"; +pub(super) const MSG_COMPONENTS: &str = "Components:\n"; +pub(super) const MSG_ALL_COMPONENTS_READY: &str = + "Overall System Status: All components operational and ready."; +pub(super) const MSG_SOME_COMPONENTS_NOT_READY: &str = + "Overall System Status: Some components are not ready."; + +pub(super) fn 
msg_system_status(status: &str) -> String { + format!("System Status: {}\n", status) +} + +pub(super) fn msg_failed_parse_response(response: &str) -> String { + format!("Failed to parse response: {}", response) +} + +pub(super) fn msg_not_ready_components(components: &str) -> String { + format!("Not Ready Components: {}", components) +} + +// Genesis +pub(super) const MSG_GENESIS_FILE_GENERATION_STARTED: &str = "Regenerate genesis file"; diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs new file mode 100644 index 00000000000..409c3a764eb --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs @@ -0,0 +1,70 @@ +use clap::Subcommand; +use commands::status::args::StatusArgs; +use messages::MSG_STATUS_ABOUT; +use xshell::Shell; + +use self::commands::{ + clean::CleanCommands, config_writer::ConfigWriterArgs, contracts::ContractsArgs, + database::DatabaseCommands, fmt::FmtArgs, lint::LintArgs, prover::ProverCommands, + send_transactions::args::SendTransactionsArgs, snapshot::SnapshotCommands, test::TestCommands, +}; +use crate::commands::dev::messages::{ + MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, MSG_GENERATE_GENESIS_ABOUT, + MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, + MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, + MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, +}; + +mod commands; +mod consts; +mod dals; +mod defaults; +mod messages; + +#[derive(Subcommand, Debug)] +pub enum DevCommands { + #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT, alias = "db")] + Database(DatabaseCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_TESTS_ABOUT, alias = "t")] + Test(TestCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_CLEAN)] + Clean(CleanCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT)] + Snapshot(SnapshotCommands), + #[command(about = MSG_SUBCOMMAND_LINT_ABOUT, alias = "l")] + Lint(LintArgs), + #[command(about = MSG_SUBCOMMAND_FMT_ABOUT)] + Fmt(FmtArgs), + #[command(subcommand, about = MSG_PROVER_VERSION_ABOUT)] + Prover(ProverCommands), + #[command(about = MSG_CONTRACTS_ABOUT)] + Contracts(ContractsArgs), + #[command(about = MSG_CONFIG_WRITER_ABOUT, alias = "o")] + ConfigWriter(ConfigWriterArgs), + #[command(about = MSG_SEND_TXNS_ABOUT)] + SendTransactions(SendTransactionsArgs), + #[command(about = MSG_STATUS_ABOUT)] + Status(StatusArgs), + #[command(about = MSG_GENERATE_GENESIS_ABOUT, alias = "genesis")] + GenerateGenesis, +} + +pub async fn run(shell: &Shell, args: DevCommands) -> anyhow::Result<()> { + match args { + DevCommands::Database(command) => commands::database::run(shell, command).await?, + DevCommands::Test(command) => commands::test::run(shell, command).await?, + DevCommands::Clean(command) => commands::clean::run(shell, command)?, + DevCommands::Snapshot(command) => commands::snapshot::run(shell, command).await?, + DevCommands::Lint(args) => commands::lint::run(shell, args)?, + DevCommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, + DevCommands::Prover(command) => commands::prover::run(shell, command).await?, + DevCommands::Contracts(args) => commands::contracts::run(shell, args)?, + DevCommands::ConfigWriter(args) => commands::config_writer::run(shell, args)?, + DevCommands::SendTransactions(args) => { + commands::send_transactions::run(shell, args).await? 
+ } + DevCommands::Status(args) => commands::status::run(shell, args).await?, + DevCommands::GenerateGenesis => commands::genesis::run(shell).await?, + } + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/change_default.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/change_default.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/change_default.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/change_default.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs index 2e5c50f4538..6b6c1236d36 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs @@ -1,7 +1,7 @@ use std::path::{Path, PathBuf}; use anyhow::bail; -use clap::Parser; +use clap::{Parser, ValueHint}; use common::{cmd::Cmd, logger, Prompt, PromptConfirm, PromptSelect}; use serde::{Deserialize, Serialize}; use slugify_rs::slugify; @@ -26,7 +26,7 @@ pub struct EcosystemCreateArgs { pub ecosystem_name: Option<String>, #[clap(long, help = MSG_L1_NETWORK_HELP, value_enum)] pub l1_network: Option<L1Network>, - #[clap(long, help = MSG_LINK_TO_CODE_HELP)] + #[clap(long, help = MSG_LINK_TO_CODE_HELP, value_hint = ValueHint::DirPath)] pub link_to_code: Option<String>, #[clap(flatten)] #[serde(flatten)] @@ -71,7 +71,13 @@ impl EcosystemCreateArgs { // Make the only chain as a default one self.chain.set_as_default = Some(true); - let chain = self.chain.fill_values_with_prompt(0, &l1_network, vec![])?; + let chain = self.chain.fill_values_with_prompt( + shell, + 0, + &l1_network, + vec![], + Path::new(&link_to_code), + )?; let start_containers = self.start_containers.unwrap_or_else(|| { PromptConfirm::new(MSG_START_CONTAINERS_PROMPT) diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs similarity index 79% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs index 6eb3780755f..9bf332b3bee 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs @@ -10,10 +10,10 @@ use crate::{ commands::chain::args::genesis::GenesisArgs, defaults::LOCAL_RPC_URL, messages::{ - MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEPLOY_PAYMASTER_PROMPT, - MSG_DEV_ARG_HELP, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, - MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP, - MSG_OBSERVABILITY_PROMPT, + MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEV_ARG_HELP, + MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, + MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP,
MSG_OBSERVABILITY_PROMPT, + MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, }, }; @@ -74,9 +74,6 @@ pub struct EcosystemArgsFinal { #[derive(Debug, Clone, Serialize, Deserialize, Parser)] pub struct EcosystemInitArgs { - /// Deploy Paymaster contract - #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub deploy_paymaster: Option<bool>, /// Deploy ERC20 contracts #[clap(long, default_missing_value = "true", num_args = 0..=1)] pub deploy_erc20: Option<bool>, @@ -86,14 +83,23 @@ pub struct EcosystemInitArgs { #[clap(flatten)] #[serde(flatten)] pub forge_args: ForgeScriptArgs, - #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)] - #[serde(flatten)] - pub genesis_args: GenesisArgs, + /// Deploy Paymaster contract + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub deploy_paymaster: Option<bool>, + #[clap(long, help = MSG_SERVER_DB_URL_HELP)] + pub server_db_url: Option<Url>, + #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] + pub server_db_name: Option<String>, + #[clap(long, short, action)] + pub dont_drop: bool, + /// Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` subcommand) + #[clap(long, default_value_t = false)] + pub ecosystem_only: bool, #[clap(long, help = MSG_DEV_ARG_HELP)] pub dev: bool, #[clap(long, short = 'o', help = MSG_OBSERVABILITY_HELP, default_missing_value = "true", num_args = 0..=1)] pub observability: Option<bool>, - #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)] + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] pub no_port_reallocation: bool, #[clap( long, @@ -110,21 +116,24 @@ pub struct EcosystemInitArgs { } impl EcosystemInitArgs { + pub fn get_genesis_args(&self) -> GenesisArgs { + GenesisArgs { + server_db_url: self.server_db_url.clone(), + server_db_name: self.server_db_name.clone(), + dev: self.dev, + dont_drop: self.dont_drop, + } + } + pub fn fill_values_with_prompt(self, l1_network: L1Network) -> EcosystemInitArgsFinal { - let (deploy_paymaster, deploy_erc20) = if self.dev { - (true, true) + let deploy_erc20 = if self.dev { + true } else { - let deploy_paymaster = self.deploy_paymaster.unwrap_or_else(|| { - PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT) - .default(true) - .ask() - }); - let deploy_erc20 = self.deploy_erc20.unwrap_or_else(|| { + self.deploy_erc20.unwrap_or_else(|| { PromptConfirm::new(MSG_DEPLOY_ERC20_PROMPT) .default(true) .ask() - }); - (deploy_paymaster, deploy_erc20) + }) }; let ecosystem = self.ecosystem.fill_values_with_prompt(l1_network, self.dev); let observability = if self.dev { @@ -138,12 +147,12 @@ impl EcosystemInitArgs { }; EcosystemInitArgsFinal { - deploy_paymaster, deploy_erc20, ecosystem, forge_args: self.forge_args.clone(), dev: self.dev, observability, + ecosystem_only: self.ecosystem_only, no_port_reallocation: self.no_port_reallocation, skip_submodules_checkout: self.skip_submodules_checkout, skip_contract_compilation_override: self.skip_contract_compilation_override, @@ -153,12 +162,12 @@ #[derive(Debug, Serialize, Deserialize)] pub struct EcosystemInitArgsFinal { - pub deploy_paymaster: bool, pub deploy_erc20: bool, pub ecosystem: EcosystemArgsFinal, pub forge_args: ForgeScriptArgs, pub dev: bool, pub observability: bool, + pub ecosystem_only: bool, pub no_port_reallocation: bool, pub skip_submodules_checkout: bool, pub skip_contract_compilation_override: bool, diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs
b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/change_default.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/change_default.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs index dbd487bff3c..0dcc8e03378 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs @@ -26,9 +26,11 @@ pub async fn deploy_l1( broadcast: bool, ) -> anyhow::Result { let deploy_config_path = DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.input(&config.link_to_code); + dbg!(config.get_default_configs_path()); let default_genesis_config = GenesisConfig::read_with_base_path(shell, config.get_default_configs_path()) - .context("Context")?; + .context("failed reading genesis config")?; + dbg!(2); let wallets_config = config.get_wallets()?; // For deploying ecosystem we only need genesis batch params @@ -54,7 +56,7 @@ pub async fn deploy_l1( if let Some(address) = sender { forge = forge.with_sender(address); } else { - forge = fill_forge_private_key(forge, wallets_config.deployer_private_key())?; + forge = fill_forge_private_key(forge, wallets_config.deployer.as_ref())?; } if broadcast { diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/create_configs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/create_configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs similarity index 79% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs index fc4dc3ccf57..b823344f9b3 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs @@ -3,6 +3,7 @@ use std::{path::PathBuf, str::FromStr}; use anyhow::Context; use common::{ config::global_config, + contracts::build_system_contracts, forge::{Forge, ForgeScriptArgs}, git, hardhat::{build_l1_contracts, 
build_l2_contracts}, @@ -28,7 +29,7 @@ use super::{ args::init::{EcosystemArgsFinal, EcosystemInitArgs, EcosystemInitArgsFinal}, common::deploy_l1, setup_observability, - utils::{build_da_contracts, build_system_contracts, install_yarn_dependencies}, + utils::{build_da_contracts, install_yarn_dependencies}, }; use crate::{ accept_ownership::{accept_admin, accept_owner}, @@ -39,9 +40,8 @@ use crate::{ }, }, messages::{ - msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, - msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, - MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, + msg_chain_load_err, msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, + msg_initializing_chain, MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, MSG_DEPLOYING_ERC20_SPINNER, MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT, MSG_INITIALIZING_ECOSYSTEM, MSG_INTALLING_DEPS_SPINNER, @@ -62,11 +62,9 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { Err(_) => create_initial_deployments_config(shell, &ecosystem_config.config)?, }; - let mut genesis_args = args.genesis_args.clone(); - if args.dev { - genesis_args.use_default = true; - } - let mut final_ecosystem_args = args.fill_values_with_prompt(ecosystem_config.l1_network); + let mut final_ecosystem_args = args + .clone() + .fill_values_with_prompt(ecosystem_config.l1_network); logger::info(MSG_INITIALIZING_ECOSYSTEM); @@ -74,7 +72,7 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { setup_observability::run(shell)?; } - let contracts_config = init( + let contracts_config = init_ecosystem( &mut final_ecosystem_args, shell, &ecosystem_config, @@ -99,43 +97,17 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { .await?; } - // If the name of chain passed then we deploy exactly this chain otherwise deploy all chains - let list_of_chains = if let Some(name) = global_config().chain_name.clone() { - vec![name] - } else { - ecosystem_config.list_of_chains() - }; - - for chain_name in &list_of_chains { - logger::info(msg_initializing_chain(chain_name)); - let chain_config = ecosystem_config - .load_chain(Some(chain_name.clone())) - .context(MSG_CHAIN_NOT_INITIALIZED)?; - - let mut chain_init_args = chain::args::init::InitArgsFinal { - forge_args: final_ecosystem_args.forge_args.clone(), - genesis_args: genesis_args.clone().fill_values_with_prompt(&chain_config), - deploy_paymaster: final_ecosystem_args.deploy_paymaster, - l1_rpc_url: final_ecosystem_args.ecosystem.l1_rpc_url.clone(), - no_port_reallocation: final_ecosystem_args.no_port_reallocation, - skip_submodules_checkout: final_ecosystem_args.skip_submodules_checkout, - }; - - chain::init::init( - &mut chain_init_args, - shell, - &ecosystem_config, - &chain_config, - ) - .await?; + // Initialize chain(s) + let mut chains: Vec = vec![]; + if !final_ecosystem_args.ecosystem_only { + chains = init_chains(&args, &final_ecosystem_args, shell, &ecosystem_config).await?; } - - logger::outro(msg_ecosystem_initialized(&list_of_chains.join(","))); + logger::outro(msg_ecosystem_initialized(&chains.join(","))); Ok(()) } -async fn init( +async fn init_ecosystem( init_args: &mut EcosystemInitArgsFinal, shell: &Shell, ecosystem_config: &EcosystemConfig, @@ -146,7 +118,7 @@ async fn init( if !init_args.skip_contract_compilation_override { build_da_contracts(shell, &ecosystem_config.link_to_code)?; build_l1_contracts(shell, 
&ecosystem_config.link_to_code)?; - build_system_contracts(shell, &ecosystem_config.link_to_code)?; + build_system_contracts(shell.clone(), ecosystem_config.link_to_code.clone())?; build_l2_contracts(shell, &ecosystem_config.link_to_code)?; } spinner.finish(); @@ -190,10 +162,7 @@ async fn deploy_erc20( .with_rpc_url(l1_rpc_url) .with_broadcast(); - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.deployer_private_key(), - )?; + forge = fill_forge_private_key(forge, ecosystem_config.get_wallets()?.deployer.as_ref())?; let spinner = Spinner::new(MSG_DEPLOYING_ERC20_SPINNER); check_the_balance(&forge).await?; @@ -301,7 +270,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.ecosystem_contracts.bridgehub_proxy_addr, &forge_args, l1_rpc_url.clone(), @@ -312,7 +281,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.chain_admin_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.ecosystem_contracts.bridgehub_proxy_addr, &forge_args, l1_rpc_url.clone(), @@ -323,7 +292,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.bridges.shared.l1_address, &forge_args, l1_rpc_url.clone(), @@ -337,7 +306,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config .ecosystem_contracts .state_transition_proxy_addr, @@ -350,7 +319,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.chain_admin_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config .ecosystem_contracts .state_transition_proxy_addr, @@ -363,7 +332,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config .ecosystem_contracts .stm_deployment_tracker_proxy_addr, @@ -374,3 +343,57 @@ async fn deploy_ecosystem_inner( Ok(contracts_config) } + +async fn init_chains( + init_args: &EcosystemInitArgs, + final_init_args: &EcosystemInitArgsFinal, + shell: &Shell, + ecosystem_config: &EcosystemConfig, +) -> anyhow::Result> { + // If the name of chain passed then we deploy exactly this chain otherwise deploy all chains + let list_of_chains = if let Some(name) = global_config().chain_name.clone() { + vec![name] + } else { + ecosystem_config.list_of_chains() + }; + // Set default values for dev mode + let mut deploy_paymaster = init_args.deploy_paymaster; + let mut genesis_args = init_args.get_genesis_args().clone(); + if final_init_args.dev { + deploy_paymaster = Some(true); + genesis_args.dev = true; + } + // Can't initialize multiple chains with the same DB + if list_of_chains.len() > 1 { + genesis_args.reset_db_names(); + } + // Initialize chains + for chain_name in &list_of_chains { + logger::info(msg_initializing_chain(chain_name)); + let chain_config = ecosystem_config + .load_chain(Some(chain_name.clone())) + .context(msg_chain_load_err(chain_name))?; + + let chain_init_args = chain::args::init::InitArgs { + forge_args: final_init_args.forge_args.clone(), + server_db_url: genesis_args.server_db_url.clone(), + server_db_name: 
genesis_args.server_db_name.clone(), + dont_drop: genesis_args.dont_drop, + deploy_paymaster, + l1_rpc_url: Some(final_init_args.ecosystem.l1_rpc_url.clone()), + no_port_reallocation: final_init_args.no_port_reallocation, + dev: final_init_args.dev, + skip_submodules_checkout: final_init_args.skip_submodules_checkout, + }; + let final_chain_init_args = chain_init_args.fill_values_with_prompt(&chain_config); + + chain::init::init( + &final_chain_init_args, + shell, + ecosystem_config, + &chain_config, + ) + .await?; + } + Ok(list_of_chains) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/setup_observability.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/setup_observability.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/utils.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/utils.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/backend.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/backend.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/prepare_configs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/prepare_configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/run.rs 
similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs similarity index 82% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs index e513a3669e0..8f5f8352458 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs @@ -6,12 +6,12 @@ use config::{ external_node::ENConfig, set_rocks_db_config, traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, }; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; use zksync_config::configs::{ - consensus::{ConsensusSecrets, NodeSecretKey, Secret}, + consensus::{ConsensusConfig, ConsensusSecrets, NodeSecretKey, Secret}, DatabaseSecrets, L1Secrets, }; use zksync_consensus_crypto::TextFmt; @@ -19,14 +19,13 @@ use zksync_consensus_roles as roles; use crate::{ commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal}, - defaults::PORT_RANGE_END, messages::{ msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_CONSENSUS_SECRETS_MISSING_ERR, MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PREPARING_EN_CONFIGS, }, utils::{ - consensus::{get_consensus_config, node_public_key}, + consensus::node_public_key, ports::EcosystemPortsScanner, rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }, @@ -76,21 +75,15 @@ fn prepare_configs( )?, main_node_rate_limit_rps: None, gateway_url: None, + bridge_addresses_refresh_interval_sec: None, }; let mut general_en = general.clone(); + general_en.consensus_config = None; let main_node_consensus_config = general .consensus_config .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; - - // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - ports.add_port_info( - main_node_consensus_config.server_addr.port(), - "Main node consensus".to_string(), - ); - let offset = ((config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut en_consensus_config = main_node_consensus_config.clone(); let mut gossip_static_outbound = BTreeMap::new(); let main_node_public_key = node_public_key( @@ -100,13 +93,8 @@ fn prepare_configs( .context(MSG_CONSENSUS_SECRETS_MISSING_ERR)?, )? 
.context(MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR)?; - gossip_static_outbound.insert(main_node_public_key, main_node_consensus_config.public_addr); - - let en_consensus_config = - get_consensus_config(config, consensus_port, None, Some(gossip_static_outbound))?; - general_en.consensus_config = Some(en_consensus_config.clone()); - en_consensus_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.gossip_static_outbound = gossip_static_outbound; // Set secrets config let node_key = roles::node::SecretKey::generate().encode(); @@ -128,16 +116,25 @@ fn prepare_configs( }), data_availability: None, }; - secrets.save_with_base_path(shell, en_configs_path)?; + let dirs = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::ExternalNode)?; set_rocks_db_config(&mut general_en, dirs)?; + general_en.save_with_base_path(shell, en_configs_path)?; en_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.save_with_base_path(shell, en_configs_path)?; + secrets.save_with_base_path(shell, en_configs_path)?; + let offset = 0; // This is zero because general_en ports already have a chain offset ports.allocate_ports_in_yaml( shell, &GeneralConfig::get_path_with_base_path(en_configs_path), - 0, // This is zero because general_en ports already have a chain offset + offset, + )?; + ports.allocate_ports_in_yaml( + shell, + &ConsensusConfig::get_path_with_base_path(en_configs_path), + offset, )?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/mod.rs similarity index 86% rename from zk_toolbox/crates/zk_inception/src/commands/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/mod.rs index 78a46797602..b5319cbc6bf 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/mod.rs @@ -1,8 +1,10 @@ pub mod args; +pub mod autocomplete; pub mod chain; pub mod consensus; pub mod containers; pub mod contract_verifier; +pub mod dev; pub mod ecosystem; pub mod explorer; pub mod external_node; diff --git a/zk_toolbox/crates/zk_inception/src/commands/portal.rs b/zkstack_cli/crates/zkstack/src/commands/portal.rs similarity index 98% rename from zk_toolbox/crates/zk_inception/src/commands/portal.rs rename to zkstack_cli/crates/zkstack/src/commands/portal.rs index 5bf21121177..f9e7fe35860 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/portal.rs +++ b/zkstack_cli/crates/zkstack/src/commands/portal.rs @@ -107,7 +107,7 @@ async fn validate_portal_config( continue; } // Append missing chain, chain might not be initialized, so ignoring errors - if let Some(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) { + if let Ok(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) { if let Ok(portal_chain_config) = build_portal_chain_config(&chain_config).await { portal_config.add_chain_config(&portal_chain_config); } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/compressor_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs rename to 
zkstack_cli/crates/zkstack/src/commands/prover/args/compressor_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs similarity index 92% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs index 94fea1389d2..fab79899302 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs @@ -33,6 +33,9 @@ use crate::{ #[derive(Debug, Clone, Parser, Default)] pub struct ProverInitArgs { + #[clap(long)] + pub dev: bool, + // Proof store object #[clap(long)] pub proof_store_dir: Option, @@ -58,7 +61,7 @@ pub struct ProverInitArgs { pub bellman_cuda: Option, #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub setup_compressor_keys: Option, + pub setup_compressor_key: Option, #[clap(flatten)] pub compressor_keys_args: CompressorKeysArgs, @@ -228,6 +231,10 @@ impl ProverInitArgs { ) -> anyhow::Result { logger::info(MSG_GETTING_PROOF_STORE_CONFIG); + if self.dev { + return Ok(self.handle_file_backed_config(Some(DEFAULT_PROOF_STORE_DIR.to_string()))); + } + if self.proof_store_dir.is_some() { return Ok(self.handle_file_backed_config(self.proof_store_dir.clone())); } @@ -277,6 +284,11 @@ impl ProverInitArgs { shell: &Shell, ) -> anyhow::Result> { logger::info(MSG_GETTING_PUBLIC_STORE_CONFIG); + + if self.dev { + return Ok(None); + } + let shall_save_to_public_bucket = self .shall_save_to_public_bucket .unwrap_or_else(|| PromptConfirm::new(MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT).ask()); @@ -345,7 +357,13 @@ impl ProverInitArgs { &self, default_path: &str, ) -> Option { - let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| { + if self.dev { + return Some(CompressorKeysArgs { + path: Some(default_path.to_string()), + }); + } + + let download_key = self.clone().setup_compressor_key.unwrap_or_else(|| { PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT) .default(false) .ask() @@ -363,6 +381,9 @@ impl ProverInitArgs { } fn fill_setup_keys_values_with_prompt(&self) -> Option { + if self.dev { + return None; + } let args = self.setup_keys_args.clone(); if self.setup_keys.unwrap_or_else(|| { @@ -475,6 +496,10 @@ impl ProverInitArgs { } fn fill_bellman_cuda_values_with_prompt(&self) -> Option { + if self.dev { + return None; + } + let args = self.bellman_cuda_config.clone(); if self.bellman_cuda.unwrap_or_else(|| { PromptConfirm::new(MSG_INITIALIZE_BELLMAN_CUDA_PROMPT) @@ -488,6 +513,10 @@ impl ProverInitArgs { } fn get_cloud_type_with_prompt(&self) -> CloudConnectionMode { + if self.dev { + return CloudConnectionMode::Local; + } + let cloud_type = self.cloud_type.clone().unwrap_or_else(|| { PromptSelect::new( MSG_CLOUD_TYPE_PROMPT, @@ -503,25 +532,32 @@ impl ProverInitArgs { &self, config: &ChainConfig, ) -> Option { - let setup_database = self - .setup_database - .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask()); + let setup_database = self.dev + || self + .setup_database + .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask()); if setup_database { let DBNames { prover_name, .. 
} = generate_db_names(config); let chain_name = config.name.clone(); - let dont_drop = self.dont_drop.unwrap_or_else(|| { - !PromptConfirm::new("Do you want to drop the database?") - .default(true) - .ask() - }); + let dont_drop = if !self.dev { + self.dont_drop.unwrap_or_else(|| { + !PromptConfirm::new("Do you want to drop the database?") + .default(true) + .ask() + }) + } else { + false + }; - if self.use_default.unwrap_or_else(|| { - PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP) - .default(true) - .ask() - }) { + if self.dev + || self.use_default.unwrap_or_else(|| { + PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP) + .default(true) + .ask() + }) + { Some(ProverDatabaseConfig { database_config: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), dont_drop, diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs similarity index 58% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs index ba204b0be9e..98a5c78be2a 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs @@ -10,7 +10,9 @@ use crate::messages::{ #[derive(Debug, Clone, Parser, Default, Serialize, Deserialize)] pub struct InitBellmanCudaArgs { - #[clap(long)] + #[clap(long, conflicts_with_all(["bellman_cuda_dir"]))] + pub clone: bool, + #[clap(long, conflicts_with_all(["clone"]))] pub bellman_cuda_dir: Option, } @@ -31,19 +33,26 @@ impl std::fmt::Display for BellmanCudaPathSelection { impl InitBellmanCudaArgs { pub fn fill_values_with_prompt(self) -> InitBellmanCudaArgs { - let bellman_cuda_dir = self.bellman_cuda_dir.unwrap_or_else(|| { - match PromptSelect::new( - MSG_BELLMAN_CUDA_ORIGIN_SELECT, - BellmanCudaPathSelection::iter(), - ) - .ask() - { - BellmanCudaPathSelection::Clone => "".to_string(), - BellmanCudaPathSelection::Path => Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask(), - } - }); + let bellman_cuda_dir = if self.clone { + "".to_string() + } else { + self.bellman_cuda_dir.unwrap_or_else(|| { + match PromptSelect::new( + MSG_BELLMAN_CUDA_ORIGIN_SELECT, + BellmanCudaPathSelection::iter(), + ) + .ask() + { + BellmanCudaPathSelection::Clone => "".to_string(), + BellmanCudaPathSelection::Path => { + Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask() + } + } + }) + }; InitBellmanCudaArgs { + clone: self.clone, bellman_cuda_dir: Some(bellman_cuda_dir), } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs similarity index 77% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs index 59a82152f1f..b79af777673 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs @@ -8,7 +8,8 @@ use strum::{EnumIter, IntoEnumIterator}; use crate::{ consts::{ - COMPRESSOR_BINARY_NAME, COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, 
PROVER_DOCKER_IMAGE, + CIRCUIT_PROVER_BINARY_NAME, CIRCUIT_PROVER_DOCKER_IMAGE, COMPRESSOR_BINARY_NAME, + COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE, PROVER_GATEWAY_BINARY_NAME, PROVER_GATEWAY_DOCKER_IMAGE, PROVER_JOB_MONITOR_BINARY_NAME, PROVER_JOB_MONITOR_DOCKER_IMAGE, WITNESS_GENERATOR_BINARY_NAME, WITNESS_GENERATOR_DOCKER_IMAGE, WITNESS_VECTOR_GENERATOR_BINARY_NAME, @@ -30,8 +31,12 @@ pub struct ProverRunArgs { pub witness_vector_generator_args: WitnessVectorGeneratorArgs, #[clap(flatten)] pub fri_prover_args: FriProverRunArgs, + #[clap(flatten)] + pub circuit_prover_args: CircuitProverArgs, #[clap(long)] pub docker: Option, + #[clap(long)] + pub tag: Option, } #[derive( @@ -46,6 +51,8 @@ pub enum ProverComponent { WitnessVectorGenerator, #[strum(to_string = "Prover")] Prover, + #[strum(to_string = "CircuitProver")] + CircuitProver, #[strum(to_string = "Compressor")] Compressor, #[strum(to_string = "ProverJobMonitor")] @@ -59,6 +66,7 @@ impl ProverComponent { Self::WitnessGenerator => WITNESS_GENERATOR_DOCKER_IMAGE, Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE, Self::Prover => PROVER_DOCKER_IMAGE, + Self::CircuitProver => CIRCUIT_PROVER_DOCKER_IMAGE, Self::Compressor => COMPRESSOR_DOCKER_IMAGE, Self::ProverJobMonitor => PROVER_JOB_MONITOR_DOCKER_IMAGE, } @@ -70,6 +78,7 @@ impl ProverComponent { Self::WitnessGenerator => WITNESS_GENERATOR_BINARY_NAME, Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_BINARY_NAME, Self::Prover => PROVER_BINARY_NAME, + Self::CircuitProver => CIRCUIT_PROVER_BINARY_NAME, Self::Compressor => COMPRESSOR_BINARY_NAME, Self::ProverJobMonitor => PROVER_JOB_MONITOR_BINARY_NAME, } @@ -78,10 +87,10 @@ impl ProverComponent { pub fn get_application_args(&self, in_docker: bool) -> anyhow::Result> { let mut application_args = vec![]; - if self == &Self::Prover || self == &Self::Compressor { + if self == &Self::Prover || self == &Self::Compressor || self == &Self::CircuitProver { if in_docker { application_args.push("--gpus=all".to_string()); - } else { + } else if self != &Self::CircuitProver { application_args.push("--features=gpu".to_string()); } } @@ -160,6 +169,26 @@ impl ProverComponent { )); }; } + Self::CircuitProver => { + if args.circuit_prover_args.max_allocation.is_some() { + additional_args.push(format!( + "--max-allocation={}", + args.fri_prover_args.max_allocation.unwrap() + )); + }; + if args + .circuit_prover_args + .witness_vector_generator_count + .is_some() + { + additional_args.push(format!( + "--witness-vector-generator-count={}", + args.circuit_prover_args + .witness_vector_generator_count + .unwrap() + )); + }; + } _ => {} }; @@ -211,6 +240,37 @@ impl WitnessVectorGeneratorArgs { } } +#[derive(Debug, Clone, Parser, Default)] +pub struct CircuitProverArgs { + #[clap(long)] + pub witness_vector_generator_count: Option, + #[clap(long)] + pub max_allocation: Option, +} + +impl CircuitProverArgs { + pub fn fill_values_with_prompt( + self, + component: ProverComponent, + ) -> anyhow::Result { + if component != ProverComponent::CircuitProver { + return Ok(Self::default()); + } + + let witness_vector_generator_count = + self.witness_vector_generator_count.unwrap_or_else(|| { + Prompt::new("Number of WVG jobs to run in parallel") + .default("1") + .ask() + }); + + Ok(CircuitProverArgs { + witness_vector_generator_count: Some(witness_vector_generator_count), + max_allocation: self.max_allocation, + }) + } +} + #[derive(Debug, Clone, Parser, Default)] pub struct FriProverRunArgs { /// Memory allocation 
limit in bytes (for prover component) @@ -232,18 +292,26 @@ impl ProverRunArgs { .witness_vector_generator_args .fill_values_with_prompt(component)?; + let circuit_prover_args = self + .circuit_prover_args + .fill_values_with_prompt(component)?; + let docker = self.docker.unwrap_or_else(|| { Prompt::new("Do you want to run Docker image for the component?") .default("false") .ask() }); + let tag = self.tag.unwrap_or("latest2.0".to_string()); + Ok(ProverRunArgs { component: Some(component), witness_generator_args, witness_vector_generator_args, fri_prover_args: self.fri_prover_args, + circuit_prover_args, docker: Some(docker), + tag: Some(tag), }) } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/setup_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/setup_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs similarity index 80% rename from zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs index 703ecc18c4c..a3d40c95728 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs @@ -1,7 +1,7 @@ use anyhow::Context; -use common::{check_prerequisites, cmd::Cmd, spinner::Spinner, WGET_PREREQUISITE}; +use common::spinner::Spinner; use config::{get_link_to_prover, EcosystemConfig, GeneralConfig}; -use xshell::{cmd, Shell}; +use xshell::Shell; use super::args::compressor_keys::CompressorKeysArgs; use crate::messages::{ @@ -35,7 +35,6 @@ pub(crate) fn download_compressor_key( general_config: &mut GeneralConfig, path: &str, ) -> anyhow::Result<()> { - check_prerequisites(shell, &WGET_PREREQUISITE, false); let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER); let mut compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config .proof_compressor_config @@ -47,14 +46,13 @@ pub(crate) fn download_compressor_key( let url = compressor_config.universal_setup_download_url; let path = std::path::Path::new(path); - let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR); - let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR); - Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?; + let client = reqwest::blocking::Client::builder() + .timeout(std::time::Duration::from_secs(600)) + .build()?; - if file_name != "setup_2^24.key" { - Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?; - } + let response = client.get(url).send()?.bytes()?; + shell.write_file(path, &response)?; spinner.finish(); Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs b/zkstack_cli/crates/zkstack/src/commands/prover/gcs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/gcs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/init.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/init.rs diff --git 
a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/init_bellman_cuda.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/init_bellman_cuda.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zkstack_cli/crates/zkstack/src/commands/prover/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs similarity index 88% rename from zk_toolbox/crates/zk_inception/src/commands/prover/run.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/run.rs index ed2f5b41a86..85495d12404 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs @@ -8,7 +8,8 @@ use xshell::{cmd, Shell}; use super::args::run::{ProverComponent, ProverRunArgs}; use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, - MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, + MSG_RUNNING_CIRCUIT_PROVER, MSG_RUNNING_CIRCUIT_PROVER_ERR, MSG_RUNNING_COMPRESSOR, + MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_PROVER_JOB_MONITOR, MSG_RUNNING_PROVER_JOB_MONITOR_ERR, MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR, @@ -32,7 +33,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() let application_args = component.get_application_args(in_docker)?; let additional_args = - component.get_additional_args(in_docker, args, &chain, &path_to_ecosystem)?; + component.get_additional_args(in_docker, args.clone(), &chain, &path_to_ecosystem)?; let (message, error) = match component { ProverComponent::WitnessGenerator => ( @@ -49,6 +50,12 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() } (MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR) } + ProverComponent::CircuitProver => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + } + (MSG_RUNNING_CIRCUIT_PROVER, MSG_RUNNING_CIRCUIT_PROVER_ERR) + } ProverComponent::Compressor => { if !in_docker { check_prerequisites(shell, &GPU_PREREQUISITES, false); @@ -76,6 +83,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() run_dockerized_component( shell, component.image_name(), + &args.tag.unwrap(), &application_args, &additional_args, message, @@ -103,6 +111,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() fn run_dockerized_component( shell: &Shell, image_name: &str, + tag: &str, application_args: &[String], args: &[String], message: &'static str, @@ -117,7 +126,7 @@ fn run_dockerized_component( let mut cmd = Cmd::new(cmd!( shell, - "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name} {args...}" + "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name}:{tag} {args...}" 
)); cmd = cmd.with_force_run(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/setup_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/setup_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/server.rs b/zkstack_cli/crates/zkstack/src/commands/server.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/server.rs rename to zkstack_cli/crates/zkstack/src/commands/server.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/update.rs b/zkstack_cli/crates/zkstack/src/commands/update.rs similarity index 95% rename from zk_toolbox/crates/zk_inception/src/commands/update.rs rename to zkstack_cli/crates/zkstack/src/commands/update.rs index 5cb7208ffd0..534d490e6ca 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/update.rs +++ b/zkstack_cli/crates/zkstack/src/commands/update.rs @@ -8,11 +8,10 @@ use common::{ yaml::{merge_yaml, ConfigDiff}, }; use config::{ - traits::ReadConfigWithBasePath, ChainConfig, EcosystemConfig, CONTRACTS_FILE, EN_CONFIG_FILE, - ERA_OBSERBAVILITY_DIR, GENERAL_FILE, GENESIS_FILE, SECRETS_FILE, + ChainConfig, EcosystemConfig, CONTRACTS_FILE, EN_CONFIG_FILE, ERA_OBSERBAVILITY_DIR, + GENERAL_FILE, GENESIS_FILE, SECRETS_FILE, }; use xshell::Shell; -use zksync_config::configs::Secrets; use super::args::UpdateArgs; use crate::{ @@ -183,7 +182,7 @@ async fn update_chain( )?; } - let secrets = Secrets::read_with_base_path(shell, secrets)?; + let secrets = chain.get_secrets_config()?; if let Some(db) = secrets.database { if let Some(url) = db.server_url { diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zkstack_cli/crates/zkstack/src/consts.rs similarity index 61% rename from zk_toolbox/crates/zk_inception/src/consts.rs rename to zkstack_cli/crates/zkstack/src/consts.rs index 9f81847e333..b7c4d2a2070 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zkstack_cli/crates/zkstack/src/consts.rs @@ -1,5 +1,3 @@ -use std::net::{IpAddr, Ipv4Addr}; - pub const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; pub const MINIMUM_BALANCE_FOR_WALLET: u128 = 5000000000000000000; @@ -12,27 +10,6 @@ pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; -#[allow(non_upper_case_globals)] -const kB: usize = 1024; - -/// Max payload size for consensus in bytes -pub const MAX_PAYLOAD_SIZE: usize = 2_500_000; -/// Max batch size for consensus in bytes -/// Compute a default batch size, so operators are not caught out by the missing setting -/// while we're still working on batch syncing. The batch interval is ~1 minute, -/// so there will be ~60 blocks, and an Ethereum Merkle proof is ~1kB, but under high -/// traffic there can be thousands of huge transactions that quickly fill up blocks -/// and there could be more blocks in a batch then expected. We chose a generous -/// limit so as not to prevent any legitimate batch from being transmitted. 
-pub const MAX_BATCH_SIZE: usize = MAX_PAYLOAD_SIZE * 5000 + kB; -/// Gossip dynamic inbound limit for consensus -pub const GOSSIP_DYNAMIC_INBOUND_LIMIT: usize = 100; - -/// Public address for consensus -pub const CONSENSUS_PUBLIC_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); -/// Server address for consensus -pub const CONSENSUS_SERVER_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); - /// Path to the JS runtime config for the block-explorer-app docker container to be mounted to pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js"; pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; @@ -40,18 +17,19 @@ pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; pub const PORTAL_DOCKER_CONFIG_PATH: &str = "/usr/src/app/dist/config.js"; pub const PORTAL_DOCKER_IMAGE: &str = "matterlabs/dapp-portal"; -pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway:latest2.0"; -pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:latest2.0"; -pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = - "matterlabs/witness-vector-generator:latest2.0"; -pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; -pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; -pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; +pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway"; +pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator"; +pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-vector-generator"; +pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri"; +pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu"; +pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor"; +pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor"; pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_generator"; pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri"; +pub const CIRCUIT_PROVER_BINARY_NAME: &str = "zksync_circuit_prover"; pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor"; pub const PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor"; diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zkstack_cli/crates/zkstack/src/defaults.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/defaults.rs rename to zkstack_cli/crates/zkstack/src/defaults.rs diff --git a/zk_toolbox/crates/zk_inception/src/external_node.rs b/zkstack_cli/crates/zkstack/src/external_node.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/external_node.rs rename to zkstack_cli/crates/zkstack/src/external_node.rs diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zkstack_cli/crates/zkstack/src/main.rs similarity index 56% rename from zk_toolbox/crates/zk_inception/src/main.rs rename to zkstack_cli/crates/zkstack/src/main.rs index 0af9922d0c4..3ebe26a4fa2 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zkstack_cli/crates/zkstack/src/main.rs @@ -1,7 +1,8 @@ use clap::{command, Parser, Subcommand}; use commands::{ - args::{ContainersArgs, UpdateArgs}, + args::{AutocompleteArgs, 
ContainersArgs, UpdateArgs}, contract_verifier::ContractVerifierCommands, + dev::DevCommands, }; use common::{ check_general_prerequisites, @@ -28,30 +29,36 @@ mod utils; #[derive(Parser, Debug)] #[command( + name = "zkstack", version = version_message(env!("CARGO_PKG_VERSION")), about )] -struct Inception { +struct ZkStack { #[command(subcommand)] - command: InceptionSubcommands, + command: ZkStackSubcommands, #[clap(flatten)] - global: InceptionGlobalArgs, + global: ZkStackGlobalArgs, } #[derive(Subcommand, Debug)] -pub enum InceptionSubcommands { +pub enum ZkStackSubcommands { + /// Create shell autocompletion files + Autocomplete(AutocompleteArgs), /// Ecosystem related commands #[command(subcommand, alias = "e")] - Ecosystem(EcosystemCommands), + Ecosystem(Box), /// Chain related commands #[command(subcommand, alias = "c")] - Chain(ChainCommands), + Chain(Box), + /// Supervisor related commands + #[command(subcommand)] + Dev(DevCommands), /// Prover related commands #[command(subcommand, alias = "p")] Prover(ProverCommands), /// Run server Server(RunServerArgs), - /// External Node related commands + /// External Node related commands #[command(subcommand, alias = "en")] ExternalNode(ExternalNodeCommands), /// Run containers for local development @@ -65,18 +72,20 @@ pub enum InceptionSubcommands { /// Run block-explorer #[command(subcommand)] Explorer(ExplorerCommands), - /// Update ZKsync + /// Consensus utilities #[command(subcommand)] Consensus(consensus::Command), + /// Update ZKsync #[command(alias = "u")] Update(UpdateArgs), + /// Print markdown help #[command(hide = true)] Markdown, } #[derive(Parser, Debug)] #[clap(next_help_heading = "Global options")] -struct InceptionGlobalArgs { +struct ZkStackGlobalArgs { /// Verbose mode #[clap(short, long, global = true)] verbose: bool, @@ -94,8 +103,20 @@ async fn main() -> anyhow::Result<()> { // We must parse arguments before printing the intro, because some autogenerated // Clap commands (like `--version` would look odd otherwise). - let inception_args = Inception::parse(); + let zkstack_args = ZkStack::parse(); + + match run_subcommand(zkstack_args).await { + Ok(_) => {} + Err(error) => { + log_error(error); + std::process::exit(1); + } + } + + Ok(()) +} +async fn run_subcommand(zkstack_args: ZkStack) -> anyhow::Result<()> { init_prompt_theme(); logger::new_empty_line(); @@ -103,51 +124,39 @@ async fn main() -> anyhow::Result<()> { let shell = Shell::new().unwrap(); - init_global_config_inner(&shell, &inception_args.global)?; + init_global_config_inner(&shell, &zkstack_args.global)?; if !global_config().ignore_prerequisites { check_general_prerequisites(&shell); } - match run_subcommand(inception_args, &shell).await { - Ok(_) => {} - Err(error) => { - log_error(error); - std::process::exit(1); - } - } - Ok(()) -} - -async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Result<()> { - match inception_args.command { - InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, args).await?, - InceptionSubcommands::Chain(args) => commands::chain::run(shell, args).await?, - InceptionSubcommands::Prover(args) => commands::prover::run(shell, args).await?, - InceptionSubcommands::Server(args) => commands::server::run(shell, args)?, - InceptionSubcommands::Containers(args) => commands::containers::run(shell, args)?, - InceptionSubcommands::ExternalNode(args) => { - commands::external_node::run(shell, args).await? 
+ match zkstack_args.command { + ZkStackSubcommands::Autocomplete(args) => commands::autocomplete::run(args)?, + ZkStackSubcommands::Ecosystem(args) => commands::ecosystem::run(&shell, *args).await?, + ZkStackSubcommands::Chain(args) => commands::chain::run(&shell, *args).await?, + ZkStackSubcommands::Dev(args) => commands::dev::run(&shell, args).await?, + ZkStackSubcommands::Prover(args) => commands::prover::run(&shell, args).await?, + ZkStackSubcommands::Server(args) => commands::server::run(&shell, args)?, + ZkStackSubcommands::Containers(args) => commands::containers::run(&shell, args)?, + ZkStackSubcommands::ExternalNode(args) => { + commands::external_node::run(&shell, args).await? } - InceptionSubcommands::ContractVerifier(args) => { - commands::contract_verifier::run(shell, args).await? + ZkStackSubcommands::ContractVerifier(args) => { + commands::contract_verifier::run(&shell, args).await? } - InceptionSubcommands::Explorer(args) => commands::explorer::run(shell, args).await?, - InceptionSubcommands::Consensus(cmd) => cmd.run(shell).await?, - InceptionSubcommands::Portal => commands::portal::run(shell).await?, - InceptionSubcommands::Update(args) => commands::update::run(shell, args).await?, - InceptionSubcommands::Markdown => { - clap_markdown::print_help_markdown::(); + ZkStackSubcommands::Explorer(args) => commands::explorer::run(&shell, args).await?, + ZkStackSubcommands::Consensus(cmd) => cmd.run(&shell).await?, + ZkStackSubcommands::Portal => commands::portal::run(&shell).await?, + ZkStackSubcommands::Update(args) => commands::update::run(&shell, args).await?, + ZkStackSubcommands::Markdown => { + clap_markdown::print_help_markdown::(); } } Ok(()) } -fn init_global_config_inner( - shell: &Shell, - inception_args: &InceptionGlobalArgs, -) -> anyhow::Result<()> { - if let Some(name) = &inception_args.chain { +fn init_global_config_inner(shell: &Shell, zkstack_args: &ZkStackGlobalArgs) -> anyhow::Result<()> { + if let Some(name) = &zkstack_args.chain { if let Ok(config) = EcosystemConfig::from_file(shell) { let chains = config.list_of_chains(); if !chains.contains(name) { @@ -160,9 +169,9 @@ fn init_global_config_inner( } } init_global_config(GlobalConfig { - verbose: inception_args.verbose, - chain_name: inception_args.chain.clone(), - ignore_prerequisites: inception_args.ignore_prerequisites, + verbose: zkstack_args.verbose, + chain_name: zkstack_args.chain.clone(), + ignore_prerequisites: zkstack_args.ignore_prerequisites, }); Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/messages.rs rename to zkstack_cli/crates/zkstack/src/messages.rs index c539afc5144..a985b4238bd 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -15,6 +15,15 @@ pub(super) const MSG_SELECTED_CONFIG: &str = "Selected config"; pub(super) const MSG_CHAIN_NOT_INITIALIZED: &str = "Chain not initialized. Please create a chain first"; pub(super) const MSG_ARGS_VALIDATOR_ERR: &str = "Invalid arguments"; +pub(super) const MSG_DEV_ARG_HELP: &str = + "Use defaults for all options and flags. 
Suitable for local development"; + +/// Autocomplete message +pub(super) fn msg_generate_autocomplete_file(filename: &str) -> String { + format!("Generating completion file: {filename}") +} +pub(super) const MSG_OUTRO_AUTOCOMPLETE_GENERATION: &str = + "Autocompletion file correctly generated"; /// Ecosystem create related messages pub(super) const MSG_L1_NETWORK_HELP: &str = "L1 Network"; @@ -54,8 +63,6 @@ pub(super) fn msg_path_to_zksync_does_not_exist_err(path: &str) -> String { pub(super) const MSG_L1_RPC_URL_HELP: &str = "L1 RPC URL"; pub(super) const MSG_NO_PORT_REALLOCATION_HELP: &str = "Do not reallocate ports"; pub(super) const MSG_GENESIS_ARGS_HELP: &str = "Genesis options"; -pub(super) const MSG_DEV_ARG_HELP: &str = - "Deploy ecosystem using all defaults. Suitable for local development"; pub(super) const MSG_OBSERVABILITY_HELP: &str = "Enable Grafana"; pub(super) const MSG_OBSERVABILITY_PROMPT: &str = "Do you want to setup observability? (Grafana)"; pub(super) const MSG_DEPLOY_ECOSYSTEM_PROMPT: &str = @@ -72,6 +79,10 @@ pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found"; pub(super) const MSG_INITIALIZING_ECOSYSTEM: &str = "Initializing ecosystem"; pub(super) const MSG_DEPLOYING_ERC20: &str = "Deploying ERC20 contracts"; pub(super) const MSG_CHAIN_INITIALIZED: &str = "Chain initialized successfully"; +pub(super) const MSG_CHAIN_CONFIGS_INITIALIZED: &str = "Chain configs were initialized"; +pub(super) const MSG_CHAIN_OWNERSHIP_TRANSFERRED: &str = + "Chain ownership was transferred successfully"; +pub(super) const MSG_CHAIN_REGISTERED: &str = "Chain registraion was successful"; pub(super) const MSG_DISTRIBUTING_ETH_SPINNER: &str = "Distributing eth..."; pub(super) const MSG_MINT_BASE_TOKEN_SPINNER: &str = "Minting base token to the governance addresses..."; @@ -101,7 +112,11 @@ pub(super) fn msg_initializing_chain(chain_name: &str) -> String { } pub(super) fn msg_ecosystem_initialized(chains: &str) -> String { - format!("Ecosystem initialized successfully with chains {chains}") + if chains.is_empty() { + "Ecosystem initialized successfully. 
You can initialize chain with `chain init`".to_string() + } else { + format!("Ecosystem initialized successfully with chains {chains}") + } } /// Ecosystem default related messages @@ -142,6 +157,7 @@ pub(super) const MSG_BASE_TOKEN_ADDRESS_HELP: &str = "Base token address"; pub(super) const MSG_BASE_TOKEN_PRICE_NOMINATOR_HELP: &str = "Base token nominator"; pub(super) const MSG_BASE_TOKEN_PRICE_DENOMINATOR_HELP: &str = "Base token denominator"; pub(super) const MSG_SET_AS_DEFAULT_HELP: &str = "Set as default chain"; +pub(super) const MSG_EVM_EMULATOR_HELP: &str = "Enable EVM emulator"; pub(super) const MSG_CHAIN_NAME_PROMPT: &str = "What do you want to name the chain?"; pub(super) const MSG_CHAIN_ID_PROMPT: &str = "What's the chain id?"; pub(super) const MSG_WALLET_CREATION_PROMPT: &str = "Select how do you want to create the wallet"; @@ -156,6 +172,7 @@ pub(super) const MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT: &str = pub(super) const MSG_BASE_TOKEN_PRICE_DENOMINATOR_PROMPT: &str = "What is the base token price denominator?"; pub(super) const MSG_SET_AS_DEFAULT_PROMPT: &str = "Set this chain as default?"; +pub(super) const MSG_EVM_EMULATOR_PROMPT: &str = "Enable EVM emulator?"; pub(super) const MSG_WALLET_PATH_INVALID_ERR: &str = "Invalid path"; pub(super) const MSG_NUMBER_VALIDATOR_NOT_ZERO_ERR: &str = "Number is not zero"; pub(super) const MSG_NUMBER_VALIDATOR_GREATHER_THAN_ZERO_ERR: &str = @@ -170,6 +187,9 @@ pub(super) const MSG_WALLET_CREATION_VALIDATOR_ERR: &str = "Localhost wallet is not supported for external networks"; pub(super) const MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND: &str = "Token Multiplier Setter not found. Specify it in a wallet config"; +pub(super) const MSG_EVM_EMULATOR_HASH_MISSING_ERR: &str = + "Impossible to initialize a chain with EVM emulator: the template genesis config \ + does not contain EVM emulator hash"; /// Chain genesis related messages pub(super) const MSG_L1_SECRETS_MUST_BE_PRESENTED: &str = "L1 secret must be presented"; @@ -188,6 +208,7 @@ pub(super) const MSG_INITIALIZING_SERVER_DATABASE: &str = "Initializing server d pub(super) const MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR: &str = "Failed to drop server database"; pub(super) const MSG_INITIALIZING_PROVER_DATABASE: &str = "Initializing prover database"; pub(super) const MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR: &str = "Failed to drop prover database"; +pub(super) const MSG_GENESIS_DATABASES_INITIALIZED: &str = "Databases initialized successfully"; /// Chain update related messages pub(super) const MSG_WALLETS_CONFIG_MUST_BE_PRESENT: &str = "Wallets configuration must be present"; @@ -288,7 +309,7 @@ pub(super) const MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR: &str = pub(super) const MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR: &str = "Failed to create explorer config"; pub(super) const MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR: &str = - "Failed to find any valid chain to run explorer for. Did you run `zk_inception explorer init`?"; + "Failed to find any valid chain to run explorer for. 
Did you run `zkstack explorer init`?"; pub(super) const MSG_EXPLORER_INITIALIZED: &str = "Explorer has been initialized successfully"; pub(super) fn msg_explorer_initializing_database_for(chain: &str) -> String { format!("Initializing explorer database for {chain} chain") @@ -303,7 +324,7 @@ pub(super) fn msg_explorer_starting_on(host: &str, port: u16) -> String { format!("Starting explorer on http://{host}:{port}") } pub(super) fn msg_explorer_chain_not_initialized(chain: &str) -> String { - format!("Chain {chain} is not initialized for explorer: run `zk_inception explorer init --chain {chain}` first") + format!("Chain {chain} is not initialized for explorer: run `zkstack explorer init --chain {chain}` first") } /// Forge utils related messages @@ -343,6 +364,7 @@ pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job moni pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator"; pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; pub(super) const MSG_RUNNING_PROVER: &str = "Running prover"; +pub(super) const MSG_RUNNING_CIRCUIT_PROVER: &str = "Running circuit prover"; pub(super) const MSG_RUNNING_COMPRESSOR: &str = "Running compressor"; pub(super) const MSG_RUN_COMPONENT_PROMPT: &str = "What component do you want to run?"; pub(super) const MSG_RUNNING_PROVER_GATEWAY_ERR: &str = "Failed to run prover gateway"; @@ -351,6 +373,7 @@ pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR: &str = "Failed to run witness vector generator"; pub(super) const MSG_RUNNING_COMPRESSOR_ERR: &str = "Failed to run compressor"; pub(super) const MSG_RUNNING_PROVER_ERR: &str = "Failed to run prover"; +pub(super) const MSG_RUNNING_CIRCUIT_PROVER_ERR: &str = "Failed to run circuit prover"; pub(super) const MSG_PROOF_STORE_CONFIG_PROMPT: &str = "Select where you would like to store the proofs"; pub(super) const MSG_PROOF_STORE_DIR_PROMPT: &str = diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zkstack_cli/crates/zkstack/src/utils/consensus.rs similarity index 70% rename from zk_toolbox/crates/zk_inception/src/utils/consensus.rs rename to zkstack_cli/crates/zkstack/src/utils/consensus.rs index 2979b4df0c1..946d28a33fb 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs +++ b/zkstack_cli/crates/zkstack/src/utils/consensus.rs @@ -1,24 +1,14 @@ -use std::{ - collections::{BTreeMap, BTreeSet}, - net::SocketAddr, -}; - use anyhow::Context as _; use config::ChainConfig; use secrecy::{ExposeSecret, Secret}; use zksync_config::configs::consensus::{ - AttesterPublicKey, AttesterSecretKey, ConsensusConfig, ConsensusSecrets, GenesisSpec, Host, - NodePublicKey, NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, - WeightedAttester, WeightedValidator, + AttesterPublicKey, AttesterSecretKey, ConsensusSecrets, GenesisSpec, NodePublicKey, + NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, WeightedAttester, + WeightedValidator, }; use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::{attester, node, validator}; -use crate::consts::{ - CONSENSUS_PUBLIC_ADDRESS_HOST, CONSENSUS_SERVER_ADDRESS_HOST, GOSSIP_DYNAMIC_INBOUND_LIMIT, - MAX_BATCH_SIZE, MAX_PAYLOAD_SIZE, -}; - pub(crate) fn parse_attester_committee( attesters: &[WeightedAttester], ) -> anyhow::Result { @@ -48,32 +38,6 @@ pub struct ConsensusPublicKeys { attester_key: attester::PublicKey, } -pub fn get_consensus_config( - chain_config: &ChainConfig, - consensus_port: u16, - 
consensus_keys: Option, - gossip_static_outbound: Option>, -) -> anyhow::Result { - let genesis_spec = - consensus_keys.map(|consensus_keys| get_genesis_specs(chain_config, &consensus_keys)); - - let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, consensus_port); - let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, consensus_port); - - Ok(ConsensusConfig { - server_addr, - public_addr: Host(public_addr.encode()), - genesis_spec, - max_payload_size: MAX_PAYLOAD_SIZE, - gossip_dynamic_inbound_limit: GOSSIP_DYNAMIC_INBOUND_LIMIT, - max_batch_size: MAX_BATCH_SIZE, - gossip_static_inbound: BTreeSet::new(), - gossip_static_outbound: gossip_static_outbound.unwrap_or_default(), - rpc: None, - debug_page_addr: None, - }) -} - pub fn generate_consensus_keys() -> ConsensusSecretKeys { ConsensusSecretKeys { validator_key: validator::SecretKey::generate(), diff --git a/zk_toolbox/crates/zk_inception/src/utils/forge.rs b/zkstack_cli/crates/zkstack/src/utils/forge.rs similarity index 74% rename from zk_toolbox/crates/zk_inception/src/utils/forge.rs rename to zkstack_cli/crates/zkstack/src/utils/forge.rs index cabc8ff7566..355cf7b5f93 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/forge.rs +++ b/zkstack_cli/crates/zkstack/src/utils/forge.rs @@ -1,6 +1,6 @@ -use anyhow::anyhow; -use common::forge::ForgeScript; -use ethers::types::{H256, U256}; +use anyhow::Context as _; +use common::{forge::ForgeScript, wallets::Wallet}; +use ethers::types::U256; use crate::{ consts::MINIMUM_BALANCE_FOR_WALLET, @@ -9,10 +9,14 @@ use crate::{ pub fn fill_forge_private_key( mut forge: ForgeScript, - private_key: Option, + wallet: Option<&Wallet>, ) -> anyhow::Result { if !forge.wallet_args_passed() { - forge = forge.with_private_key(private_key.ok_or(anyhow!(MSG_DEPLOYER_PK_NOT_SET_ERR))?); + forge = forge.with_private_key( + wallet + .and_then(|w| w.private_key_h256()) + .context(MSG_DEPLOYER_PK_NOT_SET_ERR)?, + ); } Ok(forge) } diff --git a/zk_toolbox/crates/zk_inception/src/utils/mod.rs b/zkstack_cli/crates/zkstack/src/utils/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/mod.rs rename to zkstack_cli/crates/zkstack/src/utils/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zkstack_cli/crates/zkstack/src/utils/ports.rs similarity index 82% rename from zk_toolbox/crates/zk_inception/src/utils/ports.rs rename to zkstack_cli/crates/zkstack/src/utils/ports.rs index 5102b4fd9c6..6c299b99913 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/ports.rs +++ b/zkstack_cli/crates/zkstack/src/utils/ports.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt, ops::Range, path::Path}; +use std::{collections::HashMap, fmt, net::SocketAddr, ops::Range, path::Path}; use anyhow::{bail, Context, Result}; use config::{ @@ -12,7 +12,24 @@ use xshell::Shell; use crate::defaults::{DEFAULT_OBSERVABILITY_PORT, PORT_RANGE_END, PORT_RANGE_START}; pub struct EcosystemPorts { - pub ports: HashMap>, + pub ports: HashMap>, +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct PortInfo { + pub port: u16, + pub file_path: String, + pub description: String, +} + +impl fmt::Display for PortInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "[{}] {} >{}", + self.file_path, self.description, self.port + ) + } } impl EcosystemPorts { @@ -20,14 +37,19 @@ impl EcosystemPorts { self.ports.contains_key(&port) } - pub fn add_port_info(&mut self, port: u16, info: String) { + pub fn add_port_info(&mut self, port: u16, info: 
PortInfo) { + let info = PortInfo { + port, + file_path: info.file_path, + description: info.description, + }; self.ports.entry(port).or_default().push(info); } - pub fn allocate_port(&mut self, range: Range, info: String) -> anyhow::Result { + pub fn allocate_port(&mut self, range: Range, info: PortInfo) -> anyhow::Result { for port in range { if !self.is_port_assigned(port) { - self.add_port_info(port, info.to_string()); + self.add_port_info(port, info); return Ok(port); } } @@ -48,10 +70,15 @@ impl EcosystemPorts { let mut new_ports = HashMap::new(); for (desc, port) in config.get_default_ports()? { let mut new_port = port + offset; + let port_info = PortInfo { + port: new_port, + description: desc.clone(), + ..Default::default() + }; if self.is_port_assigned(new_port) { - new_port = self.allocate_port(port_range.clone(), desc.clone())?; + new_port = self.allocate_port(port_range.clone(), port_info)?; } else { - self.add_port_info(new_port, desc.to_string()); + self.add_port_info(new_port, port_info); } new_ports.insert(desc, new_port); } @@ -89,7 +116,7 @@ impl EcosystemPorts { if let Some(port) = val.as_u64().and_then(|p| u16::try_from(p).ok()) { let new_port = self.allocate_port( (port + offset as u16)..PORT_RANGE_END, - "".to_string(), + PortInfo::default(), )?; *val = Value::Number(serde_yaml::Number::from(new_port)); updated_ports.insert(port, new_port); @@ -109,6 +136,12 @@ impl EcosystemPorts { } } } + } else if key.as_str().map(|s| s.ends_with("addr")).unwrap_or(false) { + let socket_addr = val.as_str().unwrap().parse::()?; + if let Some(new_port) = updated_ports.get(&socket_addr.port()) { + let new_socket_addr = SocketAddr::new(socket_addr.ip(), *new_port); + *val = Value::String(new_socket_addr.to_string()); + } } } // Continue traversing @@ -126,6 +159,19 @@ impl EcosystemPorts { Ok(()) } + + pub fn group_by_file_path(&self) -> HashMap> { + let mut grouped_ports: HashMap> = HashMap::new(); + for port_infos in self.ports.values() { + for port_info in port_infos { + grouped_ports + .entry(port_info.file_path.clone()) + .or_default() + .push(port_info.clone()); + } + } + grouped_ports + } } impl fmt::Display for EcosystemPorts { @@ -169,7 +215,7 @@ impl EcosystemPortsScanner { // - Ecosystem directory (docker-compose files) let mut dirs = vec![ecosystem_config.config.clone()]; for chain in ecosystem_config.list_of_chains() { - if let Some(chain_config) = ecosystem_config.load_chain(Some(chain)) { + if let Ok(chain_config) = ecosystem_config.load_chain(Some(chain)) { dirs.push(chain_config.configs.clone()); if let Some(external_node_config_path) = &chain_config.external_node_config_path { dirs.push(external_node_config_path.clone()); @@ -272,8 +318,12 @@ impl EcosystemPortsScanner { ecosystem_ports: &mut EcosystemPorts, ) { if let Some(port) = value.as_u64().and_then(|p| u16::try_from(p).ok()) { - let description = format!("[{}] {}", file_path.display(), path); - ecosystem_ports.add_port_info(port, description); + let info = PortInfo { + port, + file_path: file_path.display().to_string(), + description: path.to_string(), + }; + ecosystem_ports.add_port_info(port, info); } } @@ -312,8 +362,12 @@ impl EcosystemPortsScanner { file_path: &Path, ecosystem_ports: &mut EcosystemPorts, ) { - let description = format!("[{}] {}", file_path.display(), path); - ecosystem_ports.add_port_info(port, description); + let info = PortInfo { + port, + file_path: file_path.display().to_string(), + description: path.to_string(), + }; + ecosystem_ports.add_port_info(port, info); } } @@ -354,7 +408,7 
@@ impl ConfigWithChainPorts for ExplorerBackendPorts { mod tests { use std::path::PathBuf; - use crate::utils::ports::{EcosystemPorts, EcosystemPortsScanner}; + use crate::utils::ports::{EcosystemPorts, EcosystemPortsScanner, PortInfo}; #[test] fn test_traverse_yaml() { @@ -408,21 +462,28 @@ mod tests { // Check description: let port_3050_info = ecosystem_ports.ports.get(&3050).unwrap(); assert_eq!(port_3050_info.len(), 1); - assert_eq!( - port_3050_info[0], - "[test_config.yaml] api:web3_json_rpc:http_port" - ); + let expected_port_3050_info = PortInfo { + port: 3050, + file_path: "test_config.yaml".to_string(), + description: "api:web3_json_rpc:http_port".to_string(), + }; + assert_eq!(port_3050_info[0], expected_port_3050_info); let port_3412_info = ecosystem_ports.ports.get(&3412).unwrap(); assert_eq!(port_3412_info.len(), 2); - assert_eq!( - port_3412_info[0], - "[test_config.yaml] api:prometheus:listener_port" - ); - assert_eq!( - port_3412_info[1], - "[test_config.yaml] prometheus:listener_port" - ); + let expected_port_3412_info_0 = PortInfo { + port: 3412, + file_path: "test_config.yaml".to_string(), + description: "api:prometheus:listener_port".to_string(), + }; + let expected_port_3412_info_1 = PortInfo { + port: 3412, + file_path: "test_config.yaml".to_string(), + description: "prometheus:listener_port".to_string(), + }; + + assert_eq!(port_3412_info[0], expected_port_3412_info_0); + assert_eq!(port_3412_info[1], expected_port_3412_info_1); } #[test] @@ -445,7 +506,12 @@ mod tests { assert!(ecosystem_ports.is_port_assigned(3050)); let port_info = ecosystem_ports.ports.get(&3050).unwrap(); - assert_eq!(port_info[0], "[test_config.yaml] web3_json_rpc:http_port"); + let expected_port_info = PortInfo { + port: 3050, + file_path: "test_config.yaml".to_string(), + description: "web3_json_rpc:http_port".to_string(), + }; + assert_eq!(port_info[0], expected_port_info); } #[test] @@ -476,7 +542,12 @@ mod tests { assert!(ecosystem_ports.is_port_assigned(8546)); let port_info = ecosystem_ports.ports.get(&8546).unwrap(); - assert_eq!(port_info[0], "[test_config.yaml] reth:ports"); + let expected_port_info = PortInfo { + port: 8546, + file_path: "test_config.yaml".to_string(), + description: "reth:ports".to_string(), + }; + assert_eq!(port_info[0], expected_port_info); } #[test] diff --git a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs b/zkstack_cli/crates/zkstack/src/utils/rocks_db.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs rename to zkstack_cli/crates/zkstack/src/utils/rocks_db.rs diff --git a/zkstack_cli/rust-toolchain b/zkstack_cli/rust-toolchain new file mode 100644 index 00000000000..03c040b91f1 --- /dev/null +++ b/zkstack_cli/rust-toolchain @@ -0,0 +1 @@ +nightly-2024-08-01 diff --git a/zkstack_cli/zkstackup/README.md b/zkstack_cli/zkstackup/README.md new file mode 100644 index 00000000000..4977c4641e0 --- /dev/null +++ b/zkstack_cli/zkstackup/README.md @@ -0,0 +1,70 @@ +# zkstackup - ZK Stack CLI Installer + +`zkstackup` is a script designed to simplify the installation of +[ZK Stack CLI](https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli). It allows you to install the tool from +a local directory or directly from a GitHub repository. 
+ +## Getting Started + +To install `zkstackup`, run the following command: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +After installing `zkstackup`, you can use it to install `zkstack_cli` with: + +```bash +zkstackup +``` + +## Usage + +The `zkstackup` script provides various options for installing ZK Stack CLI: + +### Options + +- `-p, --path ` + Specify a local path to install ZK Stack CLI from. This option is ignored if `--repo` is provided. + +- `-r, --repo ` + GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". + +- `-b, --branch ` + Git branch to use when installing from a repository. Ignored if `--commit` or `--version` is provided. + +- `-c, --commit ` + Git commit hash to use when installing from a repository. Ignored if `--branch` or `--version` is provided. + +- `-v, --version ` + Git tag to use when installing from a repository. Ignored if `--branch` or `--commit` is provided. + +### Local Installation + +If you provide a local path using the `-p` or `--path` option, `zkstackup` will install ZK Stack CLI from that +directory. Note that repository-specific arguments (`--repo`, `--branch`, `--commit`, `--version`) will be ignored in +this case to preserve git state. + +### Repository Installation + +By default, `zkstackup` installs ZK Stack CLI from the "matter-labs/zksync-era" GitHub repository. You can specify a +different repository, branch, commit, or version using the respective options. If multiple arguments are provided, +`zkstackup` will prioritize them as follows: + +- `--version` +- `--commit` +- `--branch` + +### Examples + +**Install from a GitHub repository with a specific version:** + +```bash +zkstackup --repo matter-labs/zksync-era --version 0.1.1 +``` + +**Install from a local path:** + +```bash +zkstackup --path /path/to/local/zkstack_cli +``` diff --git a/zkstack_cli/zkstackup/install b/zkstack_cli/zkstackup/install new file mode 100755 index 00000000000..849f0699bc3 --- /dev/null +++ b/zkstack_cli/zkstackup/install @@ -0,0 +1,120 @@ +#!/usr/bin/env bash +set -eo pipefail + +BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/zkstackup" + +BIN_DIR="$HOME/.local/bin" +BIN_PATH="$BIN_DIR/zkstackup" + +main() { + parse_args "$@" + + mkdir -p "$BIN_DIR" + + if [ -n "$ZKSTACKUP_PATH" ]; then + cp -r "$ZKSTACKUP_PATH" "$BIN_DIR" + else + curl -sSfL "$BIN_URL" -o "$BIN_PATH" + fi + + chmod +x "$BIN_PATH" + echo "zkstackup: successfully installed in ${BIN_DIR}." + + add_bin_folder_to_path +} + +add_bin_folder_to_path() { + if [[ ":$PATH:" == *":${BIN_DIR}:"* ]]; then + echo "zkstackup: found ${BIN_DIR} in PATH" + exit 0 + fi + + case $SHELL in + */zsh) + PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" + ;; + */bash) + PROFILE="$HOME/.bashrc" + ;; + */fish) + PROFILE="$HOME/.config/fish/config.fish" + ;; + */ash) + PROFILE="$HOME/.profile" + ;; + *) + echo "zkstackup: could not detect shell, manually add ${BIN_DIR} to your PATH." + exit 1 + ;; + esac + + if [[ ! -f "$PROFILE" ]]; then + echo "zkstackup: Profile file $PROFILE does not exist, creating it." + touch "$PROFILE" + fi + + if [[ "$SHELL" == *"/fish"* ]]; then + echo -e "\n# Added by zkstackup\nfish_add_path -a $BIN_DIR" >>"$PROFILE" + echo "zkstackup: Added $BIN_DIR to PATH in $PROFILE using fish_add_path." 
+ else + echo -e "\n# Added by zkstackup\nexport PATH=\"\$PATH:$BIN_DIR\"" >>"$PROFILE" + echo "zkstackup: Added $BIN_DIR to PATH in $PROFILE." + fi + + echo + echo "Added zkstackup to PATH." + echo "Run 'source $PROFILE' or start a new terminal session to use zkstackup." + echo "Then run 'zkstackup' to install ZK Stack CLI." +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + --) + shift + break + ;; + -p | --path) + shift + ZKSTACKUP_PATH=$1 + ;; + -l | --local) + ZKSTACKUP_PATH="./" + ;; + -g | --global) + BIN_DIR="/usr/local/bin" + BIN_PATH="$BIN_DIR/zkstackup" + ;; + -h | --help) + usage + exit 0 + ;; + *) + err "Unknown argument: $1" + usage + exit 1 + ;; + esac + shift + done +} + + +usage() { + cat < Specify a local path to install zkstackup from. + -l, --local Install zkstackup from the current directory. + -g, --global Install zkstackup for all users. + -h, --help Show this help message and exit. + +Examples: + $(basename "$0") --path /path/to/zkstackup +EOF +} + +main "$@" diff --git a/zkstack_cli/zkstackup/zkstackup b/zkstack_cli/zkstackup/zkstackup new file mode 100755 index 00000000000..e91bbc17905 --- /dev/null +++ b/zkstack_cli/zkstackup/zkstackup @@ -0,0 +1,272 @@ +#!/usr/bin/env bash +set -eo pipefail + +LOCAL_DIR="$HOME/.local/" +BIN_DIR="$LOCAL_DIR/bin" + +BINS=() + +main() { + parse_args "$@" + + zkstack_banner + + check_prerequisites + mkdir -p "$BIN_DIR" + + BINS+=(zkstack) + + if [ -n "$ZKSTACKUP_PATH" ]; then + install_local + else + install_from_repo + fi + + zkstack_banner + + add_bin_folder_to_path + + for bin in "${BINS[@]}"; do + success "Installed $bin to $BIN_DIR/$bin" + done +} + +PREREQUISITES=(cargo git) + +check_prerequisites() { + say "Checking prerequisites" + + failed_prerequisites=() + for prerequisite in "${PREREQUISITES[@]}"; do + if ! check_prerequisite "$prerequisite"; then + failed_prerequisites+=("$prerequisite") + fi + done + if [ ${#failed_prerequisites[@]} -gt 0 ]; then + err "The following prerequisites are missing: ${failed_prerequisites[*]}" + exit 1 + fi +} + +check_prerequisite() { + command -v "$1" &>/dev/null +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + --) + shift + break + ;; + -p | --path) + shift + ZKSTACKUP_PATH=$1 + ;; + -l | --local) + ZKSTACKUP_PATH="./" + ;; + -g | --global) + LOCAL_DIR="/usr/local" + BIN_DIR="$LOCAL_DIR/bin" + ;; + -r | --repo) + shift + ZKSTACKUP_REPO=$1 + ;; + -b | --branch) + shift + ZKSTACKUP_BRANCH=$1 + ;; + -c | --commit) + shift + ZKSTACKUP_COMMIT=$1 + ;; + -v | --version) + shift + ZKSTACKUP_VERSION=$1 + ;; + -h | --help) + usage + exit 0 + ;; + *) + err "Unknown argument: $1" + usage + exit 1 + ;; + esac + shift + done +} + +usage() { + cat < Specify a local path to install ZK Stack CLI from. Ignored if --repo is provided. + -l, --local Install ZK Stack CLI from the current directory. Ignored if --repo is provided. + -g, --global Install ZK Stack CLI for all users. + -r, --repo GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". + -b, --branch Git branch to use when installing from a repository. Ignored if --commit or --version is provided. + -c, --commit Git commit hash to use when installing from a repository. Ignored if --branch or --version is provided. + -v, --version Git tag to use when installing from a repository. Ignored if --branch or --commit is provided. + -h, --help Show this help message and exit. 
+ +Examples: + $(basename "$0") --repo matter-labs/zksync-era --version 0.1.1 +EOF +} + +install_local() { + if [ ! -d "$ZKSTACKUP_PATH/zkstack_cli" ]; then + err "Path $ZKSTACKUP_PATH does not contain zkstack_cli" + exit 1 + fi + + if [ "$ZKSTACKUP_PATH" = "./" ]; then + if git rev-parse --is-inside-work-tree >/dev/null 2>&1; then + git config --local core.hooksPath || + git config --local core.hooksPath ./.githooks + fi + fi + + if [ -n "$ZKSTACKUP_BRANCH" ] || [ -n "$ZKSTACKUP_COMMIT" ] || [ -n "$ZKSTACKUP_VERSION" ] || [ -n "$ZKSTACKUP_REPO" ]; then + warn "Ignoring --repo, --branch, --commit and --version arguments when installing from local path" + fi + + say "Installing ZK Stack CLI from $ZKSTACKUP_PATH" + ensure cd "$ZKSTACKUP_PATH"/zkstack_cli + + for bin in "${BINS[@]}"; do + say "Installing $bin" + ensure cargo install --root $LOCAL_DIR --path ./crates/$bin --force + chmod +x "$BIN_DIR/$bin" + done +} + +install_from_repo() { + if [ -n "$ZKSTACKUP_PATH" ]; then + warn "Ignoring --path argument when installing from repository" + fi + + ZKSTACKUP_REPO=${ZKSTACKUP_REPO:-"matter-labs/zksync-era"} + + say "Installing ZK Stack CLI from $ZKSTACKUP_REPO" + + if [ -n "$ZKSTACKUP_VERSION" ]; then + if [ -n "$ZKSTACKUP_COMMIT" ] || [ -n "$ZKSTACKUP_BRANCH" ]; then + warn "Ignoring --commit and --branch arguments when installing by version" + fi + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --tag "zkstack_cli-v$ZKSTACKUP_VERSION" --locked "${BINS[@]}" --force + elif [ -n "$ZKSTACKUP_COMMIT" ]; then + if [ -n "$ZKSTACKUP_BRANCH" ]; then + warn "Ignoring --branch argument when installing by commit" + fi + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --rev "$ZKSTACKUP_COMMIT" --locked "${BINS[@]}" --force + elif [ -n "$ZKSTACKUP_BRANCH" ]; then + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --branch "$ZKSTACKUP_BRANCH" --locked "${BINS[@]}" --force + else + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --locked "${BINS[@]}" --force + fi +} + +add_bin_folder_to_path() { + if [[ ":$PATH:" == *":${BIN_DIR}:"* ]]; then + echo "found ${BIN_DIR} in PATH" + exit 0 + fi + + case $SHELL in + */zsh) + PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" + ;; + */bash) + PROFILE="$HOME/.bashrc" + ;; + */fish) + PROFILE="$HOME/.config/fish/config.fish" + ;; + */ash) + PROFILE="$HOME/.profile" + ;; + *) + echo "could not detect shell, manually add ${BIN_DIR} to your PATH." + exit 1 + ;; + esac + + if [[ ! -f "$PROFILE" ]]; then + echo "Profile file $PROFILE does not exist, creating it." + touch "$PROFILE" + fi + + if [[ "$SHELL" == *"/fish"* ]]; then + echo -e "\n# Added by zkstackup\nfish_add_path -a $BIN_DIR" >>"$PROFILE" + echo "Added $BIN_DIR to PATH in $PROFILE using fish_add_path." + else + echo -e "\n# Added by zkstackup\nexport PATH=\"\$PATH:$BIN_DIR\"" >>"$PROFILE" + echo "Added $BIN_DIR to PATH in $PROFILE." + fi + + echo + echo "Added zkstack to PATH." + echo "Run 'source $PROFILE' or start a new terminal session to use zkstack." +} + +ensure() { + if ! 
"$@"; then + err "command failed: $*" + exit 1 + fi +} + +say() { + local action="${1%% *}" + local rest="${1#"$action" }" + + echo -e "\033[1;32m$action\033[0m $rest" +} + +success() { + echo -e "\033[1;32m$1\033[0m" +} + +warn() { + echo -e "\033[1;33mWARNING: $1\033[0m" +} + +err() { + echo -e "\033[1;31mERROR: $1\033[0m" >&2 +} + +zkstack_banner() { + printf ' + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + + ███████╗██╗ ██╗ ███████╗████████╗ █████╗ ██████╗██╗ ██╗ + ╚══███╔╝██║ ██╔╝ ██╔════╝╚══██╔══╝██╔══██╗██╔════╝██║ ██╔╝ + ███╔╝ █████╔╝ ███████╗ ██║ ███████║██║ █████╔╝ + ███╔╝ ██╔═██╗ ╚════██║ ██║ ██╔══██║██║ ██╔═██╗ + ███████╗██║ ██╗ ███████║ ██║ ██║ ██║╚██████╗██║ ██╗ + ╚══════╝╚═╝ ╚═╝ ╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝ + + + A Comprehensive Toolkit for Creating and Managing ZK Stack Chains + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +Repo : https://github.com/matter-labs/zksync-era/ +Docs : https://docs.zksync.io/ +Contribute : https://github.com/matter-labs/zksync-era/pulls + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +' +} + +main "$@" From 0835f8d79892623ed46dbd8861fee272e81435ac Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 29 Oct 2024 00:24:46 +0200 Subject: [PATCH 139/140] feat: merge main to sync-layer-stable (#3184) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ merge main to sync-layer-stable ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .github/workflows/ci-core-reusable.yml | 8 +- .github/workflows/vm-perf-comparison.yml | 33 +- .github/workflows/vm-perf-to-prometheus.yml | 4 +- Cargo.lock | 116 +++--- Cargo.toml | 10 +- core/bin/zksync_server/src/node_builder.rs | 8 +- core/lib/config/src/configs/experimental.rs | 5 + .../config/src/configs/proof_data_handler.rs | 13 + core/lib/config/src/testonly.rs | 2 + core/lib/env_config/src/proof_data_handler.rs | 2 + core/lib/env_config/src/vm_runner.rs | 2 + .../src/versions/testonly/l1_tx_execution.rs | 49 ++- core/lib/multivm/src/versions/vm_fast/mod.rs | 2 +- .../versions/vm_fast/tests/l1_tx_execution.rs | 8 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 27 +- .../vm_latest/tests/l1_tx_execution.rs | 8 +- core/lib/multivm/src/vm_instance.rs | 2 +- core/lib/protobuf_config/src/experimental.rs | 17 +- .../protobuf_config/src/proof_data_handler.rs | 9 + .../src/proto/config/experimental.proto | 1 + .../src/proto/config/prover.proto | 1 + core/lib/vm_executor/Cargo.toml | 1 + core/lib/vm_executor/src/batch/factory.rs | 2 +- core/lib/vm_executor/src/oneshot/metrics.rs | 16 +- core/lib/vm_executor/src/oneshot/mod.rs | 331 ++++++++++++------ core/lib/vm_executor/src/oneshot/tests.rs | 107 ++++++ core/lib/vm_executor/src/testonly.rs | 32 +- core/lib/vm_interface/src/storage/mod.rs | 2 + .../lib/vm_interface/src/storage/overrides.rs | 70 ++++ core/lib/vm_interface/src/types/inputs/mod.rs | 2 +- .../src/execution_sandbox/execute.rs | 18 +- .../src/execution_sandbox/storage.rs | 148 +++----- .../src/execution_sandbox/validate.rs | 7 +- core/node/api_server/src/testonly.rs | 29 +- core/node/api_server/src/tx_sender/mod.rs | 8 + .../src/tx_sender/tests/gas_estimation.rs | 38 +- .../api_server/src/tx_sender/tests/mod.rs | 3 +- core/node/api_server/src/web3/tests/vm.rs | 5 +- core/node/consensus/src/en.rs | 39 ++- core/node/consensus/src/mn.rs | 201 ++++++----- core/node/consensus/src/vm.rs | 5 +- .../layers/web3_api/tx_sender.rs | 13 +- .../src/tee_request_processor.rs | 2 +- core/node/proof_data_handler/src/tests.rs | 2 + core/tests/vm-benchmark/Cargo.toml | 4 +- core/tests/vm-benchmark/benches/iai.rs | 35 -- .../vm-benchmark/benches/instructions.rs | 206 +++++++++++ core/tests/vm-benchmark/src/bin/common/mod.rs | 54 --- .../src/bin/compare_iai_results.rs | 108 ------ .../src/bin/iai_results_to_prometheus.rs | 52 --- .../src/bin/instruction_counts.rs | 106 +++++- core/tests/vm-benchmark/src/criterion.rs | 6 +- core/tests/vm-benchmark/src/lib.rs | 2 +- core/tests/vm-benchmark/src/vm.rs | 114 +++--- etc/env/base/proof_data_handler.toml | 1 + etc/env/ecosystems/mainnet.yaml | 5 + etc/env/file_based/general.yaml | 3 +- etc/env/file_based/overrides/mainnet.yaml | 1 + etc/env/file_based/overrides/testnet.yaml | 1 + .../overrides/tests/integration.yaml | 4 + .../overrides/tests/loadtest-new.yaml | 4 + .../overrides/tests/loadtest-old.yaml | 1 + prover/Cargo.lock | 263 ++++++++------ prover/Cargo.toml | 12 +- .../prover_autoscaler/src/cluster_types.rs | 7 + .../prover_autoscaler/src/global/scaler.rs | 31 +- .../bin/prover_autoscaler/src/k8s/watcher.rs | 144 +++++--- .../zkstack/src/commands/ecosystem/common.rs | 1 - 68 files changed, 1662 insertions(+), 911 deletions(-) create mode 100644 core/lib/vm_executor/src/oneshot/tests.rs create mode 100644 core/lib/vm_interface/src/storage/overrides.rs delete mode 100644 core/tests/vm-benchmark/benches/iai.rs create mode 100644 core/tests/vm-benchmark/benches/instructions.rs delete mode 100644 core/tests/vm-benchmark/src/bin/common/mod.rs delete 
mode 100644 core/tests/vm-benchmark/src/bin/compare_iai_results.rs delete mode 100644 core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs create mode 100644 etc/env/file_based/overrides/tests/integration.yaml diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 0e1c69ae4db..aa7edefda8a 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -403,12 +403,16 @@ jobs: - name: Run servers run: | + # Override config for part of chains to test the default config as well + ci_run zkstack dev config-writer --path etc/env/file_based/overrides/tests/integration.yaml --chain era + ci_run zkstack dev config-writer --path etc/env/file_based/overrides/tests/integration.yaml --chain validium + ci_run zkstack server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & ci_run zkstack server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & ci_run zkstack server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & ci_run zkstack server --ignore-prerequisites --chain consensus \ - --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ - &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & + --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ + &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & ci_run sleep 5 diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index ae7e5ee671b..f3d11c430eb 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -40,6 +40,8 @@ jobs: # echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env # echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env # echo "RUSTC_WRAPPER=sccache" >> .env +# # Set the minimum reported instruction count difference to reduce noise +# echo "BENCHMARK_DIFF_THRESHOLD_PERCENT=2" >> .env # # - name: init # run: | @@ -51,8 +53,8 @@ jobs: # run: | # ci_run zkstackup -g --local # ci_run zkstack dev contracts --system-contracts -# ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai -# ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes +# ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose || echo "Instructions benchmark is missing" +# ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes # # - name: checkout PR # run: | @@ -60,24 +62,39 @@ jobs: # # - name: run benchmarks on PR # shell: bash +# id: comparison # run: | # ci_run zkstackup -g --local # ci_run zkstack dev contracts --system-contracts -# ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai -# ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes +# ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose # +# ci_run cargo bench --package vm-benchmark --bench instructions -- --print > instructions.log 2>/dev/null +# # Output all lines from the benchmark result starting from the "## ..." comparison header. +# # Since the output spans multiple lines, we use a heredoc declaration. 
# EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) # echo "speedup<<$EOF" >> $GITHUB_OUTPUT -# ci_run cargo run --package vm-benchmark --release --bin compare_iai_results base-iai pr-iai base-opcodes pr-opcodes >> $GITHUB_OUTPUT +# sed -n '/^## /,$p' instructions.log >> $GITHUB_OUTPUT +# echo "$EOF" >> $GITHUB_OUTPUT +# +# ci_run cargo run --package vm-benchmark --release --bin instruction_counts -- --diff base-opcodes > opcodes.log +# echo "opcodes<<$EOF" >> $GITHUB_OUTPUT +# sed -n '/^## /,$p' opcodes.log >> $GITHUB_OUTPUT # echo "$EOF" >> $GITHUB_OUTPUT -# id: comparison # # - name: Comment on PR # uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 +# if: steps.comparison.outputs.speedup != '' || steps.comparison.outputs.opcodes != '' # with: # message: | -# ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }} # ${{ steps.comparison.outputs.speedup }} +# ${{ steps.comparison.outputs.opcodes }} # comment_tag: vm-performance-changes # mode: recreate -# create_if_not_exists: ${{ steps.comparison.outputs.speedup != '' }} +# create_if_not_exists: true +# - name: Remove PR comment +# uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 +# if: steps.comparison.outputs.speedup == '' && steps.comparison.outputs.opcodes == '' +# with: +# comment_tag: vm-performance-changes +# message: 'No performance difference detected (anymore)' +# mode: delete diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index d336a1472e4..93d33116794 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -48,5 +48,5 @@ jobs: ci_run cargo bench --package vm-benchmark --bench oneshot # Run only benches with 1,000 transactions per batch to not spend too much time ci_run cargo bench --package vm-benchmark --bench batch '/1000$' - ci_run cargo bench --package vm-benchmark --bench iai | tee iai-result - ci_run cargo run --package vm-benchmark --bin iai_results_to_prometheus --release < iai-result + ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose + ci_run cargo bench --package vm-benchmark --bench instructions -- --print diff --git a/Cargo.lock b/Cargo.lock index 629869af757..68daf2f3e10 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -959,7 +959,7 @@ name = "block_reverter" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "serde_json", "tokio", "zksync_block_reverter", @@ -1327,14 +1327,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" +checksum = "2501cc688ef391013019495ae7035cfd54f86987e36d10f73976ce4c5d413c5a" dependencies = [ "derivative", "serde", - "zk_evm 0.150.6", - "zkevm_circuits 0.150.6", + "zk_evm 0.150.7", + "zkevm_circuits 0.150.7", ] [[package]] @@ -1394,11 +1394,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" +checksum = "917d27db531fdd98a51e42ea465bc097f48cc849e7fad68d7856087d15125be1" dependencies = [ - "circuit_encodings 0.150.6", + "circuit_encodings 0.150.7", 
"derivative", "rayon", "serde", @@ -1445,9 +1445,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -1455,14 +1455,15 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", "clap_lex 0.7.2", "strsim 0.11.1", + "terminal_size", ] [[package]] @@ -2796,7 +2797,7 @@ name = "genesis_generator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "futures 0.3.30", "serde", "serde_json", @@ -3472,12 +3473,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "iai" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" - [[package]] name = "iana-time-zone" version = "0.1.61" @@ -4122,7 +4117,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -4392,7 +4387,7 @@ name = "merkle_tree_consistency_checker" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "tracing", "zksync_config", "zksync_env_config", @@ -5498,7 +5493,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", - "heck 0.5.0", + "heck 0.4.1", "itertools 0.12.1", "log", "multimap", @@ -6583,7 +6578,7 @@ name = "selector_generator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "ethabi", "glob", "hex", @@ -7907,6 +7902,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + [[package]] name = "test-casing" version = "0.1.3" @@ -8751,11 +8756,11 @@ version = "0.1.0" dependencies = [ "assert_matches", "criterion", - "iai", "once_cell", "rand 0.8.5", "tokio", "vise", + "yab", "zksync_contracts", "zksync_multivm", "zksync_types", @@ -9239,6 +9244,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "yab" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b06cc62d4cec617d3c259537be0fcaa8a5bcf72ddf2983823d9528605f36ed3" +dependencies = [ + "anes", + "clap 4.5.20", + "num_cpus", + "thiserror", +] + [[package]] name = "yansi" version = "1.0.1" @@ -9360,9 +9377,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" +checksum = "3cc74fbe2b45fd19e95c59ea792c795feebdb616ebaa463f0ac567f495f47387" dependencies = [ "anyhow", "lazy_static", @@ -9370,7 +9387,7 @@ dependencies = [ 
"serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.6", + "zk_evm_abstractions 0.150.7", ] [[package]] @@ -9401,15 +9418,15 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" +checksum = "37f333a3b059899df09e40deb041af881bc03e496fda5eec618ffb5e854ee7df" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", ] [[package]] @@ -9458,9 +9475,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" +checksum = "d06fb35b00699d25175a2ad447f86a9088af8b0bc698bb57086fb04c13e52eab" dependencies = [ "arrayvec 0.7.6", "boojum", @@ -9472,7 +9489,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", "zksync_cs_derive", ] @@ -9520,9 +9537,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" +checksum = "b83f3b279248af4ca86dec20a54127f02110b45570f3f6c1d13df49ba75c28a5" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -9646,7 +9663,7 @@ dependencies = [ "anyhow", "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "futures 0.3.30", "itertools 0.10.5", "num_cpus", @@ -9658,7 +9675,7 @@ dependencies = [ "vise", "zk_evm 0.133.0", "zk_evm 0.141.0", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -10231,7 +10248,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "clap 4.5.18", + "clap 4.5.20", "envy", "futures 0.3.30", "rustc_version", @@ -10379,9 +10396,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" +checksum = "dc58af8e4e4ad1a851ffd2275e6a44ead0f15a7eaac9dc9d60a56b3b9c9b08e8" dependencies = [ "boojum", "derivative", @@ -10391,7 +10408,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.6", + "zkevm_circuits 0.150.7", ] [[package]] @@ -10440,7 +10457,7 @@ version = "0.1.0" dependencies = [ "anyhow", "assert_matches", - "clap 4.5.18", + "clap 4.5.20", "insta", "leb128", "once_cell", @@ -10520,7 +10537,7 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "ethabi", "hex", "itertools 0.10.5", @@ -10534,7 +10551,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_contracts", "zksync_eth_signer", "zksync_mini_merkle_tree", @@ -10576,7 +10593,7 @@ dependencies = [ "tower-http", "tracing", "vise", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_config", "zksync_consensus_roles", "zksync_contracts", @@ -10977,7 +10994,7 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.6", 
+ "circuit_sequencer_api 0.150.7", "serde", "serde_json", "serde_with", @@ -11026,7 +11043,7 @@ name = "zksync_server" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "futures 0.3.30", "serde_json", "tikv-jemallocator", @@ -11340,8 +11357,8 @@ source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0cc dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.6", - "zkevm_opcode_defs 0.150.6", + "zk_evm_abstractions 0.150.7", + "zkevm_opcode_defs 0.150.7", "zksync_vm2_interface", ] @@ -11361,6 +11378,7 @@ dependencies = [ "assert_matches", "async-trait", "once_cell", + "test-casing", "tokio", "tracing", "vise", diff --git a/Cargo.toml b/Cargo.toml index 0f8e6ba77ae..6d51e5060aa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -122,6 +122,7 @@ derive_more = "1.0.0" envy = "0.4" ethabi = "18.0.0" flate2 = "1.0.28" +fraction = "0.15.3" futures = "0.3" glob = "0.3" google-cloud-auth = "0.16.0" @@ -131,7 +132,6 @@ hex = "0.4" http = "1.1" httpmock = "0.7.0" hyper = "1.3" -iai = "0.1" insta = "1.29.0" itertools = "0.10" jsonrpsee = { version = "0.23", default-features = false } @@ -190,7 +190,7 @@ tracing-opentelemetry = "0.25.0" time = "0.3.36" # Has to be same as used by `tracing-subscriber` url = "2" web3 = "0.19.0" -fraction = "0.15.3" +yab = "0.1.0" # Proc-macro syn = "2.0" @@ -219,15 +219,15 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.6" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.7" } crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.30.1" } -kzg = { package = "zksync_kzg", version = "=0.150.6" } +kzg = { package = "zksync_kzg", version = "=0.150.7" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } zk_evm_1_3_3 = { package = "zk_evm", version = "0.133" } zk_evm_1_4_0 = { package = "zk_evm", version = "0.140" } zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } -zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.6" } +zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.7" } # New VM; pinned to a specific commit because of instability zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "df5bec3d04d64d434f9b0ccb285ba4681008f7b3" } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 0ac50e624cd..048d8fbfd10 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -315,10 +315,12 @@ impl MainNodeBuilder { latest_values_cache_size: rpc_config.latest_values_cache_size() as u64, latest_values_max_block_lag: rpc_config.latest_values_max_block_lag(), }; + let vm_config = try_load_config!(self.configs.experimental_vm_config); // On main node we always use master pool sink. 
self.node.add_layer(MasterPoolSinkLayer); - self.node.add_layer(TxSenderLayer::new( + + let layer = TxSenderLayer::new( TxSenderConfig::new( &sk_config, &rpc_config, @@ -329,7 +331,9 @@ impl MainNodeBuilder { ), postgres_storage_caches_config, rpc_config.vm_concurrency_limit(), - )); + ); + let layer = layer.with_vm_mode(vm_config.api_fast_vm_mode); + self.node.add_layer(layer); Ok(self) } diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs index 618cfd3d388..a87a221ef22 100644 --- a/core/lib/config/src/configs/experimental.rs +++ b/core/lib/config/src/configs/experimental.rs @@ -106,4 +106,9 @@ pub struct ExperimentalVmConfig { /// the new VM doesn't produce call traces and can diverge from the old VM! #[serde(default)] pub state_keeper_fast_vm_mode: FastVmMode, + + /// Fast VM mode to use in the API server. Currently, some operations are not supported by the fast VM (e.g., `debug_traceCall` + /// or transaction validation), so the legacy VM will always be used for them. + #[serde(default)] + pub api_fast_vm_mode: FastVmMode, } diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs index 1094b1bb180..1d8703df51a 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -9,6 +9,9 @@ pub struct TeeConfig { pub tee_support: bool, /// All batches before this one are considered to be processed. pub first_tee_processed_batch: L1BatchNumber, + /// Timeout in seconds for retrying TEE proof generation if it fails. Retries continue + /// indefinitely until successful. + pub tee_proof_generation_timeout_in_secs: u16, } impl Default for TeeConfig { @@ -16,6 +19,8 @@ impl Default for TeeConfig { TeeConfig { tee_support: Self::default_tee_support(), first_tee_processed_batch: Self::default_first_tee_processed_batch(), + tee_proof_generation_timeout_in_secs: + Self::default_tee_proof_generation_timeout_in_secs(), } } } @@ -28,6 +33,14 @@ impl TeeConfig { pub fn default_first_tee_processed_batch() -> L1BatchNumber { L1BatchNumber(0) } + + pub fn default_tee_proof_generation_timeout_in_secs() -> u16 { + 600 + } + + pub fn tee_proof_generation_timeout(&self) -> Duration { + Duration::from_secs(self.tee_proof_generation_timeout_in_secs.into()) + } } #[derive(Debug, Deserialize, Clone, PartialEq)] diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 9d72b4ab367..4e6930a3384 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -340,6 +340,7 @@ impl Distribution for EncodeDist { configs::ExperimentalVmConfig { playground: self.sample(rng), state_keeper_fast_vm_mode: gen_fast_vm_mode(rng), + api_fast_vm_mode: gen_fast_vm_mode(rng), } } } @@ -686,6 +687,7 @@ impl Distribution for EncodeDist { tee_config: configs::TeeConfig { tee_support: self.sample(rng), first_tee_processed_batch: L1BatchNumber(rng.gen()), + tee_proof_generation_timeout_in_secs: self.sample(rng), }, } } diff --git a/core/lib/env_config/src/proof_data_handler.rs b/core/lib/env_config/src/proof_data_handler.rs index b5bfda4544e..47848585e76 100644 --- a/core/lib/env_config/src/proof_data_handler.rs +++ b/core/lib/env_config/src/proof_data_handler.rs @@ -28,6 +28,7 @@ mod tests { tee_config: TeeConfig { tee_support: true, first_tee_processed_batch: L1BatchNumber(1337), + tee_proof_generation_timeout_in_secs: 600, }, } } @@ -39,6 +40,7 @@ mod tests { PROOF_DATA_HANDLER_HTTP_PORT="3320" 
PROOF_DATA_HANDLER_TEE_SUPPORT="true" PROOF_DATA_HANDLER_FIRST_TEE_PROCESSED_BATCH="1337" + PROOF_DATA_HANDLER_TEE_PROOF_GENERATION_TIMEOUT_IN_SECS="600" "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/env_config/src/vm_runner.rs b/core/lib/env_config/src/vm_runner.rs index 730a79dd340..0a29d1256bd 100644 --- a/core/lib/env_config/src/vm_runner.rs +++ b/core/lib/env_config/src/vm_runner.rs @@ -55,6 +55,7 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=new + EXPERIMENTAL_VM_API_FAST_VM_MODE=shadow EXPERIMENTAL_VM_PLAYGROUND_FAST_VM_MODE=shadow EXPERIMENTAL_VM_PLAYGROUND_DB_PATH=/db/vm_playground EXPERIMENTAL_VM_PLAYGROUND_FIRST_PROCESSED_BATCH=123 @@ -64,6 +65,7 @@ mod tests { let config = ExperimentalVmConfig::from_env().unwrap(); assert_eq!(config.state_keeper_fast_vm_mode, FastVmMode::New); + assert_eq!(config.api_fast_vm_mode, FastVmMode::Shadow); assert_eq!(config.playground.fast_vm_mode, FastVmMode::Shadow); assert_eq!(config.playground.db_path.unwrap(), "/db/vm_playground"); assert_eq!(config.playground.first_processed_batch, L1BatchNumber(123)); diff --git a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs index e98a8385f02..37a2bf2bec2 100644 --- a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs @@ -1,3 +1,4 @@ +use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::l1_messenger_contract; use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; @@ -5,13 +6,17 @@ use zksync_test_account::TxType; use zksync_types::{ get_code_key, get_known_code_key, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Execute, ExecuteTransactionCommon, U256, + Address, Execute, ExecuteTransactionCommon, U256, }; use zksync_utils::{h256_to_u256, u256_to_h256}; -use super::{read_test_contract, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; +use super::{ + read_test_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm, BASE_SYSTEM_CONTRACTS, +}; use crate::{ - interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + interface::{ + ExecutionResult, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, VmRevertReason, + }, utils::StorageWritesDeduplicator, }; @@ -180,3 +185,41 @@ pub(crate) fn test_l1_tx_execution_high_gas_limit() { assert!(res.result.is_failed(), "The transaction should've failed"); } + +pub(crate) fn test_l1_tx_execution_gas_estimation_with_low_gas() { + let counter_contract = read_test_contract(); + let counter_address = Address::repeat_byte(0x11); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::EstimateFee) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_contract, + counter_address, + )]) + .with_rich_accounts(1) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let mut tx = account.get_test_contract_transaction( + counter_address, + false, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + let ExecuteTransactionCommon::L1(data) = &mut tx.common_data else { + unreachable!(); + }; + // This gas limit is chosen so that transaction starts getting executed by the bootloader, but then runs out of gas + // before its execution result is posted. 
+ data.gas_limit = 15_000.into(); + + vm.vm.push_transaction(tx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); + assert_matches!( + &res.result, + ExecutionResult::Revert { output: VmRevertReason::General { msg, .. } } + if msg.contains("reverted with empty reason") + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs index 733ca9d82fc..a31374ea5a0 100644 --- a/core/lib/multivm/src/versions/vm_fast/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -1,4 +1,4 @@ -pub use zksync_vm2::interface::Tracer; +pub use zksync_vm2::interface; pub use self::{circuits_tracer::CircuitsTracer, vm::Vm}; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 0174eeffd7e..f0295702017 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -1,6 +1,7 @@ use crate::{ versions::testonly::l1_tx_execution::{ - test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, + test_l1_tx_execution, test_l1_tx_execution_gas_estimation_with_low_gas, + test_l1_tx_execution_high_gas_limit, }, vm_fast::Vm, }; @@ -14,3 +15,8 @@ fn l1_tx_execution() { fn l1_tx_execution_high_gas_limit() { test_l1_tx_execution_high_gas_limit::>(); } + +#[test] +fn l1_tx_execution_gas_estimation_with_low_gas() { + test_l1_tx_execution_gas_estimation_with_low_gas::>(); +} diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 435b6529c9e..d2ace9b7771 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -51,8 +51,8 @@ use crate::{ }, vm_latest::{ constants::{ - get_vm_hook_params_start_position, get_vm_hook_position, OPERATOR_REFUNDS_OFFSET, - TX_GAS_LIMIT_OFFSET, VM_HOOK_PARAMS_COUNT, + get_result_success_first_slot, get_vm_hook_params_start_position, get_vm_hook_position, + OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET, VM_HOOK_PARAMS_COUNT, }, MultiVMSubversion, }, @@ -213,7 +213,22 @@ impl Vm { } Hook::TxHasEnded => { if let VmExecutionMode::OneTx = execution_mode { - break (last_tx_result.take().unwrap(), false); + // The bootloader may invoke `TxHasEnded` hook without posting a tx result previously. One case when this can happen + // is estimating gas for L1 transactions, if a transaction runs out of gas during execution. + let tx_result = last_tx_result.take().unwrap_or_else(|| { + let tx_has_failed = self.get_tx_result().is_zero(); + if tx_has_failed { + let output = VmRevertReason::General { + msg: "Transaction reverted with empty reason. 
Possibly out of gas" + .to_string(), + data: vec![], + }; + ExecutionResult::Revert { output } + } else { + ExecutionResult::Success { output: vec![] } + } + }); + break (tx_result, false); } } Hook::AskOperatorForRefund => { @@ -361,6 +376,12 @@ impl Vm { .unwrap() } + fn get_tx_result(&self) -> U256 { + let tx_idx = self.bootloader_state.current_tx(); + let slot = get_result_success_first_slot(VM_VERSION) as usize + tx_idx; + self.read_word_from_bootloader_heap(slot) + } + fn get_debug_log(&self) -> (String, String) { let hook_params = self.get_hook_params(); let mut msg = u256_to_h256(hook_params[0]).as_bytes().to_vec(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 4b7429c2829..3b8a01dbc80 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -1,6 +1,7 @@ use crate::{ versions::testonly::l1_tx_execution::{ - test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, + test_l1_tx_execution, test_l1_tx_execution_gas_estimation_with_low_gas, + test_l1_tx_execution_high_gas_limit, }, vm_latest::{HistoryEnabled, Vm}, }; @@ -14,3 +15,8 @@ fn l1_tx_execution() { fn l1_tx_execution_high_gas_limit() { test_l1_tx_execution_high_gas_limit::>(); } + +#[test] +fn l1_tx_execution_gas_estimation_with_low_gas() { + test_l1_tx_execution_gas_estimation_with_low_gas::>(); +} diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 5ff27046377..e2f72bd2411 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -234,7 +234,7 @@ pub type ShadowedFastVm = ShadowVm< /// Fast VM variants. #[derive(Debug)] -pub enum FastVmInstance { +pub enum FastVmInstance { /// Fast VM running in isolation. Fast(crate::vm_fast::Vm, Tr>), /// Fast VM shadowed by the latest legacy VM. diff --git a/core/lib/protobuf_config/src/experimental.rs b/core/lib/protobuf_config/src/experimental.rs index 63fa0ca51eb..750dc7b04f0 100644 --- a/core/lib/protobuf_config/src/experimental.rs +++ b/core/lib/protobuf_config/src/experimental.rs @@ -7,6 +7,14 @@ use zksync_protobuf::{repr::ProtoRepr, required}; use crate::{proto::experimental as proto, read_optional_repr}; +fn parse_vm_mode(raw: Option) -> anyhow::Result { + Ok(raw + .map(proto::FastVmMode::try_from) + .transpose() + .context("fast_vm_mode")? + .map_or_else(FastVmMode::default, |mode| mode.parse())) +} + impl ProtoRepr for proto::Db { type Type = configs::ExperimentalDBConfig; @@ -105,12 +113,8 @@ impl ProtoRepr for proto::Vm { fn read(&self) -> anyhow::Result { Ok(Self::Type { playground: read_optional_repr(&self.playground).unwrap_or_default(), - state_keeper_fast_vm_mode: self - .state_keeper_fast_vm_mode - .map(proto::FastVmMode::try_from) - .transpose() - .context("fast_vm_mode")? 
- .map_or_else(FastVmMode::default, |mode| mode.parse()), + state_keeper_fast_vm_mode: parse_vm_mode(self.state_keeper_fast_vm_mode)?, + api_fast_vm_mode: parse_vm_mode(self.api_fast_vm_mode)?, }) } @@ -120,6 +124,7 @@ impl ProtoRepr for proto::Vm { state_keeper_fast_vm_mode: Some( proto::FastVmMode::new(this.state_keeper_fast_vm_mode).into(), ), + api_fast_vm_mode: Some(proto::FastVmMode::new(this.api_fast_vm_mode).into()), } } } diff --git a/core/lib/protobuf_config/src/proof_data_handler.rs b/core/lib/protobuf_config/src/proof_data_handler.rs index a587c702633..c01e163bd77 100644 --- a/core/lib/protobuf_config/src/proof_data_handler.rs +++ b/core/lib/protobuf_config/src/proof_data_handler.rs @@ -23,6 +23,12 @@ impl ProtoRepr for proto::ProofDataHandler { .first_tee_processed_batch .map(|x| L1BatchNumber(x as u32)) .unwrap_or_else(configs::TeeConfig::default_first_tee_processed_batch), + tee_proof_generation_timeout_in_secs: self + .tee_proof_generation_timeout_in_secs + .map(|x| x as u16) + .unwrap_or_else( + configs::TeeConfig::default_tee_proof_generation_timeout_in_secs, + ), }, }) } @@ -33,6 +39,9 @@ impl ProtoRepr for proto::ProofDataHandler { proof_generation_timeout_in_secs: Some(this.proof_generation_timeout_in_secs.into()), tee_support: Some(this.tee_config.tee_support), first_tee_processed_batch: Some(this.tee_config.first_tee_processed_batch.0 as u64), + tee_proof_generation_timeout_in_secs: Some( + this.tee_config.tee_proof_generation_timeout_in_secs.into(), + ), } } } diff --git a/core/lib/protobuf_config/src/proto/config/experimental.proto b/core/lib/protobuf_config/src/proto/config/experimental.proto index 5e1d045ca67..87af8d3835c 100644 --- a/core/lib/protobuf_config/src/proto/config/experimental.proto +++ b/core/lib/protobuf_config/src/proto/config/experimental.proto @@ -37,4 +37,5 @@ message VmPlayground { message Vm { optional VmPlayground playground = 1; // optional optional FastVmMode state_keeper_fast_vm_mode = 2; // optional; if not set, fast VM is not used + optional FastVmMode api_fast_vm_mode = 3; // optional; if not set, fast VM is not used } diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index 92ba770a756..392834d25f3 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -109,4 +109,5 @@ message ProofDataHandler { optional uint32 proof_generation_timeout_in_secs = 2; // required; s optional bool tee_support = 3; // optional optional uint64 first_tee_processed_batch = 4; // optional + optional uint32 tee_proof_generation_timeout_in_secs = 5; // optional } diff --git a/core/lib/vm_executor/Cargo.toml b/core/lib/vm_executor/Cargo.toml index a967aaa969a..06a531252c5 100644 --- a/core/lib/vm_executor/Cargo.toml +++ b/core/lib/vm_executor/Cargo.toml @@ -26,3 +26,4 @@ vise.workspace = true [dev-dependencies] assert_matches.workspace = true +test-casing.workspace = true diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index 5877922b333..124194f3431 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -37,7 +37,7 @@ pub trait BatchTracer: fmt::Debug + 'static + Send + Sealed { const TRACE_CALLS: bool; /// Tracer for the fast VM. 
#[doc(hidden)] - type Fast: vm_fast::Tracer + Default + 'static; + type Fast: vm_fast::interface::Tracer + Default + 'static; } impl Sealed for () {} diff --git a/core/lib/vm_executor/src/oneshot/metrics.rs b/core/lib/vm_executor/src/oneshot/metrics.rs index 475463300f1..13a832ee3c8 100644 --- a/core/lib/vm_executor/src/oneshot/metrics.rs +++ b/core/lib/vm_executor/src/oneshot/metrics.rs @@ -50,7 +50,7 @@ pub(super) fn report_vm_memory_metrics( tx_id: &str, memory_metrics: &VmMemoryMetrics, vm_execution_took: Duration, - storage_metrics: &StorageViewStats, + storage_stats: &StorageViewStats, ) { MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner); MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history); @@ -65,10 +65,18 @@ pub(super) fn report_vm_memory_metrics( MEMORY_METRICS .storage_view_cache_size - .observe(storage_metrics.cache_size); + .observe(storage_stats.cache_size); MEMORY_METRICS .full - .observe(memory_metrics.full_size() + storage_metrics.cache_size); + .observe(memory_metrics.full_size() + storage_stats.cache_size); - STORAGE_METRICS.observe(&format!("Tx {tx_id}"), vm_execution_took, storage_metrics); + report_vm_storage_metrics(tx_id, vm_execution_took, storage_stats); +} + +pub(super) fn report_vm_storage_metrics( + tx_id: &str, + vm_execution_took: Duration, + storage_stats: &StorageViewStats, +) { + STORAGE_METRICS.observe(&format!("Tx {tx_id}"), vm_execution_took, storage_stats); } diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index 5f9e4dd3c6f..154c838f824 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -17,23 +17,26 @@ use once_cell::sync::OnceCell; use zksync_multivm::{ interface::{ executor::{OneshotExecutor, TransactionValidator}, - storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, + storage::{ReadStorage, StorageView, StorageWithOverrides}, tracer::{ValidationError, ValidationParams}, - ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, + utils::{DivergenceHandler, ShadowVm}, + Call, ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, - VmInterface, + VmFactory, VmInterface, }, - tracers::{CallTracer, StorageInvocations, ValidationTracer}, + is_supported_by_fast_vm, + tracers::{CallTracer, StorageInvocations, TracerDispatcher, ValidationTracer}, utils::adjust_pubdata_price_for_tx, - vm_latest::HistoryDisabled, + vm_latest::{HistoryDisabled, HistoryEnabled}, zk_evm_latest::ethereum_types::U256, - LegacyVmInstance, MultiVMTracer, + FastVmInstance, HistoryMode, LegacyVmInstance, MultiVMTracer, }; use zksync_types::{ block::pack_block_info, get_nonce_key, l2::L2Tx, utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, + vm::FastVmMode, AccountTreeId, Nonce, StorageKey, Transaction, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, }; @@ -54,10 +57,14 @@ mod contracts; mod env; mod metrics; mod mock; +#[cfg(test)] +mod tests; /// Main [`OneshotExecutor`] implementation used by the API server. 
-#[derive(Debug, Default)] +#[derive(Debug)] pub struct MainOneshotExecutor { + fast_vm_mode: FastVmMode, + panic_on_divergence: bool, missed_storage_invocation_limit: usize, execution_latency_histogram: Option<&'static vise::Histogram>, } @@ -67,11 +74,28 @@ impl MainOneshotExecutor { /// The limit is applied for calls and gas estimations, but not during transaction validation. pub fn new(missed_storage_invocation_limit: usize) -> Self { Self { + fast_vm_mode: FastVmMode::Old, + panic_on_divergence: false, missed_storage_invocation_limit, execution_latency_histogram: None, } } + /// Sets the fast VM mode used by this executor. + pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { + if !matches!(fast_vm_mode, FastVmMode::Old) { + tracing::warn!( + "Running new VM with modes {fast_vm_mode:?}; this can lead to incorrect node behavior" + ); + } + self.fast_vm_mode = fast_vm_mode; + } + + /// Causes the VM to panic on divergence whenever it executes in the shadow mode. By default, a divergence is logged on `ERROR` level. + pub fn panic_on_divergence(&mut self) { + self.panic_on_divergence = true; + } + /// Sets a histogram for measuring VM execution latency. pub fn set_execution_latency_histogram( &mut self, @@ -79,19 +103,31 @@ impl MainOneshotExecutor { ) { self.execution_latency_histogram = Some(histogram); } + + fn select_fast_vm_mode( + &self, + env: &OneshotEnv, + tracing_params: &OneshotTracingParams, + ) -> FastVmMode { + if tracing_params.trace_calls || !is_supported_by_fast_vm(env.system.version) { + FastVmMode::Old // the fast VM doesn't support call tracing or old protocol versions + } else { + self.fast_vm_mode + } + } } #[async_trait] -impl OneshotExecutor for MainOneshotExecutor +impl OneshotExecutor> for MainOneshotExecutor where S: ReadStorage + Send + 'static, { async fn inspect_transaction_with_bytecode_compression( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, args: TxExecutionArgs, - params: OneshotTracingParams, + tracing_params: OneshotTracingParams, ) -> anyhow::Result { let missed_storage_invocation_limit = match env.system.execution_mode { // storage accesses are not limited for tx validation @@ -100,35 +136,24 @@ where self.missed_storage_invocation_limit } }; - let execution_latency_histogram = self.execution_latency_histogram; + let sandbox = VmSandbox { + fast_vm_mode: self.select_fast_vm_mode(&env, &tracing_params), + panic_on_divergence: self.panic_on_divergence, + storage, + env, + execution_args: args, + execution_latency_histogram: self.execution_latency_histogram, + }; tokio::task::spawn_blocking(move || { - let mut tracers = vec![]; - let mut calls_result = Arc::>::default(); - if params.trace_calls { - tracers.push(CallTracer::new(calls_result.clone()).into_tracer_pointer()); - } - tracers.push( - StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer(), - ); - - let executor = VmSandbox::new(storage, env, args, execution_latency_histogram); - let mut result = executor.apply(|vm, transaction| { - let (compression_result, tx_result) = vm - .inspect_transaction_with_bytecode_compression( - &mut tracers.into(), - transaction, - true, - ); - OneshotTransactionExecutionResult { - tx_result: Box::new(tx_result), - compression_result: compression_result.map(drop), - call_traces: vec![], - } - }); - - result.call_traces = Arc::make_mut(&mut calls_result).take().unwrap_or_default(); - result + sandbox.execute_in_vm(|vm, transaction| { + vm.inspect_transaction_with_bytecode_compression( + 
missed_storage_invocation_limit, + tracing_params, + transaction, + true, + ) + }) }) .await .context("VM execution panicked") @@ -136,13 +161,13 @@ where } #[async_trait] -impl TransactionValidator for MainOneshotExecutor +impl TransactionValidator> for MainOneshotExecutor where S: ReadStorage + Send + 'static, { async fn validate_transaction( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, @@ -152,23 +177,28 @@ where "Unexpected execution mode for tx validation: {:?} (expected `VerifyExecute`)", env.system.execution_mode ); - let execution_latency_histogram = self.execution_latency_histogram; + + let sandbox = VmSandbox { + fast_vm_mode: FastVmMode::Old, + panic_on_divergence: self.panic_on_divergence, + storage, + env, + execution_args: TxExecutionArgs::for_validation(tx), + execution_latency_histogram: self.execution_latency_histogram, + }; tokio::task::spawn_blocking(move || { let (validation_tracer, mut validation_result) = ValidationTracer::::new( validation_params, - env.system.version.into(), + sandbox.env.system.version.into(), ); let tracers = vec![validation_tracer.into_tracer_pointer()]; - let executor = VmSandbox::new( - storage, - env, - TxExecutionArgs::for_validation(tx), - execution_latency_histogram, - ); - let exec_result = executor.apply(|vm, transaction| { + let exec_result = sandbox.execute_in_vm(|vm, transaction| { + let Vm::Legacy(vm) = vm else { + unreachable!("Fast VM is never used for validation yet"); + }; vm.push_transaction(transaction); vm.inspect(&mut tracers.into(), InspectExecutionMode::OneTx) }); @@ -188,70 +218,99 @@ where } #[derive(Debug)] -struct VmSandbox { - vm: Box>, - storage_view: StoragePtr>, - transaction: Transaction, - execution_latency_histogram: Option<&'static vise::Histogram>, +enum Vm { + Legacy(LegacyVmInstance), + Fast(FastVmInstance), } -impl VmSandbox { - /// This method is blocking. 
- fn new( - storage: S, - mut env: OneshotEnv, - execution_args: TxExecutionArgs, - execution_latency_histogram: Option<&'static vise::Histogram>, - ) -> Self { - let mut storage_view = StorageView::new(storage); - Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block); - - let protocol_version = env.system.version; - if execution_args.adjust_pubdata_price { - env.l1_batch.fee_input = adjust_pubdata_price_for_tx( - env.l1_batch.fee_input, - execution_args.transaction.gas_per_pubdata_byte_limit(), - env.l1_batch.enforced_base_fee.map(U256::from), - protocol_version.into(), - ); +impl Vm { + fn inspect_transaction_with_bytecode_compression( + &mut self, + missed_storage_invocation_limit: usize, + params: OneshotTracingParams, + tx: Transaction, + with_compression: bool, + ) -> OneshotTransactionExecutionResult { + let mut calls_result = Arc::>::default(); + let (compression_result, tx_result) = match self { + Self::Legacy(vm) => { + let mut tracers = Self::create_legacy_tracers( + missed_storage_invocation_limit, + params.trace_calls.then(|| calls_result.clone()), + ); + vm.inspect_transaction_with_bytecode_compression(&mut tracers, tx, with_compression) + } + Self::Fast(vm) => { + assert!( + !params.trace_calls, + "Call tracing is not supported by fast VM yet" + ); + let legacy_tracers = Self::create_legacy_tracers::( + missed_storage_invocation_limit, + None, + ); + let mut full_tracer = (legacy_tracers.into(), ()); + vm.inspect_transaction_with_bytecode_compression( + &mut full_tracer, + tx, + with_compression, + ) + } }; - let storage_view = storage_view.to_rc_ptr(); - let vm = Box::new(LegacyVmInstance::new_with_specific_version( - env.l1_batch, - env.system, - storage_view.clone(), - protocol_version.into_api_vm_version(), - )); + OneshotTransactionExecutionResult { + tx_result: Box::new(tx_result), + compression_result: compression_result.map(drop), + call_traces: Arc::make_mut(&mut calls_result).take().unwrap_or_default(), + } + } - Self { - vm, - storage_view, - transaction: execution_args.transaction, - execution_latency_histogram, + fn create_legacy_tracers( + missed_storage_invocation_limit: usize, + calls_result: Option>>>, + ) -> TracerDispatcher, H> { + let mut tracers = vec![]; + if let Some(calls_result) = calls_result { + tracers.push(CallTracer::new(calls_result).into_tracer_pointer()); } + tracers + .push(StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer()); + tracers.into() } +} +/// Full parameters necessary to instantiate a VM for oneshot execution. +#[derive(Debug)] +struct VmSandbox { + fast_vm_mode: FastVmMode, + panic_on_divergence: bool, + storage: StorageWithOverrides, + env: OneshotEnv, + execution_args: TxExecutionArgs, + execution_latency_histogram: Option<&'static vise::Histogram>, +} + +impl VmSandbox { /// This method is blocking. 
- fn setup_storage_view( - storage_view: &mut StorageView, + fn setup_storage( + storage: &mut StorageWithOverrides, execution_args: &TxExecutionArgs, current_block: Option, ) { let storage_view_setup_started_at = Instant::now(); if let Some(nonce) = execution_args.enforced_nonce { let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account()); - let full_nonce = storage_view.read_value(&nonce_key); + let full_nonce = storage.read_value(&nonce_key); let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + storage.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); } let payer = execution_args.transaction.payer(); let balance_key = storage_key_for_eth_balance(&payer); - let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); + let mut current_balance = h256_to_u256(storage.read_value(&balance_key)); current_balance += execution_args.added_balance; - storage_view.set_value(balance_key, u256_to_h256(current_balance)); + storage.set_value(balance_key, u256_to_h256(current_balance)); // Reset L2 block info if necessary. if let Some(current_block) = current_block { @@ -261,13 +320,13 @@ impl VmSandbox { ); let l2_block_info = pack_block_info(current_block.number.into(), current_block.timestamp); - storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); + storage.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); let l2_block_txs_rolling_hash_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ); - storage_view.set_value( + storage.set_value( l2_block_txs_rolling_hash_key, current_block.txs_rolling_hash, ); @@ -280,30 +339,90 @@ impl VmSandbox { } } - pub(super) fn apply(mut self, apply_fn: F) -> T - where - F: FnOnce(&mut LegacyVmInstance, Transaction) -> T, - { + /// This method is blocking. 
+ fn execute_in_vm( + mut self, + action: impl FnOnce(&mut Vm>, Transaction) -> T, + ) -> T { + Self::setup_storage( + &mut self.storage, + &self.execution_args, + self.env.current_block, + ); + + let protocol_version = self.env.system.version; + let mode = self.env.system.execution_mode; + if self.execution_args.adjust_pubdata_price { + self.env.l1_batch.fee_input = adjust_pubdata_price_for_tx( + self.env.l1_batch.fee_input, + self.execution_args.transaction.gas_per_pubdata_byte_limit(), + self.env.l1_batch.enforced_base_fee.map(U256::from), + protocol_version.into(), + ); + }; + + let transaction = self.execution_args.transaction; let tx_id = format!( "{:?}-{}", - self.transaction.initiator_account(), - self.transaction.nonce().unwrap_or(Nonce(0)) + transaction.initiator_account(), + transaction.nonce().unwrap_or(Nonce(0)) ); + let storage_view = StorageView::new(self.storage).to_rc_ptr(); + let mut vm = match self.fast_vm_mode { + FastVmMode::Old => Vm::Legacy(LegacyVmInstance::new_with_specific_version( + self.env.l1_batch, + self.env.system, + storage_view.clone(), + protocol_version.into_api_vm_version(), + )), + FastVmMode::New => Vm::Fast(FastVmInstance::fast( + self.env.l1_batch, + self.env.system, + storage_view.clone(), + )), + FastVmMode::Shadow => { + let mut vm = + ShadowVm::new(self.env.l1_batch, self.env.system, storage_view.clone()); + if !self.panic_on_divergence { + let transaction = format!("{:?}", transaction); + let handler = DivergenceHandler::new(move |errors, _| { + tracing::error!(transaction, ?mode, "{errors}"); + }); + vm.set_divergence_handler(handler); + } + Vm::Fast(FastVmInstance::Shadowed(vm)) + } + }; + let started_at = Instant::now(); - let result = apply_fn(&mut *self.vm, self.transaction); + let result = action(&mut vm, transaction); let vm_execution_took = started_at.elapsed(); if let Some(histogram) = self.execution_latency_histogram { histogram.observe(vm_execution_took); } - let memory_metrics = self.vm.record_vm_memory_metrics(); - metrics::report_vm_memory_metrics( - &tx_id, - &memory_metrics, - vm_execution_took, - &self.storage_view.borrow().stats(), - ); + + match &vm { + Vm::Legacy(vm) => { + let memory_metrics = vm.record_vm_memory_metrics(); + metrics::report_vm_memory_metrics( + &tx_id, + &memory_metrics, + vm_execution_took, + &storage_view.borrow().stats(), + ); + } + Vm::Fast(_) => { + // The new VM implementation doesn't have the same memory model as old ones, so it doesn't report memory metrics, + // only storage-related ones. + metrics::report_vm_storage_metrics( + &format!("Tx {tx_id}"), + vm_execution_took, + &storage_view.borrow().stats(), + ); + } + } result } } diff --git a/core/lib/vm_executor/src/oneshot/tests.rs b/core/lib/vm_executor/src/oneshot/tests.rs new file mode 100644 index 00000000000..8e2a73ea7b0 --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/tests.rs @@ -0,0 +1,107 @@ +//! Oneshot executor tests. 
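The `execute_in_vm` rewrite above is what actually honours the new mode: `FastVmMode::Old` keeps the legacy VM, `FastVmMode::New` runs the fast VM in isolation, and `FastVmMode::Shadow` wraps it in a `ShadowVm` with a divergence handler, while `select_fast_vm_mode` falls back to the legacy VM for call tracing and pre-fast-VM protocol versions. The tests that follow exercise exactly this selection logic. As a minimal caller-side sketch (not part of the patch; the constructor argument and chosen mode are illustrative), the knobs added to `MainOneshotExecutor` are used like this:

```rust
use zksync_types::vm::FastVmMode;
use zksync_vm_executor::oneshot::MainOneshotExecutor;

fn configure_oneshot_executor() -> MainOneshotExecutor {
    // `usize::MAX` (no missed-storage-invocation limit) is an arbitrary value for illustration.
    let mut executor = MainOneshotExecutor::new(usize::MAX);
    // Run the fast VM shadowed by the legacy VM; divergences are logged on ERROR level...
    executor.set_fast_vm_mode(FastVmMode::Shadow);
    // ...unless the executor is told to panic on them instead (as the sandbox tests do).
    executor.panic_on_divergence();
    executor
}
```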
+ +use assert_matches::assert_matches; +use test_casing::{test_casing, Product}; +use zksync_multivm::interface::storage::InMemoryStorage; +use zksync_types::{ProtocolVersionId, H256}; +use zksync_utils::bytecode::hash_bytecode; + +use super::*; +use crate::testonly::{ + create_l2_transaction, default_l1_batch_env, default_system_env, FAST_VM_MODES, +}; + +const EXEC_MODES: [TxExecutionMode; 3] = [ + TxExecutionMode::EstimateFee, + TxExecutionMode::EthCall, + TxExecutionMode::VerifyExecute, +]; + +#[test] +fn selecting_vm_for_execution() { + let mut executor = MainOneshotExecutor::new(usize::MAX); + executor.set_fast_vm_mode(FastVmMode::New); + + for exec_mode in EXEC_MODES { + let env = OneshotEnv { + system: default_system_env(exec_mode), + l1_batch: default_l1_batch_env(1), + current_block: None, + }; + // let mode = executor.select_fast_vm_mode(&env, &OneshotTracingParams::default()); + // assert_matches!(mode, FastVmMode::New); + + // Tracing calls is not supported by the new VM. + let mode = executor.select_fast_vm_mode(&env, &OneshotTracingParams { trace_calls: true }); + assert_matches!(mode, FastVmMode::Old); + + // Old protocol versions are not supported either. + let mut old_env = env.clone(); + old_env.system.version = ProtocolVersionId::Version22; + let mode = executor.select_fast_vm_mode(&old_env, &OneshotTracingParams::default()); + assert_matches!(mode, FastVmMode::Old); + } +} + +#[test] +fn setting_up_nonce_and_balance_in_storage() { + let mut storage = StorageWithOverrides::new(InMemoryStorage::default()); + let tx = create_l2_transaction(1_000_000_000.into(), Nonce(1)); + let execution_args = TxExecutionArgs::for_gas_estimate(tx.clone().into()); + VmSandbox::setup_storage(&mut storage, &execution_args, None); + + // Check the overridden nonce and balance. 
+ let nonce_key = get_nonce_key(&tx.initiator_account()); + assert_eq!(storage.read_value(&nonce_key), H256::from_low_u64_be(1)); + let balance_key = storage_key_for_eth_balance(&tx.initiator_account()); + let expected_added_balance = tx.common_data.fee.gas_limit * tx.common_data.fee.max_fee_per_gas; + assert_eq!( + storage.read_value(&balance_key), + u256_to_h256(expected_added_balance) + ); + + let mut storage = InMemoryStorage::default(); + storage.set_value(balance_key, H256::from_low_u64_be(2_000_000_000)); + let mut storage = StorageWithOverrides::new(storage); + VmSandbox::setup_storage(&mut storage, &execution_args, None); + + assert_eq!( + storage.read_value(&balance_key), + u256_to_h256(expected_added_balance + U256::from(2_000_000_000)) + ); +} + +#[test_casing(9, Product((EXEC_MODES, FAST_VM_MODES)))] +#[tokio::test] +async fn inspecting_transfer(exec_mode: TxExecutionMode, fast_vm_mode: FastVmMode) { + let tx = create_l2_transaction(1_000_000_000.into(), Nonce(0)); + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + storage.set_value( + storage_key_for_eth_balance(&tx.initiator_account()), + u256_to_h256(u64::MAX.into()), + ); + let storage = StorageWithOverrides::new(storage); + + let l1_batch = default_l1_batch_env(1); + let env = OneshotEnv { + system: default_system_env(exec_mode), + current_block: Some(StoredL2BlockEnv { + number: l1_batch.first_l2_block.number - 1, + timestamp: l1_batch.first_l2_block.timestamp - 1, + txs_rolling_hash: H256::zero(), + }), + l1_batch, + }; + let args = TxExecutionArgs::for_gas_estimate(tx.into()); + let tracing = OneshotTracingParams::default(); + + let mut executor = MainOneshotExecutor::new(usize::MAX); + executor.set_fast_vm_mode(fast_vm_mode); + let result = executor + .inspect_transaction_with_bytecode_compression(storage, env, args, tracing) + .await + .unwrap(); + result.compression_result.unwrap(); + let exec_result = result.tx_result.result; + assert!(!exec_result.is_failed(), "{exec_result:?}"); +} diff --git a/core/lib/vm_executor/src/testonly.rs b/core/lib/vm_executor/src/testonly.rs index 5bcd604a432..2fa7f075db7 100644 --- a/core/lib/vm_executor/src/testonly.rs +++ b/core/lib/vm_executor/src/testonly.rs @@ -2,11 +2,14 @@ use once_cell::sync::Lazy; use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + utils::derive_base_fee_and_gas_per_pubdata, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + zk_evm_latest::ethereum_types::U256, }; use zksync_types::{ - block::L2BlockHasher, fee_model::BatchFeeInput, vm::FastVmMode, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, + block::L2BlockHasher, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, + transaction_request::PaymasterParams, vm::FastVmMode, Address, K256PrivateKey, L1BatchNumber, + L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, }; static BASE_SYSTEM_CONTRACTS: Lazy = @@ -43,3 +46,28 @@ pub(crate) fn default_l1_batch_env(number: u32) -> L1BatchEnv { fee_input: BatchFeeInput::sensible_l1_pegged_default(), } } + +pub(crate) fn create_l2_transaction(value: U256, nonce: Nonce) -> L2Tx { + let (max_fee_per_gas, gas_per_pubdata_limit) = derive_base_fee_and_gas_per_pubdata( + BatchFeeInput::sensible_l1_pegged_default(), + ProtocolVersionId::latest().into(), + ); + let fee = Fee { + gas_limit: 10_000_000.into(), + max_fee_per_gas: max_fee_per_gas.into(), + max_priority_fee_per_gas: 0_u64.into(), + 
gas_per_pubdata_limit: gas_per_pubdata_limit.into(), + }; + L2Tx::new_signed( + Some(Address::random()), + vec![], + nonce, + fee, + value, + L2ChainId::default(), + &K256PrivateKey::random(), + vec![], + PaymasterParams::default(), + ) + .unwrap() +} diff --git a/core/lib/vm_interface/src/storage/mod.rs b/core/lib/vm_interface/src/storage/mod.rs index 6cdcd33db68..aade56ca5d9 100644 --- a/core/lib/vm_interface/src/storage/mod.rs +++ b/core/lib/vm_interface/src/storage/mod.rs @@ -5,11 +5,13 @@ use zksync_types::{get_known_code_key, StorageKey, StorageValue, H256}; pub use self::{ // Note, that `test_infra` of the bootloader tests relies on this value to be exposed in_memory::{InMemoryStorage, IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID}, + overrides::StorageWithOverrides, snapshot::{StorageSnapshot, StorageWithSnapshot}, view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewStats}, }; mod in_memory; +mod overrides; mod snapshot; mod view; diff --git a/core/lib/vm_interface/src/storage/overrides.rs b/core/lib/vm_interface/src/storage/overrides.rs new file mode 100644 index 00000000000..ad5a3d8624f --- /dev/null +++ b/core/lib/vm_interface/src/storage/overrides.rs @@ -0,0 +1,70 @@ +//! VM storage functionality specifically used in the VM sandbox. + +use std::{ + collections::{HashMap, HashSet}, + fmt, +}; + +use zksync_types::{AccountTreeId, StorageKey, StorageValue, H256}; + +use super::ReadStorage; + +/// A storage view that allows to override some of the storage values. +#[derive(Debug)] +pub struct StorageWithOverrides { + storage_handle: S, + overridden_slots: HashMap, + overridden_factory_deps: HashMap>, + empty_accounts: HashSet, +} + +impl StorageWithOverrides { + /// Creates a new storage view based on the underlying storage. + pub fn new(storage: S) -> Self { + Self { + storage_handle: storage, + overridden_slots: HashMap::new(), + overridden_factory_deps: HashMap::new(), + empty_accounts: HashSet::new(), + } + } + + pub fn set_value(&mut self, key: StorageKey, value: StorageValue) { + self.overridden_slots.insert(key, value); + } + + pub fn store_factory_dep(&mut self, hash: H256, code: Vec) { + self.overridden_factory_deps.insert(hash, code); + } + + pub fn insert_erased_account(&mut self, account: AccountTreeId) { + self.empty_accounts.insert(account); + } +} + +impl ReadStorage for StorageWithOverrides { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + if let Some(value) = self.overridden_slots.get(key) { + return *value; + } + if self.empty_accounts.contains(key.account()) { + return H256::zero(); + } + self.storage_handle.read_value(key) + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.storage_handle.is_write_initial(key) + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.overridden_factory_deps + .get(&hash) + .cloned() + .or_else(|| self.storage_handle.load_factory_dep(hash)) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.storage_handle.get_enumeration_index(key) + } +} diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs index cb80ba7c138..83f87f0fe1d 100644 --- a/core/lib/vm_interface/src/types/inputs/mod.rs +++ b/core/lib/vm_interface/src/types/inputs/mod.rs @@ -15,7 +15,7 @@ mod l2_block; mod system_env; /// Full environment for oneshot transaction / call execution. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct OneshotEnv { /// System environment. 
pub system: SystemEnv, diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index bdd57462588..7958b5ed3c1 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -8,7 +8,7 @@ use tokio::runtime::Handle; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{ executor::{OneshotExecutor, TransactionValidator}, - storage::ReadStorage, + storage::{ReadStorage, StorageWithOverrides}, tracer::{ValidationError, ValidationParams}, Call, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, TransactionExecutionMetrics, TxExecutionArgs, VmExecutionResultAndLogs, @@ -20,11 +20,10 @@ use zksync_types::{ use zksync_vm_executor::oneshot::{MainOneshotExecutor, MockOneshotExecutor}; use super::{ - storage::StorageWithOverrides, vm_metrics::{self, SandboxStage}, BlockArgs, VmPermit, SANDBOX_METRICS, }; -use crate::tx_sender::SandboxExecutorOptions; +use crate::{execution_sandbox::storage::apply_state_override, tx_sender::SandboxExecutorOptions}; /// Action that can be executed by [`SandboxExecutor`]. #[derive(Debug)] @@ -109,6 +108,9 @@ impl SandboxExecutor { missed_storage_invocation_limit: usize, ) -> Self { let mut executor = MainOneshotExecutor::new(missed_storage_invocation_limit); + executor.set_fast_vm_mode(options.fast_vm_mode); + #[cfg(test)] + executor.panic_on_divergence(); executor .set_execution_latency_histogram(&SANDBOX_METRICS.sandbox[&SandboxStage::Execution]); Self { @@ -151,7 +153,7 @@ impl SandboxExecutor { .await?; let state_override = state_override.unwrap_or_default(); - let storage = StorageWithOverrides::new(storage, &state_override); + let storage = apply_state_override(storage, &state_override); let (execution_args, tracing_params) = action.into_parts(); let result = self .inspect_transaction_with_bytecode_compression( @@ -246,13 +248,13 @@ impl SandboxExecutor { } #[async_trait] -impl OneshotExecutor for SandboxExecutor +impl OneshotExecutor> for SandboxExecutor where S: ReadStorage + Send + 'static, { async fn inspect_transaction_with_bytecode_compression( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, args: TxExecutionArgs, tracing_params: OneshotTracingParams, @@ -283,13 +285,13 @@ where } #[async_trait] -impl TransactionValidator for SandboxExecutor +impl TransactionValidator> for SandboxExecutor where S: ReadStorage + Send + 'static, { async fn validate_transaction( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, diff --git a/core/node/api_server/src/execution_sandbox/storage.rs b/core/node/api_server/src/execution_sandbox/storage.rs index bf775d48490..c80356f6e36 100644 --- a/core/node/api_server/src/execution_sandbox/storage.rs +++ b/core/node/api_server/src/execution_sandbox/storage.rs @@ -1,127 +1,67 @@ //! VM storage functionality specifically used in the VM sandbox. 
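The generic `StorageWithOverrides` added in `overrides.rs` above now owns the override bookkeeping that the sandbox-local wrapper used to carry, and the `storage.rs` rewrite below shrinks to populating it via `apply_state_override`. A minimal usage sketch of the new type, assuming an `InMemoryStorage` backing store and arbitrary key/value choices (not part of the patch):

```rust
use zksync_multivm::interface::storage::{InMemoryStorage, ReadStorage, StorageWithOverrides};
use zksync_types::{AccountTreeId, Address, StorageKey, H256};

fn overrides_sketch() {
    // Wrap any `ReadStorage`; overridden slots shadow the underlying values on reads.
    let mut storage = StorageWithOverrides::new(InMemoryStorage::default());
    let account = AccountTreeId::new(Address::repeat_byte(1));
    let key = StorageKey::new(account, H256::zero());
    storage.set_value(key, H256::from_low_u64_be(42));
    assert_eq!(storage.read_value(&key), H256::from_low_u64_be(42));

    // Accounts marked as erased read as zero for all slots that are not explicitly overridden.
    storage.insert_erased_account(account);
}
```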
-use std::{ - collections::{HashMap, HashSet}, - fmt, -}; - -use zksync_multivm::interface::storage::ReadStorage; +use zksync_multivm::interface::storage::{ReadStorage, StorageWithOverrides}; use zksync_types::{ api::state_override::{OverrideState, StateOverride}, get_code_key, get_known_code_key, get_nonce_key, utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, - AccountTreeId, StorageKey, StorageValue, H256, + AccountTreeId, StorageKey, H256, }; use zksync_utils::{h256_to_u256, u256_to_h256}; -/// A storage view that allows to override some of the storage values. -#[derive(Debug)] -pub(super) struct StorageWithOverrides { - storage_handle: S, - overridden_slots: HashMap, - overridden_factory_deps: HashMap>, - overridden_accounts: HashSet, -} - -impl StorageWithOverrides { - /// Creates a new storage view based on the underlying storage. - pub(super) fn new(storage: S, state_override: &StateOverride) -> Self { - let mut this = Self { - storage_handle: storage, - overridden_slots: HashMap::new(), - overridden_factory_deps: HashMap::new(), - overridden_accounts: HashSet::new(), - }; - this.apply_state_override(state_override); - this - } - - fn apply_state_override(&mut self, state_override: &StateOverride) { - for (account, overrides) in state_override.iter() { - if let Some(balance) = overrides.balance { - let balance_key = storage_key_for_eth_balance(account); - self.overridden_slots - .insert(balance_key, u256_to_h256(balance)); - } +/// This method is blocking. +pub(super) fn apply_state_override( + storage: S, + state_override: &StateOverride, +) -> StorageWithOverrides { + let mut storage = StorageWithOverrides::new(storage); + for (account, overrides) in state_override.iter() { + if let Some(balance) = overrides.balance { + let balance_key = storage_key_for_eth_balance(account); + storage.set_value(balance_key, u256_to_h256(balance)); + } - if let Some(nonce) = overrides.nonce { - let nonce_key = get_nonce_key(account); - let full_nonce = self.read_value(&nonce_key); - let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); - let new_full_nonce = u256_to_h256(nonces_to_full_nonce(nonce, deployment_nonce)); - self.overridden_slots.insert(nonce_key, new_full_nonce); - } + if let Some(nonce) = overrides.nonce { + let nonce_key = get_nonce_key(account); + let full_nonce = storage.read_value(&nonce_key); + let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); + let new_full_nonce = u256_to_h256(nonces_to_full_nonce(nonce, deployment_nonce)); + storage.set_value(nonce_key, new_full_nonce); + } - if let Some(code) = &overrides.code { - let code_key = get_code_key(account); - let code_hash = code.hash(); - self.overridden_slots.insert(code_key, code_hash); - let known_code_key = get_known_code_key(&code_hash); - self.overridden_slots - .insert(known_code_key, H256::from_low_u64_be(1)); - self.store_factory_dep(code_hash, code.clone().into_bytes()); - } + if let Some(code) = &overrides.code { + let code_key = get_code_key(account); + let code_hash = code.hash(); + storage.set_value(code_key, code_hash); + let known_code_key = get_known_code_key(&code_hash); + storage.set_value(known_code_key, H256::from_low_u64_be(1)); + storage.store_factory_dep(code_hash, code.clone().into_bytes()); + } - match &overrides.state { - Some(OverrideState::State(state)) => { - let account = AccountTreeId::new(*account); - self.override_account_state_diff(account, state); - self.overridden_accounts.insert(account); + match &overrides.state 
{ + Some(OverrideState::State(state)) => { + let account = AccountTreeId::new(*account); + for (&key, &value) in state { + storage.set_value(StorageKey::new(account, key), value); } - Some(OverrideState::StateDiff(state_diff)) => { - let account = AccountTreeId::new(*account); - self.override_account_state_diff(account, state_diff); + storage.insert_erased_account(account); + } + Some(OverrideState::StateDiff(state_diff)) => { + let account = AccountTreeId::new(*account); + for (&key, &value) in state_diff { + storage.set_value(StorageKey::new(account, key), value); } - None => { /* do nothing */ } } + None => { /* do nothing */ } } } - - fn store_factory_dep(&mut self, hash: H256, code: Vec) { - self.overridden_factory_deps.insert(hash, code); - } - - fn override_account_state_diff( - &mut self, - account: AccountTreeId, - state_diff: &HashMap, - ) { - let account_slots = state_diff - .iter() - .map(|(&slot, &value)| (StorageKey::new(account, slot), value)); - self.overridden_slots.extend(account_slots); - } -} - -impl ReadStorage for StorageWithOverrides { - fn read_value(&mut self, key: &StorageKey) -> StorageValue { - if let Some(value) = self.overridden_slots.get(key) { - return *value; - } - if self.overridden_accounts.contains(key.account()) { - return H256::zero(); - } - self.storage_handle.read_value(key) - } - - fn is_write_initial(&mut self, key: &StorageKey) -> bool { - self.storage_handle.is_write_initial(key) - } - - fn load_factory_dep(&mut self, hash: H256) -> Option> { - self.overridden_factory_deps - .get(&hash) - .cloned() - .or_else(|| self.storage_handle.load_factory_dep(hash)) - } - - fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { - self.storage_handle.get_enumeration_index(key) - } + storage } #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_multivm::interface::storage::InMemoryStorage; use zksync_types::{ api::state_override::{Bytecode, OverrideAccount}, @@ -184,7 +124,7 @@ mod tests { storage.set_value(retained_key, H256::repeat_byte(0xfe)); let erased_key = StorageKey::new(AccountTreeId::new(Address::repeat_byte(5)), H256::zero()); storage.set_value(erased_key, H256::repeat_byte(1)); - let mut storage = StorageWithOverrides::new(storage, &overrides); + let mut storage = apply_state_override(storage, &overrides); let balance = storage.read_value(&storage_key_for_eth_balance(&Address::repeat_byte(1))); assert_eq!(balance, H256::from_low_u64_be(1)); diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index 9a3c88f8bf0..758547abbd6 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -5,16 +5,15 @@ use tracing::Instrument; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::{ executor::TransactionValidator, + storage::StorageWithOverrides, tracer::{ValidationError as RawValidationError, ValidationParams}, }; use zksync_types::{ - api::state_override::StateOverride, fee_model::BatchFeeInput, l2::L2Tx, Address, - TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, + fee_model::BatchFeeInput, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, }; use super::{ execute::{SandboxAction, SandboxExecutor}, - storage::StorageWithOverrides, vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS}, BlockArgs, VmPermit, }; @@ -57,7 +56,7 @@ impl SandboxExecutor { let SandboxAction::Execution { tx, .. 
} = action else { unreachable!(); // by construction }; - let storage = StorageWithOverrides::new(storage, &StateOverride::default()); + let storage = StorageWithOverrides::new(storage); let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); let validation_result = self diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index c2f900484ba..518f60c6ba4 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -10,7 +10,7 @@ use zksync_contracts::{ }; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_system_constants::{L2_BASE_TOKEN_ADDRESS, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE}; use zksync_types::{ api::state_override::{Bytecode, OverrideAccount, OverrideState, StateOverride}, ethabi, @@ -18,11 +18,12 @@ use zksync_types::{ fee::Fee, fee_model::FeeParams, get_code_key, get_known_code_key, + l1::L1Tx, l2::L2Tx, - transaction_request::{CallRequest, PaymasterParams}, + transaction_request::{CallRequest, Eip712Meta, PaymasterParams}, utils::storage_key_for_eth_balance, AccountTreeId, Address, K256PrivateKey, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, - StorageKey, StorageLog, H256, U256, + StorageKey, StorageLog, EIP_712_TX_TYPE, H256, U256, }; use zksync_utils::{address_to_u256, u256_to_h256}; @@ -343,6 +344,8 @@ pub(crate) trait TestAccount { fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx; + fn create_l1_counter_tx(&self, increment: U256, revert: bool) -> L1Tx; + fn query_counter_value(&self) -> CallRequest; fn create_infinite_loop_tx(&self) -> L2Tx; @@ -482,6 +485,26 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn create_l1_counter_tx(&self, increment: U256, revert: bool) -> L1Tx { + let calldata = load_contract(COUNTER_CONTRACT_PATH) + .function("incrementWithRevert") + .expect("no `incrementWithRevert` function") + .encode_input(&[Token::Uint(increment), Token::Bool(revert)]) + .expect("failed encoding `incrementWithRevert` input"); + let request = CallRequest { + data: Some(calldata.into()), + from: Some(self.address()), + to: Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), + transaction_type: Some(EIP_712_TX_TYPE.into()), + eip712_meta: Some(Eip712Meta { + gas_per_pubdata: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Eip712Meta::default() + }), + ..CallRequest::default() + }; + L1Tx::from_request(request, false).unwrap() + } + fn query_counter_value(&self) -> CallRequest { let calldata = load_contract(COUNTER_CONTRACT_PATH) .function("get") diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 38794fe7137..75cc1ad602f 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -25,6 +25,7 @@ use zksync_types::{ l2::{error::TxCheckError::TxDuplication, L2Tx}, transaction_request::CallOverrides, utils::storage_key_for_eth_balance, + vm::FastVmMode, AccountTreeId, Address, L2ChainId, Nonce, ProtocolVersionId, Transaction, H160, H256, MAX_NEW_FACTORY_DEPS, U256, }; @@ -89,6 +90,7 @@ pub async fn build_tx_sender( /// Oneshot executor options used by the API server sandbox. #[derive(Debug)] pub struct SandboxExecutorOptions { + pub(crate) fast_vm_mode: FastVmMode, /// Env parameters to be used when estimating gas. pub(crate) estimate_gas: OneshotEnvParameters, /// Env parameters to be used when performing `eth_call` requests. 
@@ -114,6 +116,7 @@ impl SandboxExecutorOptions { .context("failed loading base contracts for calls / tx execution")?; Ok(Self { + fast_vm_mode: FastVmMode::Old, estimate_gas: OneshotEnvParameters::new( Arc::new(estimate_gas_contracts), chain_id, @@ -129,6 +132,11 @@ impl SandboxExecutorOptions { }) } + /// Sets the fast VM mode used by this executor. + pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { + self.fast_vm_mode = fast_vm_mode; + } + pub(crate) async fn mock() -> Self { Self::new(L2ChainId::default(), AccountTreeId::default(), u32::MAX) .await diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs index 4528d9cda12..7db1b833931 100644 --- a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -74,6 +74,28 @@ async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractE test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; } +#[tokio::test] +async fn initial_gas_estimate_for_l1_transaction() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + let tx = alice.create_l1_counter_tx(1.into(), false); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + assert!(initial_estimate.total_gas_charged.is_none()); + + let (vm_result, _) = estimator.unadjusted_step(15_000).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + let (vm_result, _) = estimator.unadjusted_step(1_000_000).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + #[test_casing(2, [false, true])] #[tokio::test] async fn initial_estimate_for_deep_recursion(with_reads: bool) { @@ -322,9 +344,10 @@ async fn insufficient_funds_error_for_transfer() { async fn test_estimating_gas( state_override: StateOverride, - tx: L2Tx, + tx: impl Into, acceptable_overestimation: u64, ) { + let tx = tx.into(); let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; let block_args = pending_block_args(&tx_sender).await; @@ -332,7 +355,7 @@ async fn test_estimating_gas( let fee_scale_factor = 1.0; let fee = tx_sender .get_txs_fee_in_wei( - tx.clone().into(), + tx.clone(), block_args.clone(), fee_scale_factor, acceptable_overestimation, @@ -350,7 +373,7 @@ async fn test_estimating_gas( let fee = tx_sender .get_txs_fee_in_wei( - tx.into(), + tx, block_args, fee_scale_factor, acceptable_overestimation, @@ -383,6 +406,15 @@ async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { test_estimating_gas(state_override, tx, acceptable_overestimation).await; } +#[tokio::test] +async fn estimating_gas_for_l1_transaction() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + let tx = alice.create_l1_counter_tx(1.into(), false); + + test_estimating_gas(state_override, tx, 0).await; +} + #[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] #[tokio::test] async fn estimating_gas_for_load_test_tx( diff --git 
a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs index cacd616202d..ea3f77fbcd8 100644 --- a/core/node/api_server/src/tx_sender/tests/mod.rs +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -145,13 +145,14 @@ async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { drop(storage); let genesis_config = genesis_params.config(); - let executor_options = SandboxExecutorOptions::new( + let mut executor_options = SandboxExecutorOptions::new( genesis_config.l2_chain_id, AccountTreeId::new(genesis_config.fee_account), u32::MAX, ) .await .unwrap(); + executor_options.set_fast_vm_mode(FastVmMode::Shadow); let pg_caches = PostgresStorageCaches::new(1, 1); let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 45128f579cd..7dd0164198a 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -16,8 +16,8 @@ use zksync_multivm::interface::{ }; use zksync_types::{ api::ApiStorageLog, fee_model::BatchFeeInput, get_intrinsic_constants, - transaction_request::CallRequest, K256PrivateKey, L2ChainId, PackedEthSignature, - StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, + transaction_request::CallRequest, vm::FastVmMode, K256PrivateKey, L2ChainId, + PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, }; use zksync_utils::u256_to_h256; use zksync_vm_executor::oneshot::{ @@ -92,6 +92,7 @@ impl BaseSystemContractsProvider for BaseContractsWithMockE fn executor_options_with_evm_emulator() -> SandboxExecutorOptions { let base_contracts = Arc::::default(); SandboxExecutorOptions { + fast_vm_mode: FastVmMode::Old, estimate_gas: OneshotEnvParameters::new( base_contracts.clone(), L2ChainId::default(), diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 5e9aadc8f37..80ff647ff5d 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -127,7 +127,21 @@ impl EN { ) .await .wrap("Store::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("Store::runner()")?) }); + + // Run the temporary fetcher until the certificates are backfilled. + // Temporary fetcher should be removed once json RPC syncing is fully deprecated. + s.spawn_bg({ + let store = store.clone(); + async { + let store = store; + self.temporary_block_fetcher(ctx, &store).await?; + tracing::info!( + "temporary block fetcher finished, switching to p2p fetching only" + ); + Ok(()) + } + }); // Run the temporary fetcher until the certificates are backfilled. // Temporary fetcher should be removed once json RPC syncing is fully deprecated. @@ -146,14 +160,25 @@ impl EN { let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("BlockStore::run()")?) 
}); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(self.run_attestation_controller( - ctx, - global_config.clone(), - attestation.clone(), - )); + s.spawn_bg({ + let global_config = global_config.clone(); + let attestation = attestation.clone(); + async { + let res = self + .run_attestation_controller(ctx, global_config, attestation) + .await + .wrap("run_attestation_controller()"); + // Attestation currently is not critical for the node to function. + // If it fails, we just log the error and continue. + if let Err(err) = res { + tracing::error!("attestation controller failed: {err:#}"); + } + Ok(()) + } + }); let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, build_version)?, diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 2a280b2f161..a392acfbe5f 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -30,7 +30,7 @@ pub async fn run_main_node( tracing::debug!(is_attester = attester.is_some(), "main node attester mode"); - scope::run!(&ctx, |ctx, s| async { + let res: ctx::Result<()> = scope::run!(&ctx, |ctx, s| async { if let Some(spec) = &cfg.genesis_spec { let spec = config::GenesisSpec::parse(spec).context("GenesisSpec::parse()")?; @@ -46,7 +46,7 @@ pub async fn run_main_node( let (store, runner) = Store::new(ctx, pool.clone(), None, None) .await .wrap("Store::new()")?; - s.spawn_bg(runner.run(ctx)); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("Store::runner()")?) }); let global_config = pool .connection(ctx) @@ -56,25 +56,36 @@ pub async fn run_main_node( .await .wrap("global_config()")? .context("global_config() disappeared")?; - anyhow::ensure!( - global_config.genesis.leader_selection - == validator::LeaderSelectionMode::Sticky(validator_key.public()), - "unsupported leader selection mode - main node has to be the leader" - ); + if global_config.genesis.leader_selection + != validator::LeaderSelectionMode::Sticky(validator_key.public()) + { + return Err(anyhow::format_err!( + "unsupported leader selection mode - main node has to be the leader" + ) + .into()); + } let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; - s.spawn_bg(runner.run(ctx)); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("BlockStore::run()")?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(run_attestation_controller( - ctx, - &pool, - global_config.clone(), - attestation.clone(), - )); - + s.spawn_bg({ + let global_config = global_config.clone(); + let attestation = attestation.clone(); + async { + let res = run_attestation_controller(ctx, &pool, global_config, attestation) + .await + .wrap("run_attestation_controller()"); + // Attestation currently is not critical for the node to function. + // If it fails, we just log the error and continue. 
+ if let Err(err) = res { + tracing::error!("attestation controller failed: {err:#}"); + } + Ok(()) + } + }); let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, @@ -87,9 +98,14 @@ pub async fn run_main_node( }; tracing::info!("running the main node executor"); - executor.run(ctx).await + executor.run(ctx).await.context("executor")?; + Ok(()) }) - .await + .await; + match res { + Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), + Err(ctx::Error::Internal(err)) => Err(err), + } } /// Manages attestation state by configuring the @@ -100,91 +116,84 @@ async fn run_attestation_controller( pool: &ConnectionPool, cfg: consensus_dal::GlobalConfig, attestation: Arc, -) -> anyhow::Result<()> { +) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); let registry = registry::Registry::new(cfg.genesis, pool.clone()).await; let registry_addr = cfg.registry_address.map(registry::Address::new); let mut next = attester::BatchNumber(0); - let res = async { - loop { - // After regenesis it might happen that the batch number for the first block - // is not immediately known (the first block was not produced yet), - // therefore we need to wait for it. - let status = loop { - match pool - .connection(ctx) - .await - .wrap("connection()")? - .attestation_status(ctx) - .await - .wrap("attestation_status()")? - { - Some(status) if status.next_batch_to_attest >= next => break status, - _ => {} - } - ctx.sleep(POLL_INTERVAL).await?; - }; - next = status.next_batch_to_attest.next(); - tracing::info!( - "waiting for hash of batch {:?}", - status.next_batch_to_attest - ); - let info = pool - .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) - .await?; - let hash = consensus_dal::batch_hash(&info); - let Some(committee) = registry - .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) - .await - .wrap("attester_committee_for()")? - else { - tracing::info!("attestation not required"); - continue; - }; - let committee = Arc::new(committee); - // Persist the derived committee. - pool.connection(ctx) - .await - .wrap("connection")? - .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) - .await - .wrap("upsert_attester_committee()")?; - tracing::info!( - "attesting batch {:?} with hash {hash:?}", - status.next_batch_to_attest - ); - attestation - .start_attestation(Arc::new(attestation::Info { - batch_to_attest: attester::Batch { - hash, - number: status.next_batch_to_attest, - genesis: status.genesis, - }, - committee, - })) - .await - .context("start_attestation()")?; - // Main node is the only node which can update the global AttestationStatus, - // therefore we can synchronously wait for the certificate. - let qc = attestation - .wait_for_cert(ctx, status.next_batch_to_attest) - .await? - .context("attestation config has changed unexpectedly")?; - tracing::info!( - "collected certificate for batch {:?}", - status.next_batch_to_attest - ); - pool.connection(ctx) + loop { + // After regenesis it might happen that the batch number for the first block + // is not immediately known (the first block was not produced yet), + // therefore we need to wait for it. + let status = loop { + match pool + .connection(ctx) .await .wrap("connection()")? 
- .insert_batch_certificate(ctx, &qc) + .attestation_status(ctx) .await - .wrap("insert_batch_certificate()")?; - } - } - .await; - match res { - Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), - Err(ctx::Error::Internal(err)) => Err(err), + .wrap("attestation_status()")? + { + Some(status) if status.next_batch_to_attest >= next => break status, + _ => {} + } + ctx.sleep(POLL_INTERVAL).await?; + }; + next = status.next_batch_to_attest.next(); + tracing::info!( + "waiting for hash of batch {:?}", + status.next_batch_to_attest + ); + let info = pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) + .await?; + let hash = consensus_dal::batch_hash(&info); + let Some(committee) = registry + .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + pool.connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; + tracing::info!( + "attesting batch {:?} with hash {hash:?}", + status.next_batch_to_attest + ); + attestation + .start_attestation(Arc::new(attestation::Info { + batch_to_attest: attester::Batch { + hash, + number: status.next_batch_to_attest, + genesis: status.genesis, + }, + committee, + })) + .await + .context("start_attestation()")?; + // Main node is the only node which can update the global AttestationStatus, + // therefore we can synchronously wait for the certificate. + let qc = attestation + .wait_for_cert(ctx, status.next_batch_to_attest) + .await? + .context("attestation config has changed unexpectedly")?; + tracing::info!( + "collected certificate for batch {:?}", + status.next_batch_to_attest + ); + pool.connection(ctx) + .await + .wrap("connection()")? 
+ .insert_batch_certificate(ctx, &qc) + .await + .wrap("insert_batch_certificate()")?; } } diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 46b84c34061..cbd4918dcee 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -11,7 +11,8 @@ use zksync_vm_executor::oneshot::{ CallOrExecute, MainOneshotExecutor, MultiVMBaseSystemContracts, OneshotEnvParameters, }; use zksync_vm_interface::{ - executor::OneshotExecutor, ExecutionResult, OneshotTracingParams, TxExecutionArgs, + executor::OneshotExecutor, storage::StorageWithOverrides, ExecutionResult, + OneshotTracingParams, TxExecutionArgs, }; use crate::{abi, storage::ConnectionPool}; @@ -89,7 +90,7 @@ impl VM { let output = ctx .wait(self.executor.inspect_transaction_with_bytecode_compression( - storage, + StorageWithOverrides::new(storage), env, TxExecutionArgs::for_eth_call(tx), OneshotTracingParams::default(), diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index ba1a69e23bb..023ef1059c7 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -6,7 +6,7 @@ use zksync_node_api_server::{ tx_sender::{SandboxExecutorOptions, TxSenderBuilder, TxSenderConfig}, }; use zksync_state::{PostgresStorageCaches, PostgresStorageCachesTask}; -use zksync_types::{AccountTreeId, Address}; +use zksync_types::{vm::FastVmMode, AccountTreeId, Address}; use zksync_web3_decl::{ client::{DynClient, L2}, jsonrpsee, @@ -60,6 +60,7 @@ pub struct TxSenderLayer { postgres_storage_caches_config: PostgresStorageCachesConfig, max_vm_concurrency: usize, whitelisted_tokens_for_aa_cache: bool, + vm_mode: FastVmMode, } #[derive(Debug, FromContext)] @@ -95,6 +96,7 @@ impl TxSenderLayer { postgres_storage_caches_config, max_vm_concurrency, whitelisted_tokens_for_aa_cache: false, + vm_mode: FastVmMode::Old, } } @@ -106,6 +108,12 @@ impl TxSenderLayer { self.whitelisted_tokens_for_aa_cache = value; self } + + /// Sets the fast VM modes used for all supported operations. + pub fn with_vm_mode(mut self, mode: FastVmMode) -> Self { + self.vm_mode = mode; + self + } } #[async_trait::async_trait] @@ -151,12 +159,13 @@ impl WiringLayer for TxSenderLayer { // TODO (BFT-138): Allow to dynamically reload API contracts let config = self.tx_sender_config; - let executor_options = SandboxExecutorOptions::new( + let mut executor_options = SandboxExecutorOptions::new( config.chain_id, AccountTreeId::new(config.fee_account_addr), config.validation_computational_gas_limit, ) .await?; + executor_options.set_fast_vm_mode(self.vm_mode); // Build `TxSender`. 
let mut tx_sender = TxSenderBuilder::new(config, replica_pool, tx_sink); diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 8e06d0c26bc..b265b94d4d7 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -164,7 +164,7 @@ impl TeeRequestProcessor { .tee_proof_generation_dal() .lock_batch_for_proving( tee_type, - self.config.proof_generation_timeout(), + self.config.tee_config.tee_proof_generation_timeout(), min_batch_number, ) .await diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 63ea087a81c..87c6bff8a1f 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -28,6 +28,7 @@ async fn request_tee_proof_inputs() { tee_config: TeeConfig { tee_support: true, first_tee_processed_batch: L1BatchNumber(0), + tee_proof_generation_timeout_in_secs: 600, }, }, L1BatchCommitmentMode::Rollup, @@ -86,6 +87,7 @@ async fn submit_tee_proof() { tee_config: TeeConfig { tee_support: true, first_tee_processed_batch: L1BatchNumber(0), + tee_proof_generation_timeout_in_secs: 600, }, }, L1BatchCommitmentMode::Rollup, diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 59c1e21493b..892bcf1c105 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -21,7 +21,7 @@ tokio.workspace = true [dev-dependencies] assert_matches.workspace = true -iai.workspace = true +yab.workspace = true [[bench]] name = "oneshot" @@ -32,5 +32,5 @@ name = "batch" harness = false [[bench]] -name = "iai" +name = "instructions" harness = false diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs deleted file mode 100644 index 8cbb9f10dd8..00000000000 --- a/core/tests/vm-benchmark/benches/iai.rs +++ /dev/null @@ -1,35 +0,0 @@ -use iai::black_box; -use vm_benchmark::{BenchmarkingVm, BenchmarkingVmFactory, Bytecode, Fast, Legacy}; - -fn run_bytecode(name: &str) { - let tx = Bytecode::get(name).deploy_tx(); - black_box(BenchmarkingVm::::default().run_transaction(&tx)); -} - -macro_rules! make_functions_and_main { - ($($file:ident => $legacy_name:ident,)+) => { - $( - fn $file() { - run_bytecode::(stringify!($file)); - } - - fn $legacy_name() { - run_bytecode::(stringify!($file)); - } - )+ - - iai::main!($($file, $legacy_name,)+); - }; -} - -make_functions_and_main!( - access_memory => access_memory_legacy, - call_far => call_far_legacy, - decode_shl_sub => decode_shl_sub_legacy, - deploy_simple_contract => deploy_simple_contract_legacy, - finish_eventful_frames => finish_eventful_frames_legacy, - write_and_decode => write_and_decode_legacy, - event_spam => event_spam_legacy, - slot_hash_collision => slot_hash_collision_legacy, - heap_read_write => heap_read_write_legacy, -); diff --git a/core/tests/vm-benchmark/benches/instructions.rs b/core/tests/vm-benchmark/benches/instructions.rs new file mode 100644 index 00000000000..654dfef71b2 --- /dev/null +++ b/core/tests/vm-benchmark/benches/instructions.rs @@ -0,0 +1,206 @@ +//! Measures the number of host instructions required to run the benchmark bytecodes. 
+ +use std::{env, sync::mpsc}; + +use vise::{Gauge, LabeledFamily, Metrics}; +use vm_benchmark::{ + criterion::PrometheusRuntime, BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, BYTECODES, +}; +use yab::{ + reporter::{BenchmarkOutput, BenchmarkReporter, Reporter}, + AccessSummary, BenchMode, Bencher, BenchmarkId, +}; + +fn benchmarks_for_vm(bencher: &mut Bencher) { + bencher.bench( + BenchmarkId::new("init", VM::LABEL.as_str()), + BenchmarkingVm::::default, + ); + + for bytecode in BYTECODES { + bencher.bench_with_capture( + BenchmarkId::new(bytecode.name, VM::LABEL.as_str()), + |capture| { + let mut vm = yab::black_box(BenchmarkingVm::::default()); + let tx = yab::black_box(bytecode.deploy_tx()); + capture.measure(|| vm.run_transaction(&tx)); + }, + ); + } +} + +/// Reporter that pushes cachegrind metrics to Prometheus. +#[derive(Debug)] +struct MetricsReporter { + _runtime: Option, +} + +impl Default for MetricsReporter { + fn default() -> Self { + Self { + _runtime: PrometheusRuntime::new(), + } + } +} + +impl Reporter for MetricsReporter { + fn new_benchmark(&mut self, id: &BenchmarkId) -> Box { + Box::new(MetricsBenchmarkReporter(id.clone())) + } +} + +#[derive(Debug)] +struct MetricsBenchmarkReporter(BenchmarkId); + +impl BenchmarkReporter for MetricsBenchmarkReporter { + fn ok(self: Box, output: &BenchmarkOutput) { + #[derive(Debug, Metrics)] + #[metrics(prefix = "vm_cachegrind")] + struct VmCachegrindMetrics { + #[metrics(labels = ["benchmark"])] + instructions: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + l1_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + l2_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + ram_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + cycles: LabeledFamily>, + } + + #[vise::register] + static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); + + let id = self.0.to_string(); + VM_CACHEGRIND_METRICS.instructions[&id].set(output.stats.total_instructions()); + if let Some(&full) = output.stats.as_full() { + let summary = AccessSummary::from(full); + VM_CACHEGRIND_METRICS.l1_accesses[&id].set(summary.l1_hits); + VM_CACHEGRIND_METRICS.l2_accesses[&id].set(summary.l3_hits); + VM_CACHEGRIND_METRICS.ram_accesses[&id].set(summary.ram_accesses); + VM_CACHEGRIND_METRICS.cycles[&id].set(summary.estimated_cycles()); + } + } +} + +#[derive(Debug, Clone, Copy)] +struct Comparison { + current_cycles: u64, + prev_cycles: Option, +} + +impl Comparison { + fn percent_difference(a: u64, b: u64) -> f64 { + ((b as i64) - (a as i64)) as f64 / (a as f64) * 100.0 + } + + fn new(output: &BenchmarkOutput) -> Option { + let current_cycles = AccessSummary::from(*output.stats.as_full()?).estimated_cycles(); + let prev_cycles = if let Some(prev_stats) = &output.prev_stats { + Some(AccessSummary::from(*prev_stats.as_full()?).estimated_cycles()) + } else { + None + }; + + Some(Self { + current_cycles, + prev_cycles, + }) + } + + fn cycles_diff(&self) -> Option { + self.prev_cycles + .map(|prev_cycles| Self::percent_difference(prev_cycles, self.current_cycles)) + } +} + +/// Reporter that outputs diffs in a Markdown table to stdout after all benchmarks are completed. +/// +/// Significant diff level can be changed via `BENCHMARK_DIFF_THRESHOLD_PERCENT` env var; it is set to 1% by default. 
+#[derive(Debug)] +struct ComparisonReporter { + comparisons_sender: mpsc::Sender<(String, Comparison)>, + comparisons_receiver: mpsc::Receiver<(String, Comparison)>, +} + +impl Default for ComparisonReporter { + fn default() -> Self { + let (comparisons_sender, comparisons_receiver) = mpsc::channel(); + Self { + comparisons_sender, + comparisons_receiver, + } + } +} + +impl Reporter for ComparisonReporter { + fn new_benchmark(&mut self, id: &BenchmarkId) -> Box { + Box::new(BenchmarkComparison { + comparisons_sender: self.comparisons_sender.clone(), + id: id.clone(), + }) + } + + fn ok(self: Box) { + const ENV_VAR: &str = "BENCHMARK_DIFF_THRESHOLD_PERCENT"; + + let diff_threshold = env::var(ENV_VAR).unwrap_or_else(|_| "1.0".into()); + let diff_threshold: f64 = diff_threshold.parse().unwrap_or_else(|err| { + panic!("incorrect `{ENV_VAR}` value: {err}"); + }); + + // Drop the sender to not hang on the iteration below. + drop(self.comparisons_sender); + let mut comparisons: Vec<_> = self.comparisons_receiver.iter().collect(); + comparisons.retain(|(_, diff)| { + // Output all stats if `diff_threshold <= 0.0` since this is what the user expects + diff.cycles_diff().unwrap_or(0.0) >= diff_threshold + }); + if comparisons.is_empty() { + return; + } + + comparisons.sort_unstable_by(|(name, _), (other_name, _)| name.cmp(other_name)); + + println!("\n## Detected VM performance changes"); + println!("Benchmark name | Est. cycles | Change in est. cycles |"); + println!("|:---|---:|---:|"); + for (name, comparison) in &comparisons { + let diff = comparison + .cycles_diff() + .map_or_else(|| "N/A".to_string(), |diff| format!("{diff:+.1}%")); + println!("| {name} | {} | {diff} |", comparison.current_cycles); + } + } +} + +#[derive(Debug)] +struct BenchmarkComparison { + comparisons_sender: mpsc::Sender<(String, Comparison)>, + id: BenchmarkId, +} + +impl BenchmarkReporter for BenchmarkComparison { + fn ok(self: Box, output: &BenchmarkOutput) { + if let Some(diff) = Comparison::new(output) { + self.comparisons_sender + .send((self.id.to_string(), diff)) + .ok(); + } + } +} + +fn benchmarks(bencher: &mut Bencher) { + if bencher.mode() == BenchMode::PrintResults { + // Only customize reporting if outputting previously collected benchmark result in order to prevent + // reporters influencing cachegrind stats. 
+ bencher + .add_reporter(MetricsReporter::default()) + .add_reporter(ComparisonReporter::default()); + } + benchmarks_for_vm::(bencher); + benchmarks_for_vm::(bencher); +} + +yab::main!(benchmarks); diff --git a/core/tests/vm-benchmark/src/bin/common/mod.rs b/core/tests/vm-benchmark/src/bin/common/mod.rs deleted file mode 100644 index a92c9d5f710..00000000000 --- a/core/tests/vm-benchmark/src/bin/common/mod.rs +++ /dev/null @@ -1,54 +0,0 @@ -use std::io::BufRead; - -#[derive(Debug)] -pub struct IaiResult { - pub name: String, - pub instructions: u64, - pub l1_accesses: u64, - pub l2_accesses: u64, - pub ram_accesses: u64, - pub cycles: u64, -} - -pub fn parse_iai(iai_output: R) -> impl Iterator { - IaiResultParser { - lines: iai_output.lines().map(|x| x.unwrap()), - } -} - -struct IaiResultParser> { - lines: I, -} - -impl> Iterator for IaiResultParser { - type Item = IaiResult; - - fn next(&mut self) -> Option { - self.lines.next().map(|name| { - let result = IaiResult { - name, - instructions: self.parse_stat(), - l1_accesses: self.parse_stat(), - l2_accesses: self.parse_stat(), - ram_accesses: self.parse_stat(), - cycles: self.parse_stat(), - }; - self.lines.next(); - result - }) - } -} - -impl> IaiResultParser { - fn parse_stat(&mut self) -> u64 { - let line = self.lines.next().unwrap(); - let number = line - .split(':') - .nth(1) - .unwrap() - .split_whitespace() - .next() - .unwrap(); - number.parse().unwrap() - } -} diff --git a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs deleted file mode 100644 index c274b039c9b..00000000000 --- a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs +++ /dev/null @@ -1,108 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - fs::File, - io::{BufRead, BufReader}, -}; - -pub use crate::common::parse_iai; - -mod common; - -fn main() { - let [iai_before, iai_after, opcodes_before, opcodes_after] = std::env::args() - .skip(1) - .take(4) - .collect::>() - .try_into() - .expect("expected four arguments"); - - let iai_before = get_name_to_cycles(&iai_before); - let iai_after = get_name_to_cycles(&iai_after); - let opcodes_before = get_name_to_opcodes(&opcodes_before); - let opcodes_after = get_name_to_opcodes(&opcodes_after); - - let perf_changes = iai_before - .keys() - .collect::>() - .intersection(&iai_after.keys().collect()) - .map(|&name| (name, percent_difference(iai_before[name], iai_after[name]))) - .collect::>(); - - let duration_changes = opcodes_before - .keys() - .collect::>() - .intersection(&opcodes_after.keys().collect()) - .map(|&name| { - let opcodes_abs_diff = (opcodes_after[name] as i64) - (opcodes_before[name] as i64); - (name, opcodes_abs_diff) - }) - .collect::>(); - - let mut nonzero_diff = false; - - for name in perf_changes - .iter() - .filter_map(|(key, value)| (value.abs() > 2.).then_some(key)) - .collect::>() - .union( - &duration_changes - .iter() - .filter_map(|(key, value)| (*value != 0).then_some(key)) - .collect(), - ) - { - // write the header before writing the first line of diff - if !nonzero_diff { - println!("Benchmark name | change in estimated runtime | change in number of opcodes executed \n--- | --- | ---"); - nonzero_diff = true; - } - - let n_a = "N/A".to_string(); - println!( - "{} | {} | {}", - name, - perf_changes - .get(**name) - .map(|percent| format!("{:+.1}%", percent)) - .unwrap_or(n_a.clone()), - duration_changes - .get(**name) - .map(|abs_diff| format!( - "{:+} ({:+.1}%)", - abs_diff, - percent_difference(opcodes_before[**name], 
opcodes_after[**name]) - )) - .unwrap_or(n_a), - ); - } - - if nonzero_diff { - println!("\n Changes in number of opcodes executed indicate that the gas price of the benchmark has changed, which causes it run out of gas at a different time. Or that it is behaving completely differently."); - } -} - -fn percent_difference(a: u64, b: u64) -> f64 { - ((b as f64) - (a as f64)) / (a as f64) * 100.0 -} - -fn get_name_to_cycles(filename: &str) -> HashMap { - parse_iai(BufReader::new( - File::open(filename).expect("failed to open file"), - )) - .map(|x| (x.name, x.cycles)) - .collect() -} - -fn get_name_to_opcodes(filename: &str) -> HashMap { - BufReader::new(File::open(filename).expect("failed to open file")) - .lines() - .map(|line| { - let line = line.unwrap(); - let mut it = line.split_whitespace(); - ( - it.next().unwrap().to_string(), - it.next().unwrap().parse().unwrap(), - ) - }) - .collect() -} diff --git a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs deleted file mode 100644 index 3b3aa05bf69..00000000000 --- a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::{env, io::BufReader, time::Duration}; - -use tokio::sync::watch; -use vise::{Gauge, LabeledFamily, Metrics}; -use zksync_vlog::prometheus::PrometheusExporterConfig; - -use crate::common::{parse_iai, IaiResult}; - -mod common; - -#[derive(Debug, Metrics)] -#[metrics(prefix = "vm_cachegrind")] -pub(crate) struct VmCachegrindMetrics { - #[metrics(labels = ["benchmark"])] - pub instructions: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l1_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l2_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub ram_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub cycles: LabeledFamily>, -} - -#[vise::register] -pub(crate) static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); - -#[tokio::main] -async fn main() { - let results: Vec = parse_iai(BufReader::new(std::io::stdin())).collect(); - - let endpoint = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL") - .expect("`BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL` env var is not set"); - let (stop_sender, stop_receiver) = watch::channel(false); - let prometheus_config = - PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); - tokio::spawn(prometheus_config.run(stop_receiver)); - - for result in results { - let name = result.name; - VM_CACHEGRIND_METRICS.instructions[&name.clone()].set(result.instructions); - VM_CACHEGRIND_METRICS.l1_accesses[&name.clone()].set(result.l1_accesses); - VM_CACHEGRIND_METRICS.l2_accesses[&name.clone()].set(result.l2_accesses); - VM_CACHEGRIND_METRICS.ram_accesses[&name.clone()].set(result.ram_accesses); - VM_CACHEGRIND_METRICS.cycles[&name].set(result.cycles); - } - - println!("Waiting for push to happen..."); - tokio::time::sleep(Duration::from_secs(1)).await; - stop_sender.send_replace(true); -} diff --git a/core/tests/vm-benchmark/src/bin/instruction_counts.rs b/core/tests/vm-benchmark/src/bin/instruction_counts.rs index 96208007fd9..ece30a66cee 100644 --- a/core/tests/vm-benchmark/src/bin/instruction_counts.rs +++ b/core/tests/vm-benchmark/src/bin/instruction_counts.rs @@ -1,16 +1,100 @@ //! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. 
-use vm_benchmark::{BenchmarkingVmFactory, Fast, Legacy, BYTECODES}; +use std::{collections::BTreeMap, env, fs, io, path::PathBuf}; -fn main() { - for bytecode in BYTECODES { - let tx = bytecode.deploy_tx(); - let name = bytecode.name; - println!("{name} {}", Fast::<()>::count_instructions(&tx)); - println!( - "{} {}", - name.to_string() + "_legacy", - Legacy::count_instructions(&tx) - ); +use vm_benchmark::{CountInstructions, Fast, Legacy, BYTECODES}; + +#[derive(Debug)] +enum Command { + Print, + Diff { old: PathBuf }, +} + +impl Command { + fn from_env() -> Self { + let mut args = env::args().skip(1); + let Some(first) = args.next() else { + return Self::Print; + }; + assert_eq!(first, "--diff", "Unsupported command-line arg"); + let old = args.next().expect("`--diff` requires a path to old file"); + Self::Diff { old: old.into() } } + + fn print_instructions(counts: &BTreeMap<&str, usize>) { + for (bytecode_name, count) in counts { + println!("{bytecode_name} {count}"); + } + } + + fn parse_counts(reader: impl io::BufRead) -> BTreeMap { + let mut counts = BTreeMap::new(); + for line in reader.lines() { + let line = line.unwrap(); + if line.is_empty() { + continue; + } + let (name, count) = line.split_once(' ').expect("invalid output format"); + let count = count.parse().unwrap_or_else(|err| { + panic!("invalid count for `{name}`: {err}"); + }); + counts.insert(name.to_owned(), count); + } + counts + } + + fn run(self) { + let counts: BTreeMap<_, _> = BYTECODES + .iter() + .map(|bytecode| { + let tx = bytecode.deploy_tx(); + // We have a unit test comparing stats, but do it here as well just in case. + let fast_count = Fast::count_instructions(&tx); + let legacy_count = Legacy::count_instructions(&tx); + assert_eq!( + fast_count, legacy_count, + "mismatch on number of instructions on bytecode `{}`", + bytecode.name + ); + + (bytecode.name, fast_count) + }) + .collect(); + + match self { + Self::Print => Self::print_instructions(&counts), + Self::Diff { old } => { + let file = fs::File::open(&old).unwrap_or_else(|err| { + panic!("failed opening `{}`: {err}", old.display()); + }); + let reader = io::BufReader::new(file); + let old_counts = Self::parse_counts(reader); + + let differing_counts: Vec<_> = counts + .iter() + .filter_map(|(&name, &new_count)| { + let old_count = *old_counts.get(name)?; + (old_count != new_count).then_some((name, old_count, new_count)) + }) + .collect(); + + if !differing_counts.is_empty() { + println!("## ⚠ Detected differing instruction counts"); + println!("| Benchmark | Old count | New count |"); + println!("|-----------|----------:|----------:|"); + for (name, old_count, new_count) in differing_counts { + println!("| {name} | {old_count} | {new_count} |"); + } + println!( + "\nChanges in number of opcodes executed indicate that the gas price of the benchmark has changed, \ + which causes it to run out of gas at a different time." 
+ ); + } + } + } + } +} + +fn main() { + Command::from_env().run(); } diff --git a/core/tests/vm-benchmark/src/criterion.rs b/core/tests/vm-benchmark/src/criterion.rs index 9515ac4ef98..024ccf14139 100644 --- a/core/tests/vm-benchmark/src/criterion.rs +++ b/core/tests/vm-benchmark/src/criterion.rs @@ -57,7 +57,7 @@ struct VmBenchmarkMetrics { static METRICS: vise::Global = vise::Global::new(); #[derive(Debug)] -struct PrometheusRuntime { +pub struct PrometheusRuntime { stop_sender: watch::Sender, _runtime: tokio::runtime::Runtime, } @@ -72,7 +72,7 @@ impl Drop for PrometheusRuntime { } impl PrometheusRuntime { - fn new() -> Option { + pub fn new() -> Option { const PUSH_INTERVAL: Duration = Duration::from_millis(100); let gateway_url = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL").ok()?; @@ -164,7 +164,7 @@ thread_local! { static BIN_NAME: SyncOnceCell<&'static str> = SyncOnceCell::new(); -/// Measurement for criterion that exports . +/// Measurement for criterion that exports timing-related metrics. #[derive(Debug)] pub struct MeteredTime { _prometheus: Option, diff --git a/core/tests/vm-benchmark/src/lib.rs b/core/tests/vm-benchmark/src/lib.rs index 4bd008d3319..9c4f547c1de 100644 --- a/core/tests/vm-benchmark/src/lib.rs +++ b/core/tests/vm-benchmark/src/lib.rs @@ -6,7 +6,7 @@ pub use crate::{ get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, }, - vm::{BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, VmLabel}, + vm::{BenchmarkingVm, BenchmarkingVmFactory, CountInstructions, Fast, Legacy, VmLabel}, }; pub mod criterion; diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index 922fb24512b..0f3018dd486 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -72,19 +72,21 @@ pub trait BenchmarkingVmFactory { system_env: SystemEnv, storage: &'static InMemoryStorage, ) -> Self::Instance; +} +pub trait CountInstructions { /// Counts instructions executed by the VM while processing the transaction. fn count_instructions(tx: &Transaction) -> usize; } /// Factory for the new / fast VM. 
#[derive(Debug)] -pub struct Fast(Tr); +pub struct Fast; -impl BenchmarkingVmFactory for Fast { +impl BenchmarkingVmFactory for Fast { const LABEL: VmLabel = VmLabel::Fast; - type Instance = vm_fast::Vm<&'static InMemoryStorage, Tr>; + type Instance = vm_fast::Vm<&'static InMemoryStorage>; fn create( batch_env: L1BatchEnv, @@ -93,27 +95,30 @@ impl BenchmarkingVmFactory for Fast ) -> Self::Instance { vm_fast::Vm::custom(batch_env, system_env, storage) } +} +impl CountInstructions for Fast { fn count_instructions(tx: &Transaction) -> usize { - let mut vm = BenchmarkingVm::>::default(); - vm.0.push_transaction(tx.clone()); + use vm_fast::interface as vm2; #[derive(Default)] struct InstructionCount(usize); - impl vm_fast::Tracer for InstructionCount { - fn before_instruction< - OP: zksync_vm2::interface::OpcodeType, - S: zksync_vm2::interface::GlobalStateInterface, - >( + + impl vm2::Tracer for InstructionCount { + fn before_instruction( &mut self, _: &mut S, ) { self.0 += 1; } } - let mut tracer = InstructionCount(0); - vm.0.inspect(&mut tracer, InspectExecutionMode::OneTx); + let (system_env, l1_batch_env) = test_env(); + let mut vm = + vm_fast::Vm::<_, InstructionCount>::custom(l1_batch_env, system_env, &*STORAGE); + vm.push_transaction(tx.clone()); + let mut tracer = InstructionCount(0); + vm.inspect(&mut tracer, InspectExecutionMode::OneTx); tracer.0 } } @@ -135,7 +140,9 @@ impl BenchmarkingVmFactory for Legacy { let storage = StorageView::new(storage).to_rc_ptr(); vm_latest::Vm::new(batch_env, system_env, storage) } +} +impl CountInstructions for Legacy { fn count_instructions(tx: &Transaction) -> usize { let mut vm = BenchmarkingVm::::default(); vm.0.push_transaction(tx.clone()); @@ -150,41 +157,44 @@ impl BenchmarkingVmFactory for Legacy { } } +fn test_env() -> (SystemEnv, L1BatchEnv) { + let timestamp = unix_timestamp_ms(); + let system_env = SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: SYSTEM_CONTRACTS.clone(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + }; + let l1_batch_env = L1BatchEnv { + previous_batch_hash: None, + number: L1BatchNumber(1), + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + }; + (system_env, l1_batch_env) +} + #[derive(Debug)] pub struct BenchmarkingVm(VM::Instance); impl Default for BenchmarkingVm { fn default() -> Self { - let timestamp = unix_timestamp_ms(); - Self(VM::create( - L1BatchEnv { - previous_batch_hash: None, - number: L1BatchNumber(1), - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - }, - SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: SYSTEM_CONTRACTS.clone(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - 
default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - &STORAGE, - )) + let (system_env, l1_batch_env) = test_env(); + Self(VM::create(l1_batch_env, system_env, &STORAGE)) } } @@ -231,7 +241,7 @@ impl BenchmarkingVm { // use super::*; // use crate::{ // get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, -// get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, +// get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, BYTECODES, // }; // // #[test] @@ -282,4 +292,22 @@ impl BenchmarkingVm { // let res = vm.run_transaction(&get_heavy_load_test_tx(1)); // assert_matches!(res.result, ExecutionResult::Success { .. }); // } +// +// #[test] +// fn instruction_count_matches_on_both_vms_for_transfer() { +// let tx = get_transfer_tx(0); +// let legacy_count = Legacy::count_instructions(&tx); +// let fast_count = Fast::count_instructions(&tx); +// assert_eq!(legacy_count, fast_count); +// } +// +// #[test] +// fn instruction_count_matches_on_both_vms_for_benchmark_bytecodes() { +// for bytecode in BYTECODES { +// let tx = bytecode.deploy_tx(); +// let legacy_count = Legacy::count_instructions(&tx); +// let fast_count = Fast::count_instructions(&tx); +// assert_eq!(legacy_count, fast_count, "bytecode: {}", bytecode.name); +// } +// } // } diff --git a/etc/env/base/proof_data_handler.toml b/etc/env/base/proof_data_handler.toml index 7a1999a03c3..b56ac26fb17 100644 --- a/etc/env/base/proof_data_handler.toml +++ b/etc/env/base/proof_data_handler.toml @@ -1,4 +1,5 @@ [proof_data_handler] http_port = 3320 proof_generation_timeout_in_secs = 18000 +tee_proof_generation_timeout_in_secs = 600 tee_support = true diff --git a/etc/env/ecosystems/mainnet.yaml b/etc/env/ecosystems/mainnet.yaml index 7d4266e8b76..f7b09150793 100644 --- a/etc/env/ecosystems/mainnet.yaml +++ b/etc/env/ecosystems/mainnet.yaml @@ -1,3 +1,5 @@ +create2_factory_addr: 0xce0042b868300000d44a59004da54a005ffdcf9f +create2_factory_salt: '0x0000000000000000000000000000000000000000000000000000000000000000' ecosystem_contracts: bridgehub_proxy_addr: 0x303a465B659cBB0ab36eE643eA362c509EEb5213 state_transition_proxy_addr: 0xc2eE6b6af7d616f6e27ce7F4A451Aedc2b0F5f5C @@ -17,3 +19,6 @@ l1: verifier_addr: 0x70F3FBf8a427155185Ec90BED8a3434203de9604 validator_timelock_addr: 0x5D8ba173Dc6C3c90C8f7C04C9288BeF5FDbAd06E base_token_addr: '0x0000000000000000000000000000000000000000' +l2: + testnet_paymaster_addr: '0x0000000000000000000000000000000000000000' + default_l2_upgrader: '0x0000000000000000000000000000000000000000' diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 587ba4614a5..5abee904765 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -106,7 +106,7 @@ eth: max_eth_tx_data_size: 120000 aggregated_proof_sizes: [ 1 ] max_aggregated_tx_gas: 15000000 - max_acceptable_priority_fee_in_gwei: 100000000000 + max_acceptable_priority_fee_in_gwei: 100000000000 # typo: value is in wei (100 gwei) pubdata_sending_mode: BLOBS gas_adjuster: default_priority_fee_per_gas: 1000000000 @@ -169,6 +169,7 @@ witness_vector_generator: data_handler: http_port: 3320 proof_generation_timeout_in_secs: 18000 + tee_proof_generation_timeout_in_secs: 600 tee_support: true prover_gateway: api_url: http://127.0.0.1:3320 diff --git a/etc/env/file_based/overrides/mainnet.yaml b/etc/env/file_based/overrides/mainnet.yaml index 7565aac869a..847f9ae98aa 100644 --- a/etc/env/file_based/overrides/mainnet.yaml +++ 
b/etc/env/file_based/overrides/mainnet.yaml @@ -11,6 +11,7 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + l1_batch_min_age_before_execute_seconds: 76000 # 21h wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.06 diff --git a/etc/env/file_based/overrides/testnet.yaml b/etc/env/file_based/overrides/testnet.yaml index d36cf9fc7bc..4643a963ed7 100644 --- a/etc/env/file_based/overrides/testnet.yaml +++ b/etc/env/file_based/overrides/testnet.yaml @@ -11,6 +11,7 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + l1_batch_min_age_before_execute_seconds: 1500 # 25m wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.1 diff --git a/etc/env/file_based/overrides/tests/integration.yaml b/etc/env/file_based/overrides/tests/integration.yaml new file mode 100644 index 00000000000..6ad031e2945 --- /dev/null +++ b/etc/env/file_based/overrides/tests/integration.yaml @@ -0,0 +1,4 @@ +experimental_vm: + # Use the shadow VM mode everywhere to catch divergences as early as possible + state_keeper_fast_vm_mode: SHADOW + api_fast_vm_mode: SHADOW diff --git a/etc/env/file_based/overrides/tests/loadtest-new.yaml b/etc/env/file_based/overrides/tests/loadtest-new.yaml index 2167f7347e0..e66625636b1 100644 --- a/etc/env/file_based/overrides/tests/loadtest-new.yaml +++ b/etc/env/file_based/overrides/tests/loadtest-new.yaml @@ -1,7 +1,11 @@ db: merkle_tree: mode: LIGHTWEIGHT +api: + web3_json_rpc: + estimate_gas_optimize_search: true experimental_vm: state_keeper_fast_vm_mode: NEW + api_fast_vm_mode: NEW mempool: delay_interval: 50 diff --git a/etc/env/file_based/overrides/tests/loadtest-old.yaml b/etc/env/file_based/overrides/tests/loadtest-old.yaml index a2d66d1cf4a..7b1a3587018 100644 --- a/etc/env/file_based/overrides/tests/loadtest-old.yaml +++ b/etc/env/file_based/overrides/tests/loadtest-old.yaml @@ -3,5 +3,6 @@ db: mode: LIGHTWEIGHT experimental_vm: state_keeper_fast_vm_mode: OLD + api_fast_vm_mode: OLD mempool: delay_interval: 50 diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 928d105582f..46feff624f1 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -326,7 +326,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "itoa", "matchit", @@ -341,7 +341,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -651,9 +651,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c681a3f867afe40bcc188e5cb5260bbf5699531823affa3cbe28f7ca9b7bc9" +checksum = "4b63a717789f92f16fd566c78655d64017c690be59e473c3e769080c975a1f9e" dependencies = [ "boojum", "cmake", @@ -694,7 +694,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", - "regex-automata 0.4.6", + "regex-automata 0.4.8", "serde", ] @@ -799,11 +799,11 @@ dependencies = [ [[package]] name = "circuit_definitions" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "492404ea63c934d8e894325f0a741723bf91cd035cb34a92fddd8617c4a00fd3" +checksum = 
"76be9ee6e75f1f948d175ab9820ecc7189f72154c95ca503a1974012356f5363" dependencies = [ - "circuit_encodings 0.150.6", + "circuit_encodings 0.150.7", "crossbeam", "derivative", "seq-macro", @@ -849,14 +849,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" +checksum = "2501cc688ef391013019495ae7035cfd54f86987e36d10f73976ce4c5d413c5a" dependencies = [ "derivative", "serde", - "zk_evm 0.150.6", - "zkevm_circuits 0.150.6", + "zk_evm 0.150.7", + "zkevm_circuits 0.150.7", ] [[package]] @@ -916,11 +916,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" +checksum = "917d27db531fdd98a51e42ea465bc097f48cc849e7fad68d7856087d15125be1" dependencies = [ - "circuit_encodings 0.150.6", + "circuit_encodings 0.150.7", "derivative", "rayon", "serde", @@ -1605,6 +1605,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "either" version = "1.12.0" @@ -1678,6 +1690,26 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "enum_dispatch" version = "0.3.13" @@ -1741,9 +1773,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e1990fee6e9d25b40524ce53ca7977a211155a17bc7277f4dd354633e4fc22" +checksum = "ad950752eeb44f8938be405b95a1630f82e903f4a7adda355d92aad135fcd382" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1752,9 +1784,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d84e8d300c28cd91ceb56340f66da8607409f44a45f5e694e23723630db8c852" +checksum = "c38607d52509b5db97cc4447c8644d6c5ca84f22ff8a9254f984669b1eb82ed4" dependencies = [ "serde_json", ] @@ -2610,9 +2642,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -2639,7 +2671,7 @@ dependencies = [ "futures-util", "headers", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-util", "pin-project-lite", @@ -2657,7 
+2689,7 @@ checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "log", "rustls", @@ -2674,7 +2706,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "pin-project-lite", "tokio", @@ -2702,7 +2734,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "native-tls", "tokio", @@ -2712,20 +2744,19 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.5.0", "pin-project-lite", "socket2", "tokio", - "tower", "tower-service", "tracing", ] @@ -3075,7 +3106,7 @@ dependencies = [ "async-trait", "base64 0.22.1", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-util", "jsonrpsee-core", @@ -3086,7 +3117,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tracing", "url", ] @@ -3206,9 +3237,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" +checksum = "efffeb3df0bd4ef3e5d65044573499c0e4889b988070b08c50b25b1329289a1f" dependencies = [ "k8s-openapi", "kube-client", @@ -3219,9 +3250,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" +checksum = "8bf471ece8ff8d24735ce78dac4d091e9fcb8d74811aeb6b75de4d1c3f5de0f1" dependencies = [ "base64 0.22.1", "bytes", @@ -3232,7 +3263,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-http-proxy", "hyper-rustls", "hyper-timeout", @@ -3243,23 +3274,23 @@ dependencies = [ "pem", "rustls", "rustls-pemfile 2.1.2", - "secrecy", + "secrecy 0.10.3", "serde", "serde_json", "serde_yaml", "thiserror", "tokio", "tokio-util", - "tower", + "tower 0.5.1", "tower-http", "tracing", ] [[package]] name = "kube-core" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" +checksum = "f42346d30bb34d1d7adc5c549b691bce7aa3a1e60254e68fab7e2d7b26fe3d77" dependencies = [ "chrono", "form_urlencoded", @@ -3275,9 +3306,9 @@ dependencies = [ [[package]] name = "kube-derive" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa98be978eddd70a773aa8e86346075365bfb7eb48783410852dbf7cb57f0c27" +checksum = "f9364e04cc5e0482136c6ee8b7fb7551812da25802249f35b3def7aaa31e82ad" dependencies = [ "darling 0.20.10", "proc-macro2 1.0.85", @@ -3288,16 +3319,16 @@ dependencies = [ [[package]] name = "kube-runtime" -version = "0.95.0" +version = "0.96.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5895cb8aa641ac922408f128b935652b34c2995f16ad7db0984f6caa50217914" +checksum = "d3fbf1f6ffa98e65f1d2a9a69338bb60605d46be7edf00237784b89e62c9bd44" dependencies = [ "ahash 0.8.11", "async-broadcast", "async-stream", "async-trait", "backoff", - "derivative", + "educe", "futures 0.3.30", "hashbrown 0.14.5", "json-patch", @@ -4423,7 +4454,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.3", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -4769,14 +4800,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.6", - "regex-syntax 0.8.3", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -4790,13 +4821,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.3", + "regex-syntax 0.8.5", ] [[package]] @@ -4807,9 +4838,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rend" @@ -4876,7 +4907,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-tls 0.6.0", "hyper-util", @@ -5321,7 +5352,15 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ - "serde", + "zeroize", +] + +[[package]] +name = "secrecy" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a" +dependencies = [ "zeroize", ] @@ -5682,9 +5721,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92776ca824f49c255a7417939706d759e0fd3dd4217420d01da68beae04f0bd6" +checksum = "9d2ac4440b6c23005c43a81cf064b9aa123fbeb992ac91cd04c7d485abb1fbea" dependencies = [ "bincode", "blake2 0.10.6", @@ -6542,7 +6581,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-timeout", "hyper-util", "percent-encoding", @@ -6551,7 +6590,7 @@ dependencies = [ "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -6577,18 +6616,34 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "bitflags 2.6.0", "bytes", "http 1.1.0", "http-body 1.0.0", - "http-body-util", "mime", "pin-project-lite", "tower-layer", @@ -6598,15 +6653,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -7471,9 +7526,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" +checksum = "3cc74fbe2b45fd19e95c59ea792c795feebdb616ebaa463f0ac567f495f47387" dependencies = [ "anyhow", "lazy_static", @@ -7481,7 +7536,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.6", + "zk_evm_abstractions 0.150.7", ] [[package]] @@ -7512,22 +7567,22 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" +checksum = "37f333a3b059899df09e40deb041af881bc03e496fda5eec618ffb5e854ee7df" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", ] [[package]] name = "zkevm-assembly" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc743ac7b0d618536dc3ace798fd4b8af78b057884afda5785c7970e15d62d0" +checksum = "cf011a0c83cbfb175f1e60811f0e0cd56551c9e35df596a762556662c638deb9" dependencies = [ "env_logger 0.9.3", "hex", @@ -7540,7 +7595,7 @@ dependencies = [ "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", ] [[package]] @@ -7589,9 +7644,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" +checksum = "d06fb35b00699d25175a2ad447f86a9088af8b0bc698bb57086fb04c13e52eab" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7603,7 +7658,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.6", + "zkevm_opcode_defs 0.150.7", "zksync_cs_derive", ] @@ -7651,9 +7706,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.6" +version = 
"0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" +checksum = "b83f3b279248af4ca86dec20a54127f02110b45570f3f6c1d13df49ba75c28a5" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -7668,13 +7723,13 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73ad3e73d290a38a35dd245fd68cb6f498a8a8da4a52f846e88da3d3c31a34fd" +checksum = "d9c801aa17e9009699aacf654588d6adfaeeb8a490b2d9121847c201e2766803" dependencies = [ "bincode", "circuit_definitions", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "codegen", "crossbeam", "derivative", @@ -7695,9 +7750,9 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d555e24b853359c5b076c52f9ff9e0ed62a7edc8c2f82f93517c524410c21ecb" +checksum = "5688dc060456f6c1e790d589f3abd6d9e9a11eb393d7383fbeb23b55961951e0" dependencies = [ "cmake", "crossbeam", @@ -7710,9 +7765,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "615dad34e5fe678ec3b3e029af3f19313bebb1b771a8ce963c9ab9a8cc3879d3" +checksum = "5714848e6f8361820346483246dd68b4e7fb05ec41dd6610a8b53fb5c3ca7f3a" dependencies = [ "bit-vec", "cfg-if", @@ -7727,9 +7782,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.151.0" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80721b2da2643bd43f664ac65673ee078e6973c0a88d75b73bfaeac8e1bf5432" +checksum = "52a6a1863818d939d445c53af57e53c222f11c2c94b9a94c3612dd938a3d983c" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -7745,7 +7800,7 @@ dependencies = [ "ethabi", "hex", "num_enum 0.7.2", - "secrecy", + "secrecy 0.8.0", "serde", "serde_json", "serde_with", @@ -7830,7 +7885,7 @@ version = "0.1.0" dependencies = [ "anyhow", "rand 0.8.5", - "secrecy", + "secrecy 0.8.0", "serde", "strum", "strum_macros", @@ -8097,9 +8152,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" +checksum = "dc58af8e4e4ad1a851ffd2275e6a44ead0f15a7eaac9dc9d60a56b3b9c9b08e8" dependencies = [ "boojum", "derivative", @@ -8109,7 +8164,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.6", + "zkevm_circuits 0.150.7", ] [[package]] @@ -8147,7 +8202,7 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "ethabi", "hex", "itertools 0.10.5", @@ -8159,7 +8214,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.6", + "zk_evm 0.150.7", "zksync_contracts", "zksync_mini_merkle_tree", "zksync_system_constants", @@ -8212,7 +8267,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.6", + "circuit_sequencer_api 0.150.7", "clap 4.5.4", "ctrlc", "futures 0.3.30", @@ -8285,7 +8340,7 @@ dependencies = [ "hex", "prost 0.12.6", "rand 0.8.5", - "secrecy", + "secrecy 0.8.0", "serde_json", "serde_yaml", "time", 
@@ -8439,7 +8494,7 @@ name = "zksync_prover_interface"
 version = "0.1.0"
 dependencies = [
  "chrono",
- "circuit_sequencer_api 0.150.6",
+ "circuit_sequencer_api 0.150.7",
  "serde",
  "serde_with",
  "strum",
@@ -8637,8 +8692,8 @@ source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0cc
 dependencies = [
  "enum_dispatch",
  "primitive-types",
- "zk_evm_abstractions 0.150.6",
- "zkevm_opcode_defs 0.150.6",
+ "zk_evm_abstractions 0.150.7",
+ "zkevm_opcode_defs 0.150.7",
  "zksync_vm2_interface",
 ]

diff --git a/prover/Cargo.toml b/prover/Cargo.toml
index af022e691c1..32c3185f64c 100644
--- a/prover/Cargo.toml
+++ b/prover/Cargo.toml
@@ -32,7 +32,7 @@ indicatif = "0.16"
 itertools = "0.10.5"
 jemallocator = "0.5"
 k8s-openapi = { version = "0.23.0", features = ["v1_30"] }
-kube = { version = "0.95.0", features = ["runtime", "derive"] }
+kube = { version = "0.96.0", features = ["runtime", "derive"] }
 local-ip-address = "0.5.0"
 log = "0.4.20"
 md5 = "0.7.0"
@@ -63,13 +63,13 @@ url = "2.5.2"
 vise = "0.2.0"

 # Proving dependencies
-circuit_definitions = "=0.150.6"
-circuit_sequencer_api = "=0.150.6"
-zkevm_test_harness = "=0.150.6"
+circuit_definitions = "=0.150.7"
+circuit_sequencer_api = "=0.150.7"
+zkevm_test_harness = "=0.150.7"

 # GPU proving dependencies
-wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.151.0" }
-shivini = "=0.151.0"
+wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.151.1" }
+shivini = "=0.151.1"

 # Core workspace dependencies
 zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" }
diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs
index b800b86f3c2..e3e4c9b4df0 100644
--- a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs
+++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs
@@ -29,11 +29,18 @@ where
     ordered.serialize(serializer)
 }

+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct ScaleEvent {
+    pub name: String,
+    pub time: DateTime<Utc>,
+}
+
 #[derive(Debug, Default, Clone, Serialize, Deserialize)]
 pub struct Namespace {
     #[serde(serialize_with = "ordered_map")]
     pub deployments: HashMap<String, Deployment>,
     pub pods: HashMap<String, Pod>,
+    pub scale_errors: Vec<ScaleEvent>,
 }

 #[derive(Debug, Clone, Default, Serialize, Deserialize)]
diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs
index 884174562a1..eb4249d071f 100644
--- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs
+++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs
@@ -21,7 +21,7 @@ struct GPUPool {
     name: String,
     gpu: Gpu,
     provers: HashMap, // TODO: consider using i64 everywhere to avoid type casts.
-    preemtions: u64,
+    scale_errors: usize,
     max_pool_size: u32,
 }

@@ -140,6 +140,11 @@ impl Scaler {
                 .and_then(|inner_map| inner_map.get(&gpu))
                 .copied()
                 .unwrap_or(0),
+            scale_errors: namespace_value
+                .scale_errors
+                .iter()
+                .filter(|v| v.time < Utc::now() - chrono::Duration::hours(1)) // TODO Move the duration into config.
+                .count(),
             ..Default::default()
         });

@@ -147,6 +152,12 @@ impl Scaler {
             e.provers.insert(PodStatus::Running, 0);
         }

+        let recent_scale_errors = namespace_value
+            .scale_errors
+            .iter()
+            .filter(|v| v.time < Utc::now() - chrono::Duration::minutes(4)) // TODO Move the duration into config. This should be at least x2 of run interval.
+ .count(); + for ppg in namespace_value .pods .iter() @@ -158,10 +169,12 @@ impl Scaler { ..Default::default() }); let mut status = PodStatus::from_str(&ppg.pod.status).unwrap_or_default(); - if status == PodStatus::Pending - && ppg.pod.changed < Utc::now() - self.long_pending_duration - { - status = PodStatus::LongPending; + if status == PodStatus::Pending { + if ppg.pod.changed < Utc::now() - self.long_pending_duration { + status = PodStatus::LongPending; + } else if recent_scale_errors > 0 { + status = PodStatus::NeedToMove; + } } tracing::info!( "pod {}: status: {}, real status: {}", @@ -172,7 +185,7 @@ impl Scaler { e.provers.entry(status).and_modify(|n| *n += 1).or_insert(1); } - tracing::info!("From pods {:?}", gp_map.sorted_debug()); + tracing::debug!("From pods {:?}", gp_map.sorted_debug()); gp_map.into_values().collect() } @@ -195,7 +208,7 @@ impl Scaler { a.sum_by_pod_status(PodStatus::LongPending) .cmp(&b.sum_by_pod_status(PodStatus::LongPending)), ) // Sort by long Pending pods. - .then(a.preemtions.cmp(&b.preemtions)) // Sort by preemtions in the cluster. + .then(a.scale_errors.cmp(&b.scale_errors)) // Sort by scale_errors in the cluster. .then( self.cluster_priorities .get(&a.name) @@ -455,6 +468,7 @@ mod tests { }, )] .into(), + ..Default::default() }, )] .into(), @@ -521,6 +535,7 @@ mod tests { }, )] .into(), + ..Default::default() }, )] .into(), @@ -681,6 +696,7 @@ mod tests { ) ] .into(), + ..Default::default() }, )] .into(), @@ -718,6 +734,7 @@ mod tests { ) ] .into(), + ..Default::default() }, )] .into(), diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs index f94dfc3704f..5384db082bc 100644 --- a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, sync::Arc}; -use chrono::Utc; +use chrono::{DateTime, Utc}; use futures::{stream, StreamExt, TryStreamExt}; use k8s_openapi::api; use kube::{ @@ -9,7 +9,7 @@ use kube::{ }; use tokio::sync::Mutex; -use crate::cluster_types::{Cluster, Deployment, Namespace, Pod}; +use crate::cluster_types::{Cluster, Deployment, Namespace, Pod, ScaleEvent}; #[derive(Clone)] pub struct Watcher { @@ -62,6 +62,15 @@ impl Watcher { .map_ok(Watched::Pod) .boxed(), ); + + let events: Api = Api::namespaced(self.client.clone(), namespace); + watchers.push( + watcher(events, watcher::Config::default()) + .default_backoff() + .applied_objects() + .map_ok(Watched::Event) + .boxed(), + ); } // select on applied events from all watchers let mut combo_stream = stream::select_all(watchers); @@ -70,61 +79,92 @@ impl Watcher { enum Watched { Deploy(api::apps::v1::Deployment), Pod(api::core::v1::Pod), + Event(api::core::v1::Event), } - while let Some(o) = combo_stream.try_next().await? 
{ + while let Some(o) = combo_stream.next().await { match o { - Watched::Deploy(d) => { - let namespace = match d.namespace() { - Some(n) => n.to_string(), - None => continue, - }; - let mut cluster = self.cluster.lock().await; - let v = cluster.namespaces.get_mut(&namespace).unwrap(); - let dep = v - .deployments - .entry(d.name_any()) - .or_insert(Deployment::default()); - let nums = d.status.clone().unwrap_or_default(); - dep.running = nums.available_replicas.unwrap_or_default(); - dep.desired = nums.replicas.unwrap_or_default(); + Ok(o) => match o { + Watched::Deploy(d) => { + let namespace = match d.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let dep = v + .deployments + .entry(d.name_any()) + .or_insert(Deployment::default()); + let nums = d.status.clone().unwrap_or_default(); + dep.running = nums.available_replicas.unwrap_or_default(); + dep.desired = nums.replicas.unwrap_or_default(); - tracing::info!( - "Got deployment: {}, size: {}/{} un {}", - d.name_any(), - nums.available_replicas.unwrap_or_default(), - nums.replicas.unwrap_or_default(), - nums.unavailable_replicas.unwrap_or_default(), - ) - } - Watched::Pod(p) => { - let namespace = match p.namespace() { - Some(n) => n.to_string(), - None => continue, - }; - let mut cluster = self.cluster.lock().await; - let v = cluster.namespaces.get_mut(&namespace).unwrap(); - let pod = v.pods.entry(p.name_any()).or_insert(Pod::default()); - pod.owner = p - .owner_references() - .iter() - .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone())) - .collect::>() - .join(":"); - // TODO: Collect replica sets to match deployments and pods. - let phase = p - .status - .clone() - .unwrap_or_default() - .phase - .unwrap_or_default(); - if phase != pod.status { - // TODO: try to get an idea how to set correct value on restart. - pod.changed = Utc::now(); + tracing::info!( + "Got deployment: {}, size: {}/{} un {}", + d.name_any(), + nums.available_replicas.unwrap_or_default(), + nums.replicas.unwrap_or_default(), + nums.unavailable_replicas.unwrap_or_default(), + ) } - pod.status = phase; + Watched::Pod(p) => { + let namespace = match p.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let pod = v.pods.entry(p.name_any()).or_insert(Pod::default()); + pod.owner = p + .owner_references() + .iter() + .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone())) + .collect::>() + .join(":"); + // TODO: Collect replica sets to match deployments and pods. + let phase = p + .status + .clone() + .unwrap_or_default() + .phase + .unwrap_or_default(); + if phase != pod.status { + // TODO: try to get an idea how to set correct value on restart. + pod.changed = Utc::now(); + } + pod.status = phase; - tracing::info!("Got pod: {}", p.name_any()) - } + tracing::info!("Got pod: {}", p.name_any()) + } + Watched::Event(e) => { + let namespace: String = match e.namespace() { + Some(n) => n, + None => "".into(), + }; + let name = e.name_any(); + let reason = e.reason.unwrap_or_default(); + if reason != "FailedScaleUp" { + // Ignore all events which are not scale issues. 
+                            continue;
+                        }
+                        let time: DateTime<Utc> = match e.last_timestamp {
+                            Some(t) => t.0,
+                            None => Utc::now(),
+                        };
+                        tracing::debug!(
+                            "Got event: {}/{}, message: {:?}; action: {:?}, reason: {:?}",
+                            namespace,
+                            name,
+                            e.message.unwrap_or_default(),
+                            e.action.unwrap_or_default(),
+                            reason
+                        );
+                        let mut cluster = self.cluster.lock().await;
+                        let v = cluster.namespaces.get_mut(&namespace).unwrap();
+                        v.scale_errors.push(ScaleEvent { name, time })
+                    }
+                },
+                Err(err) => tracing::warn!("Error during watch: {err:?}"),
             }
         }
diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs
index 0dcc8e03378..0f4d3673a7d 100644
--- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs
@@ -30,7 +30,6 @@ pub async fn deploy_l1(
     let default_genesis_config =
         GenesisConfig::read_with_base_path(shell, config.get_default_configs_path())
             .context("failed reading genesis config")?;
-    dbg!(2);
     let wallets_config = config.get_wallets()?;

     // For deploying ecosystem we only need genesis batch params

From c291a2b2d1e8381d28f19596bf1736974c853401 Mon Sep 17 00:00:00 2001
From: perekopskiy <53865202+perekopskiy@users.noreply.github.com>
Date: Tue, 29 Oct 2024 16:04:50 +0200
Subject: [PATCH 140/140] chore: restore DA hash test (#3191)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Restore test + clean up

## Why ❔

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`.
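For context, the restored test recomputes the expected `l2DAValidatorOutputHash` preimage and compares it with the system L2->L1 log emitted at the end of the batch. The sketch below is illustrative only: it assumes `keccak256` from `zksync_types::web3` and takes a caller-supplied `bytes_per_blob` parameter in place of the `ZK_SYNC_BYTES_PER_BLOB` constant used by the real test in `core/lib/multivm/src/versions/testonly/l1_messenger.rs`.

```rust
// Sketch of the DA output header composition checked by the restored test.
use zksync_types::web3::keccak256;

fn compose_da_output_header(
    uncompressed_state_diffs: &[u8],
    mut full_pubdata: Vec<u8>,
    bytes_per_blob: usize,
) -> Vec<u8> {
    let mut header = Vec::new();
    // 32 bytes: hash of the uncompressed state diff encoding.
    header.extend(keccak256(uncompressed_state_diffs));
    // 32 bytes: hash of the full (unpadded) pubdata.
    header.extend(keccak256(&full_pubdata));
    // Pad the pubdata to a whole number of blobs, then record the blob count as one byte.
    if full_pubdata.len() % bytes_per_blob != 0 {
        full_pubdata.extend(vec![0u8; bytes_per_blob - full_pubdata.len() % bytes_per_blob]);
    }
    header.push((full_pubdata.len() / bytes_per_blob) as u8);
    // 32 bytes per blob: linear hash of each padded blob.
    for blob in full_pubdata.chunks(bytes_per_blob) {
        header.extend(keccak256(blob));
    }
    header
}
```

The test then asserts that `keccak256` of this header equals the value logged under `L2_DA_VALIDATOR_OUTPUT_HASH_KEY`, and that the logged DA validator address matches the deployed `l2_rollup_da_validator` contract.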
--- .github/scripts/gateway.sh | 50 ------ bin/last_chain_id | 1 - bin/start_sync_layer_gateway.sh | 22 --- bin/start_sync_layer_l3_chain.sh | 30 ---- core/lib/basic_types/src/vm_version.rs | 23 --- core/lib/multivm/src/versions/shadow/tests.rs | 40 ++++- .../src/versions/testonly/l1_messenger.rs | 166 ++++++++++++++++++ core/lib/multivm/src/versions/testonly/mod.rs | 1 + .../multivm/src/versions/testonly/refunds.rs | 9 +- .../src/versions/testonly/tester/mod.rs | 15 +- .../versions/vm_fast/tests/l1_messenger.rs | 6 + .../multivm/src/versions/vm_fast/tests/mod.rs | 19 +- .../versions/vm_latest/tests/l1_messenger.rs | 165 +---------------- .../src/versions/vm_latest/tests/mod.rs | 15 +- core/lib/vm_interface/src/pubdata/mod.rs | 4 +- 15 files changed, 263 insertions(+), 303 deletions(-) delete mode 100755 .github/scripts/gateway.sh delete mode 100644 bin/last_chain_id delete mode 100755 bin/start_sync_layer_gateway.sh delete mode 100755 bin/start_sync_layer_l3_chain.sh delete mode 100644 core/lib/basic_types/src/vm_version.rs create mode 100644 core/lib/multivm/src/versions/testonly/l1_messenger.rs create mode 100644 core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs diff --git a/.github/scripts/gateway.sh b/.github/scripts/gateway.sh deleted file mode 100755 index ed9e8a32dc8..00000000000 --- a/.github/scripts/gateway.sh +++ /dev/null @@ -1,50 +0,0 @@ -sudo rm -rf ./volumes && zk_supervisor clean containers && zk_inception up -o false - -zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ - --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_era \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_era \ - --ignore-prerequisites --observability=false --skip-submodules-checkout \ - --chain era \ - # --skip-contract-compilation-override \ - -zk_inception chain create \ - --chain-name gateway \ - --chain-id 505 \ - --prover-mode no-proofs \ - --wallet-creation localhost \ - --l1-batch-commit-data-generator-mode rollup \ - --base-token-address 0x0000000000000000000000000000000000000001 \ - --base-token-price-nominator 1 \ - --base-token-price-denominator 1 \ - --set-as-default false \ - --ignore-prerequisites --skip-submodules-checkout --skip-contract-compilation-override - -zk_inception chain init \ - --deploy-paymaster \ - --l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_gateway \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_gateway \ - --chain gateway --skip-submodules-checkout - -zk_inception chain convert-to-gateway --chain gateway --ignore-prerequisites - -zk_inception server --ignore-prerequisites --chain gateway &> ./gateway.log & - -sleep 20 - -zk_inception chain migrate-to-gateway --chain era --gateway-chain-name gateway - -zk_inception chain migrate-from-gateway --chain era --gateway-chain-name gateway - -zk_inception chain migrate-to-gateway --chain era --gateway-chain-name gateway - -zk_inception server --ignore-prerequisites --chain era &> ./rollup.log & - -sleep 20 - -zk_supervisor test integration --no-deps --ignore-prerequisites --chain era diff --git a/bin/last_chain_id b/bin/last_chain_id deleted file mode 100644 index 900731ffd51..00000000000 --- a/bin/last_chain_id +++ /dev/null @@ -1 +0,0 @@ -64 
diff --git a/bin/start_sync_layer_gateway.sh b/bin/start_sync_layer_gateway.sh deleted file mode 100755 index 0815c9affa3..00000000000 --- a/bin/start_sync_layer_gateway.sh +++ /dev/null @@ -1,22 +0,0 @@ -set -euox pipefail - -zk -zk env dev -zk config compile -zk down -rm -rf "$ZKSYNC_HOME"db/* -rm -f gateway_server.log -mkdir -p "$ZKSYNC_HOME"volumes/postgres -mkdir -p "$ZKSYNC_HOME"volumes/reth/data -zk init -- --should-check-postgres false --skip-submodules-checkout - -zk server &>gateway_server.log & -sleep 5 - -#Prepare the server to be the synclayer -zk dev2 supply-rich-wallets -zk contract build --zkSync -zk contract prepare-sync-layer -zk contract register-sync-layer-counterpart - -sleep 360000; diff --git a/bin/start_sync_layer_l3_chain.sh b/bin/start_sync_layer_l3_chain.sh deleted file mode 100755 index 87b07817848..00000000000 --- a/bin/start_sync_layer_l3_chain.sh +++ /dev/null @@ -1,30 +0,0 @@ -set -euox pipefail - - -CHAIN_ID=$( last_chain_id - -DIFF=$((CHAIN_ID * 100)) -CHAIN_ID=$((CHAIN_ID + 1000)) - -rm -f l3_server_pre_gateway.log - -#Prepare launch sync-layer based chain -zk env dev -zk config prepare-l1-hyperchain --env-name test-chain --chain-id $CHAIN_ID -zk env test-chain -zk config compile test-chain --diff $DIFF -zk init hyper -- --skip-contract-compilation -zk server --time-to-live 40 &>l3_server_pre_gateway.log -sleep 40 - -export ETH_SENDER_SENDER_TX_AGGREGATION_PAUSED=true -zk server --time-to-live 20 - -zk contract migrate-to-sync-layer -zk contract prepare-sync-layer-validators -zk contract update-config-for-sync-layer - -export ETH_SENDER_SENDER_TX_AGGREGATION_PAUSED=false -zk server diff --git a/core/lib/basic_types/src/vm_version.rs b/core/lib/basic_types/src/vm_version.rs deleted file mode 100644 index 47034ebd8a7..00000000000 --- a/core/lib/basic_types/src/vm_version.rs +++ /dev/null @@ -1,23 +0,0 @@ -#[derive(Debug, Clone, Copy)] -pub enum VmVersion { - M5WithoutRefunds, - M5WithRefunds, - M6Initial, - M6BugWithCompressionFixed, - Vm1_3_2, - VmVirtualBlocks, - VmVirtualBlocksRefundsEnhancement, - VmBoojumIntegration, - Vm1_4_1, - Vm1_4_2, - Vm1_5_0SmallBootloaderMemory, - Vm1_5_0IncreasedBootloaderMemory, - VmSyncLayer, -} - -impl VmVersion { - /// Returns the latest supported VM version. 
- pub const fn latest() -> VmVersion { - Self::VmSyncLayer - } -} diff --git a/core/lib/multivm/src/versions/shadow/tests.rs b/core/lib/multivm/src/versions/shadow/tests.rs index 78fd29809a9..0909c53923b 100644 --- a/core/lib/multivm/src/versions/shadow/tests.rs +++ b/core/lib/multivm/src/versions/shadow/tests.rs @@ -3,7 +3,7 @@ use std::{collections::HashSet, rc::Rc}; use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H256, U256}; -use zksync_vm_interface::pubdata::PubdataBuilder; +use zksync_vm_interface::pubdata::{PubdataBuilder, PubdataInput}; use super::ShadowedFastVm; use crate::{ @@ -120,11 +120,30 @@ impl TestedVm for ShadowedFastVm { }); } - fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { - self.get_mut("push_transaction_with_refund", |r| match r { - ShadowMut::Main(vm) => vm.push_transaction_with_refund(tx.clone(), refund), - ShadowMut::Shadow(vm) => vm.push_transaction_with_refund(tx.clone(), refund), - }); + fn push_transaction_with_refund_and_compression( + &mut self, + tx: Transaction, + refund: u64, + compression: bool, + ) { + self.get_mut( + "push_transaction_with_refund_and_compression", + |r| match r { + ShadowMut::Main(vm) => { + vm.push_transaction_with_refund_and_compression(tx.clone(), refund, compression) + } + ShadowMut::Shadow(vm) => { + vm.push_transaction_with_refund_and_compression(tx.clone(), refund, compression) + } + }, + ); + } + + fn pubdata_input(&self) -> PubdataInput { + self.get("pubdata_input", |r| match r { + ShadowRef::Main(vm) => vm.pubdata_input(), + ShadowRef::Shadow(vm) => vm.pubdata_input(), + }) } } @@ -234,6 +253,15 @@ impl TestedVm for ShadowedFastVm { // } // } // +// mod l1_messenger { +// use crate::versions::testonly::l1_messenger::*; +// +// #[test] +// fn rollup_da_output_hash_match() { +// test_rollup_da_output_hash_match::(); +// } +// } +// // mod l1_tx_execution { // use crate::versions::testonly::l1_tx_execution::*; // diff --git a/core/lib/multivm/src/versions/testonly/l1_messenger.rs b/core/lib/multivm/src/versions/testonly/l1_messenger.rs new file mode 100644 index 00000000000..5f72560d9fd --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/l1_messenger.rs @@ -0,0 +1,166 @@ +// TODO: move to shared tests + +use std::rc::Rc; + +use ethabi::Token; +use zksync_contracts::{l1_messenger_contract, l2_rollup_da_validator_bytecode}; +use zksync_test_account::TxType; +use zksync_types::{ + web3::keccak256, Address, Execute, ProtocolVersionId, L1_MESSENGER_ADDRESS, U256, +}; +use zksync_utils::{address_to_h256, u256_to_h256}; + +use super::{read_test_contract, ContractToDeploy, TestedVm, VmTesterBuilder}; +use crate::{ + interface::{ + pubdata::{PubdataBuilder, PubdataInput}, + InspectExecutionMode, TxExecutionMode, VmInterfaceExt, + }, + pubdata_builders::RollupPubdataBuilder, + vm_latest::constants::ZK_SYNC_BYTES_PER_BLOB, +}; + +const L2_DA_VALIDATOR_OUTPUT_HASH_KEY: usize = 5; +const USED_L2_DA_VALIDATOR_ADDRESS_KEY: usize = 6; + +fn encoded_uncompressed_state_diffs(input: &PubdataInput) -> Vec { + let mut result = vec![]; + for state_diff in input.state_diffs.iter() { + result.extend(state_diff.encode_padded()); + } + result +} + +fn compose_header_for_l1_commit_rollup(input: PubdataInput) -> Vec { + // The preimage under the hash `l2DAValidatorOutputHash` is expected to be in the following format: + // - First 32 bytes are the hash of the uncompressed state diff. + // - Then, there is a 32-byte hash of the full pubdata. + // - Then, there is the 1-byte number of blobs published. 
+ // - Then, there are linear hashes of the published blobs, 32 bytes each. + + let mut full_header = vec![]; + + let uncompressed_state_diffs = encoded_uncompressed_state_diffs(&input); + let uncompressed_state_diffs_hash = keccak256(&uncompressed_state_diffs); + full_header.extend(uncompressed_state_diffs_hash); + + let pubdata_builder = RollupPubdataBuilder::new(Address::zero()); + let mut full_pubdata = + pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::latest()); + let full_pubdata_hash = keccak256(&full_pubdata); + full_header.extend(full_pubdata_hash); + + // Now, we need to calculate the linear hashes of the blobs. + // Firstly, let's pad the pubdata to the size of the blob. + if full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { + let padding = + vec![0u8; ZK_SYNC_BYTES_PER_BLOB - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB]; + full_pubdata.extend(padding); + } + full_header.push((full_pubdata.len() / ZK_SYNC_BYTES_PER_BLOB) as u8); + + full_pubdata + .chunks(ZK_SYNC_BYTES_PER_BLOB) + .for_each(|chunk| { + full_header.extend(keccak256(chunk)); + }); + + full_header +} + +pub(crate) fn test_rollup_da_output_hash_match() { + // In this test, we check whether the L2 DA output hash is as expected. + + let l2_da_validator_address = Address::repeat_byte(0x12); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy { + bytecode: l2_rollup_da_validator_bytecode(), + address: l2_da_validator_address, + is_account: false, + is_funded: false, + }]) + .build::(); + + let account = &mut vm.rich_accounts[0]; + + // Firstly, deploy tx. It should publish the bytecode of the "test contract" + let counter = read_test_contract(); + + let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx; + // We do not use compression here, to have the bytecode published in full. + vm.vm + .push_transaction_with_refund_and_compression(tx, 0, false); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + // Then, we call the l1 messenger to also send an L2->L1 message. + let l1_messenger_contract = l1_messenger_contract(); + let encoded_data = l1_messenger_contract + .function("sendToL1") + .unwrap() + .encode_input(&[Token::Bytes(vec![])]) + .unwrap(); + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(L1_MESSENGER_ADDRESS), + calldata: encoded_data, + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + let pubdata_builder = RollupPubdataBuilder::new(l2_da_validator_address); + let batch_result = vm.vm.finish_batch(Rc::new(pubdata_builder)); + assert!( + !batch_result.block_tip_execution_result.result.is_failed(), + "Transaction wasn't successful {:?}", + batch_result.block_tip_execution_result.result + ); + let pubdata_input = vm.vm.pubdata_input(); + + // Just to double check that the test makes sense. 
+ assert!(!pubdata_input.user_logs.is_empty()); + assert!(!pubdata_input.l2_to_l1_messages.is_empty()); + assert!(!pubdata_input.published_bytecodes.is_empty()); + assert!(!pubdata_input.state_diffs.is_empty()); + + let expected_header: Vec = compose_header_for_l1_commit_rollup(pubdata_input); + + let l2_da_validator_output_hash = batch_result + .block_tip_execution_result + .logs + .system_l2_to_l1_logs + .iter() + .find(|log| log.0.key == u256_to_h256(L2_DA_VALIDATOR_OUTPUT_HASH_KEY.into())) + .unwrap() + .0 + .value; + + assert_eq!( + l2_da_validator_output_hash, + keccak256(&expected_header).into() + ); + + let l2_used_da_validator_address = batch_result + .block_tip_execution_result + .logs + .system_l2_to_l1_logs + .iter() + .find(|log| log.0.key == u256_to_h256(USED_L2_DA_VALIDATOR_ADDRESS_KEY.into())) + .unwrap() + .0 + .value; + + assert_eq!( + l2_used_da_validator_address, + address_to_h256(&l2_da_validator_address) + ); +} diff --git a/core/lib/multivm/src/versions/testonly/mod.rs b/core/lib/multivm/src/versions/testonly/mod.rs index eece1d475bb..faa05168172 100644 --- a/core/lib/multivm/src/versions/testonly/mod.rs +++ b/core/lib/multivm/src/versions/testonly/mod.rs @@ -42,6 +42,7 @@ pub(super) mod default_aa; pub(super) mod gas_limit; pub(super) mod get_used_contracts; pub(super) mod is_write_initial; +pub(super) mod l1_messenger; pub(super) mod l1_tx_execution; pub(super) mod l2_blocks; pub(super) mod nonce_holder; diff --git a/core/lib/multivm/src/versions/testonly/refunds.rs b/core/lib/multivm/src/versions/testonly/refunds.rs index edab843be4f..cd779f49bf0 100644 --- a/core/lib/multivm/src/versions/testonly/refunds.rs +++ b/core/lib/multivm/src/versions/testonly/refunds.rs @@ -56,8 +56,11 @@ pub(crate) fn test_predetermined_refunded_gas() { .build::(); assert_eq!(account.address(), vm.rich_accounts[0].address()); - vm.vm - .push_transaction_with_refund(tx.clone(), result.refunds.gas_refunded); + vm.vm.push_transaction_with_refund_and_compression( + tx.clone(), + result.refunds.gas_refunded, + true, + ); let result_with_predefined_refunds = vm .vm @@ -112,7 +115,7 @@ pub(crate) fn test_predetermined_refunded_gas() { let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; vm.vm - .push_transaction_with_refund(tx, changed_operator_suggested_refund); + .push_transaction_with_refund_and_compression(tx, changed_operator_suggested_refund, true); let result = vm .vm .finish_batch(default_pubdata_builder()) diff --git a/core/lib/multivm/src/versions/testonly/tester/mod.rs b/core/lib/multivm/src/versions/testonly/tester/mod.rs index 716b9386235..c45d0a6b470 100644 --- a/core/lib/multivm/src/versions/testonly/tester/mod.rs +++ b/core/lib/multivm/src/versions/testonly/tester/mod.rs @@ -8,7 +8,8 @@ use zksync_types::{ Address, L1BatchNumber, StorageKey, Transaction, H256, U256, }; use zksync_vm_interface::{ - pubdata::PubdataBuilder, CurrentExecutionState, InspectExecutionMode, VmExecutionResultAndLogs, + pubdata::{PubdataBuilder, PubdataInput}, + CurrentExecutionState, InspectExecutionMode, VmExecutionResultAndLogs, VmInterfaceHistoryEnabled, }; @@ -226,6 +227,14 @@ pub(crate) trait TestedVm: /// Same as `start_new_l2_block`, but should skip consistency checks (to verify they are performed by the bootloader). fn push_l2_block_unchecked(&mut self, block: L2BlockEnv); - /// Pushes a transaction with predefined refund value. 
- fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64); + /// Pushes a transaction with predefined refund value and compression. + fn push_transaction_with_refund_and_compression( + &mut self, + tx: Transaction, + refund: u64, + compression: bool, + ); + + /// Returns pubdata input. + fn pubdata_input(&self) -> PubdataInput; } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs new file mode 100644 index 00000000000..0bd01c7de13 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_messenger.rs @@ -0,0 +1,6 @@ +use crate::{versions::testonly::l1_messenger::test_rollup_da_output_hash_match, vm_fast::Vm}; + +#[test] +fn rollup_da_output_hash_match() { + test_rollup_da_output_hash_match::>(); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs index b29ca6ed7f8..4ed1f35d3fb 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ -4,8 +4,9 @@ use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H160, H256, use zksync_utils::h256_to_u256; use zksync_vm2::interface::{Event, HeapId, StateInterface}; use zksync_vm_interface::{ - pubdata::PubdataBuilder, storage::ReadStorage, CurrentExecutionState, L2BlockEnv, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + pubdata::{PubdataBuilder, PubdataInput}, + storage::ReadStorage, + CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, }; use super::Vm; @@ -24,6 +25,7 @@ use crate::{ // mod gas_limit; // mod get_used_contracts; // mod is_write_initial; +// mod l1_messenger; // mod l1_tx_execution; // mod l2_blocks; // mod nonce_holder; @@ -158,7 +160,16 @@ impl TestedVm for Vm> { self.bootloader_state.push_l2_block(block); } - fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { - self.push_transaction_inner(tx, refund, true); + fn push_transaction_with_refund_and_compression( + &mut self, + tx: Transaction, + refund: u64, + compression: bool, + ) { + self.push_transaction_inner(tx, refund, compression); + } + + fn pubdata_input(&self) -> PubdataInput { + todo!() } } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs index fcb718c7349..f1dade9dd8e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs @@ -1,156 +1,9 @@ -// // TODO: move to shared tests -// -// use ethabi::Token; -// use zksync_contracts::l1_messenger_contract; -// use zksync_types::{web3::keccak256, Execute, L1_MESSENGER_ADDRESS, U256}; -// use zksync_utils::{address_to_h256, u256_to_h256}; -// -// use crate::{ -// interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, pubdata::{PubdataInput, PubdataBuilder}}, -// vm_latest::{ -// constants::ZK_SYNC_BYTES_PER_BLOB, -// tests::{ -// tester::{DeployContractsTx, TxType, VmTesterBuilder}, -// utils::read_test_contract, -// }, -// HistoryEnabled, -// }, -// pubdata_builders::RollupPubdataBuilder -// }; -// -// pub(crate) const L2_DA_VALIDATOR_OUTPUT_HASH_KEY: usize = 5; -// pub(crate) const USED_L2_DA_VALIDATOR_ADDRESS_KEY: usize = 6; -// -// pub(crate) fn encoded_uncompressed_state_diffs(input: &PubdataInput) -> Vec { -// let mut result = vec![]; -// for state_diff in input.state_diffs.iter() { -// 
result.extend(state_diff.encode_padded()); -// } -// result -// } -// -// pub fn compose_header_for_l1_commit_rollup(input: PubdataInput) -> Vec { -// // The preimage under the hash `l2DAValidatorOutputHash` is expected to be in the following format: -// // - First 32 bytes are the hash of the uncompressed state diff. -// // - Then, there is a 32-byte hash of the full pubdata. -// // - Then, there is the 1-byte number of blobs published. -// // - Then, there are linear hashes of the published blobs, 32 bytes each. -// -// let mut full_header = vec![]; -// -// let uncompressed_state_diffs = encoded_uncompressed_state_diffs(&input); -// let uncompressed_state_diffs_hash = keccak256(&uncompressed_state_diffs); -// full_header.extend(uncompressed_state_diffs_hash); -// -// let mut full_pubdata = RollupPubdataBuilder::new().build_pubdata(input, false); -// let full_pubdata_hash = keccak256(&full_pubdata); -// full_header.extend(full_pubdata_hash); -// -// // Now, we need to calculate the linear hashes of the blobs. -// // Firstly, let's pad the pubdata to the size of the blob. -// if full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { -// let padding = -// vec![0u8; ZK_SYNC_BYTES_PER_BLOB - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB]; -// full_pubdata.extend(padding); -// } -// full_header.push((full_pubdata.len() / ZK_SYNC_BYTES_PER_BLOB) as u8); -// -// full_pubdata -// .chunks(ZK_SYNC_BYTES_PER_BLOB) -// .for_each(|chunk| { -// full_header.extend(keccak256(chunk)); -// }); -// -// full_header -// } -// -// #[test] -// fn test_publish_and_clear_state() { -// // In this test, we check whether the L2 DA output hash is as expected. -// // We will publish 320kb worth of pubdata. -// // It should produce 3 blobs. -// -// let mut vm = VmTesterBuilder::new(HistoryEnabled) -// .with_empty_in_memory_storage() -// .with_execution_mode(TxExecutionMode::VerifyExecute) -// .with_random_rich_accounts(1) -// .build(); -// -// let account = &mut vm.rich_accounts[0]; -// -// // Firstly, deploy tx. It should publish the bytecode of the "test contract" -// let counter = read_test_contract(); -// -// let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); -// // We do not use compression here, to have the bytecode published in full. -// vm.vm.push_transaction_with_compression(tx, false); -// let result = vm.vm.execute(VmExecutionMode::OneTx); -// assert!(!result.result.is_failed(), "Transaction wasn't successful"); -// -// // Then, we call the l1 messenger to also send an L2->L1 message. -// let l1_messenger_contract = l1_messenger_contract(); -// let encoded_data = l1_messenger_contract -// .function("sendToL1") -// .unwrap() -// .encode_input(&[Token::Bytes(vec![])]) -// .unwrap(); -// -// let tx = account.get_l2_tx_for_execute( -// Execute { -// contract_address: Some(L1_MESSENGER_ADDRESS), -// calldata: encoded_data, -// value: U256::zero(), -// factory_deps: vec![], -// }, -// None, -// ); -// vm.vm.push_transaction(tx); -// let result = vm.vm.execute(VmExecutionMode::OneTx); -// assert!(!result.result.is_failed(), "Transaction wasn't successful"); -// -// let batch_result = vm.vm.execute(VmExecutionMode::Batch); -// if batch_result.result.is_failed() { -// panic!("Batch execution failed: {:?}", batch_result.result); -// } -// assert!( -// !batch_result.result.is_failed(), -// "Transaction wasn't successful" -// ); -// let pubdata_input = vm.vm.bootloader_state.get_pubdata_information().clone(); -// -// // Just to double check that the test makes sense. 
-// assert!(!pubdata_input.user_logs.is_empty()); -// assert!(!pubdata_input.l2_to_l1_messages.is_empty()); -// assert!(!pubdata_input.published_bytecodes.is_empty()); -// assert!(!pubdata_input.state_diffs.is_empty()); -// -// let expected_header: Vec = compose_header_for_l1_commit_rollup(pubdata_input); -// -// let l2_da_validator_output_hash = batch_result -// .logs -// .system_l2_to_l1_logs -// .iter() -// .find(|log| log.0.key == u256_to_h256(L2_DA_VALIDATOR_OUTPUT_HASH_KEY.into())) -// .unwrap() -// .0 -// .value; -// -// assert_eq!( -// l2_da_validator_output_hash, -// keccak256(&expected_header).into() -// ); -// -// let l2_used_da_validator_address = batch_result -// .logs -// .system_l2_to_l1_logs -// .iter() -// .find(|log| log.0.key == u256_to_h256(USED_L2_DA_VALIDATOR_ADDRESS_KEY.into())) -// .unwrap() -// .0 -// .value; -// -// assert_eq!( -// l2_used_da_validator_address, -// address_to_h256(&vm.vm.system_env.pubdata_params.l2_da_validator_address) -// ); -// } +use crate::{ + versions::testonly::l1_messenger::test_rollup_da_output_hash_match, + vm_latest::{HistoryEnabled, Vm}, +}; + +#[test] +fn rollup_da_output_hash_match() { + test_rollup_da_output_hash_match::>(); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index 9d75aba9208..3303b709af5 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -10,7 +10,7 @@ use zk_evm_1_5_0::{ }; use zksync_types::{writes::StateDiffRecord, StorageKey, StorageValue, Transaction, H256, U256}; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use zksync_vm_interface::pubdata::PubdataBuilder; +use zksync_vm_interface::pubdata::{PubdataBuilder, PubdataInput}; use super::{HistoryEnabled, Vm}; use crate::{ @@ -181,10 +181,19 @@ impl TestedVm for TestedLatestVm { self.bootloader_state.push_l2_block(block); } - fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { + fn push_transaction_with_refund_and_compression( + &mut self, + tx: Transaction, + refund: u64, + compression: bool, + ) { let tx = TransactionData::new(tx, false); let overhead = tx.overhead_gas(); - self.push_raw_transaction(tx, overhead, refund, true) + self.push_raw_transaction(tx, overhead, refund, compression) + } + + fn pubdata_input(&self) -> PubdataInput { + self.bootloader_state.get_pubdata_information().clone() } } diff --git a/core/lib/vm_interface/src/pubdata/mod.rs b/core/lib/vm_interface/src/pubdata/mod.rs index f901687b5fa..c0b6e744dfc 100644 --- a/core/lib/vm_interface/src/pubdata/mod.rs +++ b/core/lib/vm_interface/src/pubdata/mod.rs @@ -13,7 +13,7 @@ use zksync_types::{ /// bytes32 value; /// } /// ``` -#[derive(Debug, Default, Clone, PartialEq)] +#[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct L1MessengerL2ToL1Log { pub l2_shard_id: u8, pub is_service: bool, @@ -63,7 +63,7 @@ impl From for L2ToL1Log { } /// Struct based on which the pubdata blob is formed -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, Eq, PartialEq)] pub struct PubdataInput { pub user_logs: Vec, pub l2_to_l1_messages: Vec>,